changeset 20886:0e647427eee4

Merge with dc41766b35e11348281b76fd70b456b6ba3cf7e9
author Michael Van De Vanter <michael.van.de.vanter@oracle.com>
date Fri, 10 Apr 2015 16:58:26 -0700
parents e7ece52e1ff3 (current diff) dc41766b35e1 (diff)
children 37912559d662
files agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/GraalCompilerTestSubstitutions.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nfi/HotSpotNativeFunctionHandle.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nfi/HotSpotNativeFunctionInterface.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nfi/HotSpotNativeFunctionPointer.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nfi/HotSpotNativeLibraryHandle.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nfi/NativeCallStubGraphBuilder.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nfi/RawNativeCallNodeFactory.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/SystemIdentityHashCodeNode.java graal/com.oracle.graal.replacements.amd64/src/com/oracle/graal/replacements/amd64/AMD64Guards.java graal/com.oracle.graal.replacements.amd64/src/com/oracle/graal/replacements/amd64/AMD64IntegerSubstitutions.java graal/com.oracle.graal.replacements.amd64/src/com/oracle/graal/replacements/amd64/AMD64LongSubstitutions.java graal/com.oracle.graal.replacements.amd64/src/com/oracle/graal/replacements/amd64/AMD64Substitutions.java graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/GraalMethodSubstitutions.java graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/MathSubstitutionsX86.java graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/MathIntrinsicNode.java graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/MathPowNode.java graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/arithmetic/IntegerAddExactNode.java graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/arithmetic/IntegerAddExactSplitNode.java graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/arithmetic/IntegerExactArithmeticNode.java graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/arithmetic/IntegerExactArithmeticSplitNode.java graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/arithmetic/IntegerMulExactNode.java graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/arithmetic/IntegerMulExactSplitNode.java graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/arithmetic/IntegerMulHighNode.java graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/arithmetic/IntegerSubExactNode.java graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/arithmetic/IntegerSubExactSplitNode.java graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/arithmetic/UnsignedMulHighNode.java graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/phases/ReplaceIntrinsicsPhase.java make/jprt.properties make/solaris/makefiles/add_gnu_debuglink.make make/solaris/makefiles/fix_empty_sec_hdr_flags.make src/os/solaris/add_gnu_debuglink/add_gnu_debuglink.c src/os/solaris/fix_empty_sec_hdr_flags/fix_empty_sec_hdr_flags.c src/share/vm/gc_implementation/g1/heapRegionSeq.cpp src/share/vm/gc_implementation/g1/heapRegionSeq.hpp src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp src/share/vm/services/memPtr.cpp src/share/vm/services/memPtr.hpp src/share/vm/services/memPtrArray.hpp src/share/vm/services/memRecorder.cpp src/share/vm/services/memRecorder.hpp src/share/vm/services/memSnapshot.cpp src/share/vm/services/memSnapshot.hpp src/share/vm/services/memTrackWorker.cpp 
src/share/vm/services/memTrackWorker.hpp test/compiler/intrinsics/mathexact/sanity/Verifier.java test/testlibrary/com/oracle/java/testlibrary/DynamicVMOptionChecker.java test/testlibrary/com/oracle/java/testlibrary/TestDynamicVMOption.java
diffstat 935 files changed, 48195 insertions(+), 22422 deletions(-)
--- a/.hgtags	Fri Apr 10 16:55:38 2015 -0700
+++ b/.hgtags	Fri Apr 10 16:58:26 2015 -0700
@@ -501,6 +501,32 @@
 00cf2b6f51b9560b01030e8f4c28c466f0b21fe3 hs25.20-b23
 19408d5fd31c25ce60c43dd33e92b96e8df4a4ea jdk8u20-b25
 eaa4074a7e3975cd33ec55e6b584586e2ac681bd jdk8u20-b26
+7c9925f21c2529a88eb64b8039cc080f60b85e01 jdk8u20-b31
+7edb04063a423e278fe34a0006d25fee198f495e jdk8u20-b32
+4828415ebbf11e205dcc08e97ad5ae7dd03522f9 jdk8u40-b00
+d952af8cf67dd1e7ab5fec9a299c6c6dafd1863e hs25.40-b01
+f0afba33c928ddaa2d5f003b90d683c143f78ea3 hs25.40-b02
+e2976043eac37c8036f6a6dfa454787f64fa3f56 hs25.40-b03
+cb95655ef06fece507bbc2792474411ab2e899ab hs25.40-b04
+dc06b830ea95ed953cac02e9e67a75ab682edb97 jdk8u40-b01
+897333c7e5874625bd26d09fdaf242196024e9c2 hs25.40-b05
+f52cb91647590fe4a12af295a8a87e2cb761b044 jdk8u40-b02
+fbc31318922c31488c0464ccd864d2cd1d9e21a7 hs25.40-b06
+38539608359a6dfc5740abb66f878af643757c3b jdk8u40-b03
+c3990b8c710e4c1996b5cd579681645d9f0408c1 hs25.40-b07
+3f1b3f2dd1cb224747a11a6788e58b5cb7683d57 hs25.40-b08
+fd4dbaff30027832dd21bcc7171ddb466ca2924f jdk8u40-b04
+c9635cad4a5d794a96b4a26d3e7ad1d783133add hs25.40-b09
+232b50b20797424c64da115ca48db131b1489ac1 jdk8u40-b05
+47ec483b936ee8cd2b26752e0aba3d5e6caab393 hs25.40-b10
+3702eb6ec7086186211ab7763a44c68fc7a898eb jdk8u40-b06
+4489ac5b084aae8e2a80b71ff98d8e8acc3bf290 hs25.40-b11
+b63d0e8bfc0738bba21ae67779780f59118a95f7 jdk8u40-b07
+5c1b5be2c69bcae610a790e9438da446c61d3361 hs25.40-b12
+905a16825d2931345a7d6dba9e427f98eb51761a jdk8u40-b08
+d96716f6cbba9f000dfb1da39d2b81264f4cdea7 hs25.40-b13
+7ff8d51e0d8fc71f3ad31fd15817083341416ca8 jdk8u40-b09
+e193bbae24effeaf476f688d8d840787db53d74e hs25.40-b14
 a4d44dfb7d30eea54bc172e4429a655454ae0bbf jdk8u25-b00
 9a2152fbd929b0d8b2f5c326a5526214ae71731a jdk8u25-b01
 d3d5604ea0dea3812e87ba76ac199d0a8be6f49f jdk8u25-b02
@@ -518,6 +544,49 @@
 c77d5db189422e2eef0443ee212644e497113b18 jdk8u25-b14
 e62c06b887310b5bd23be9b817a9a6f0daf0d0e1 jdk8u25-b15
 6467bdd4d22d8b140844dc847c43b9ba7cb0bbd1 jdk8u25-b16
+28b50d07f6f8c5a567b6a25e95a423948114a004 jdk8u25-b17
+639abc668bfe995dba811dd35411b9ea8a9041cd jdk8u25-b18
+c3528699fb33fe3eb1d117504184ae7ab2507aa1 jdk8u25-b31
+5bb683bbe2c74876d585b5c3232fc3aab7b23e97 jdk8u31-b00
+5bb686ae3b89f8aa1c74331b2d24e2a5ebd43448 jdk8u31-b01
+087678da96603c9705b38b6cc4a6569ac7b4420a jdk8u31-b02
+401cbaa475b4efe53153119ab87a82b217980a7f jdk8u31-b03
+060cdf93040c1bfa5fdf580da5e9999042632cc8 jdk8u31-b04
+6e56d7f1634f6c4cd4196e699c06e6ca2e6d6efb jdk8u31-b05
+271a32147391d08b0f338d9353330e2b5584d580 jdk8u31-b06
+e9f815c3f21cf2febd8e3c185917c1519aa52d9a jdk8u31-b07
+cc74ca22516644867be3b8db6c1f8d05ab4f6c27 jdk8u31-b08
+245d29ed5db5ad6914eb0c9fe78b9ba26122c478 jdk8u31-b09
+d7b6bdd51abe68b16411d5b292fb830a43c5bc09 jdk8u31-b10
+9906d432d6dbd2cda242e3f3cfde7cf6c90245bf jdk8u31-b11
+e13839545238d1ecf17f0489bb6fb765de46719a jdk8u31-b12
+4206e725d584be942c25ff46ff23d8e299ca4a4c jdk8u31-b13
+1b3abbeee961dee49780c0e4af5337feb918c555 jdk8u40-b10
+f10fe402dfb1543723b4b117a7cba3ea3d4159f1 hs25.40-b15
+99372b2fee0eb8b3452f47230e84aa6e97003184 jdk8u40-b11
+8b9ec2da541a74ac698560b6a2bc45fccb789919 hs25.40-b16
+6b93bf9ea3ea57ed0fe53cfedb2f9ab912c324e5 jdk8u40-b12
+521e269ae1daa9df1cb0835b97aa76bdf340fcb2 hs25.40-b17
+86307d47790785398d0695acc361bccaefe25f94 jdk8u40-b13
+4d5dc0d0f8799fafa1135d51d85edd4edd566501 hs25.40-b18
+b8ca8ec1daea70f7c0d519e866f9f147ec247055 jdk8u40-b14
+eb16b24e2eba9bdf04a9b377bebc2db9f713ff5e jdk8u40-b15
+3a8a0fd171c5876023112941b1c7254262f9adfc hs25.40-b19
+aa2442f89230dc46147c721812f3b3bd4c612e83 hs25.40-b20
+5ea68fb91139081304357f9b937f32c5fdfeca6d jdk8u40-b16
+6bf89bfe8185747a57193efb6cec1f17ccc80414 hs25.40-b21
+fc1f9b67fd8c5d5cd94ecc03569d93e7ce7fb574 jdk8u40-b17
+bc5a90a4db47f1c497d7894434c42325f595cd02 hs25.40-b22
+31d3306aad29e39929418ed43f28212a5f5306a3 jdk8u40-b18
+f8fc5cbe082ce0fb0c6c1dcd39493a16ed916353 hs25.40-b23
+d9349fa8822336e0244da0a8448f3e6b2d62741d jdk8u40-b19
+c3933f52eeb33f70ee562464edddfe9f01d944fd jdk8u40-b20
+d2e9a6bec4f2eec8506eed16f7324992a85d8480 hs25.40-b24
+25ec4a67433744bbe3406e5069e7fd1876ebbf2f jdk8u40-b21
+0f0cb4eeab2d871274f4ffdcd6017d2fdfa89238 hs25.40-b25
+0ee548a1cda08c884eccd563e2d5fdb6ee769b5a jdk8u40-b22
+0e67683b700174eab71ea205d1cfa4f1cf4523ba jdk8u40-b23
+fa4e797f61e6dda1a60e06944018213bff2a1b76 jdk8u40-b24
 b124e22eb772806c13d942cc110de38da0108147 graal-0.1
 483d05bf77a7c2a762aca1e06c4191bc06647176 graal-0.2
 9535eccd2a115f6c6f0b15efb508b11ff74cc0d3 graal-0.3
--- a/THIRD_PARTY_README	Fri Apr 10 16:55:38 2015 -0700
+++ b/THIRD_PARTY_README	Fri Apr 10 16:58:26 2015 -0700
@@ -3385,7 +3385,7 @@
 included with JRE 8, JDK 8, and OpenJDK 8.
 
   Apache Commons Math 3.2
-  Apache Derby 10.10.1.3        
+  Apache Derby 10.11.1.2
   Apache Jakarta BCEL 5.1 
   Apache Jakarta Regexp 1.4 
   Apache Santuario XML Security for Java 1.5.4
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1Allocator.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,40 @@
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+public class G1Allocator extends VMObject {
+
+  //size_t _summary_bytes_used;
+  static private CIntegerField summaryBytesUsedField;
+
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+      public void update(Observable o, Object data) {
+        initialize(VM.getVM().getTypeDataBase());
+      }
+    });
+  }
+
+  static private synchronized void initialize(TypeDataBase db) {
+    Type type = db.lookupType("G1Allocator");
+
+    summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
+  }
+
+  public long getSummaryBytes() {
+    return summaryBytesUsedField.getValue(addr);
+  }
+
+  public G1Allocator(Address addr) {
+    super(addr);
+
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java	Fri Apr 10 16:58:26 2015 -0700
@@ -36,19 +36,18 @@
 import sun.jvm.hotspot.runtime.VM;
 import sun.jvm.hotspot.runtime.VMObjectFactory;
 import sun.jvm.hotspot.types.AddressField;
-import sun.jvm.hotspot.types.CIntegerField;
 import sun.jvm.hotspot.types.Type;
 import sun.jvm.hotspot.types.TypeDataBase;
 
 // Mirror class for G1CollectedHeap.
 
 public class G1CollectedHeap extends SharedHeap {
-    // HeapRegionSeq _seq;
-    static private long hrsFieldOffset;
-    // MemRegion _g1_committed;
-    static private long g1CommittedFieldOffset;
-    // size_t _summary_bytes_used;
-    static private CIntegerField summaryBytesUsedField;
+    // HeapRegionManager _hrm;
+    static private long hrmFieldOffset;
+    // MemRegion _g1_reserved;
+    static private long g1ReservedFieldOffset;
+    // G1Allocator* _allocator
+    static private AddressField g1Allocator;
     // G1MonitoringSupport* _g1mm;
     static private AddressField g1mmField;
     // HeapRegionSet _old_set;
@@ -67,32 +66,29 @@
     static private synchronized void initialize(TypeDataBase db) {
         Type type = db.lookupType("G1CollectedHeap");
 
-        hrsFieldOffset = type.getField("_hrs").getOffset();
-        g1CommittedFieldOffset = type.getField("_g1_committed").getOffset();
-        summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
+        hrmFieldOffset = type.getField("_hrm").getOffset();
+        g1Allocator = type.getAddressField("_allocator");
         g1mmField = type.getAddressField("_g1mm");
         oldSetFieldOffset = type.getField("_old_set").getOffset();
         humongousSetFieldOffset = type.getField("_humongous_set").getOffset();
     }
 
     public long capacity() {
-        Address g1CommittedAddr = addr.addOffsetTo(g1CommittedFieldOffset);
-        MemRegion g1Committed = new MemRegion(g1CommittedAddr);
-        return g1Committed.byteSize();
+        return hrm().capacity();
     }
 
     public long used() {
-        return summaryBytesUsedField.getValue(addr);
+        return allocator().getSummaryBytes();
     }
 
     public long n_regions() {
-        return hrs().length();
+        return hrm().length();
     }
 
-    private HeapRegionSeq hrs() {
-        Address hrsAddr = addr.addOffsetTo(hrsFieldOffset);
-        return (HeapRegionSeq) VMObjectFactory.newObject(HeapRegionSeq.class,
-                                                         hrsAddr);
+    private HeapRegionManager hrm() {
+        Address hrmAddr = addr.addOffsetTo(hrmFieldOffset);
+        return (HeapRegionManager) VMObjectFactory.newObject(HeapRegionManager.class,
+                                                         hrmAddr);
     }
 
     public G1MonitoringSupport g1mm() {
@@ -100,6 +96,11 @@
         return (G1MonitoringSupport) VMObjectFactory.newObject(G1MonitoringSupport.class, g1mmAddr);
     }
 
+    public G1Allocator allocator() {
+        Address g1AllocatorAddr = g1Allocator.getValue(addr);
+        return (G1Allocator) VMObjectFactory.newObject(G1Allocator.class, g1AllocatorAddr);
+    }
+
     public HeapRegionSetBase oldSet() {
         Address oldSetAddr = addr.addOffsetTo(oldSetFieldOffset);
         return (HeapRegionSetBase) VMObjectFactory.newObject(HeapRegionSetBase.class,
@@ -113,7 +114,7 @@
     }
 
     private Iterator<HeapRegion> heapRegionIterator() {
-        return hrs().heapRegionIterator();
+        return hrm().heapRegionIterator();
     }
 
     public void heapRegionIterate(SpaceClosure scl) {
--- a/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1HeapRegionTable.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1HeapRegionTable.java	Fri Apr 10 16:58:26 2015 -0700
@@ -93,19 +93,35 @@
     private class HeapRegionIterator implements Iterator<HeapRegion> {
         private long index;
         private long length;
+        private HeapRegion next;
 
-        @Override
-        public boolean hasNext() { return index < length; }
+        public HeapRegion positionToNext() {
+          HeapRegion result = next;
+          while (index < length && at(index) == null) {
+            index++;
+          }
+          if (index < length) {
+            next = at(index);
+            index++; // restart search at next element
+          } else {
+            next = null;
+          }
+          return result;
+        }
 
         @Override
-        public HeapRegion next() { return at(index++);    }
+        public boolean hasNext() { return next != null;     }
+
+        @Override
+        public HeapRegion next() { return positionToNext(); }
 
         @Override
-        public void remove()     { /* not supported */    }
+        public void remove()     { /* not supported */      }
 
-        HeapRegionIterator(long committedLength) {
+        HeapRegionIterator(long totalLength) {
             index = 0;
-            length = committedLength;
+            length = totalLength;
+            positionToNext();
         }
     }
 
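The rewritten iterator pre-positions on the first non-null region so that hasNext() only tests a cached element and uncommitted (null) slots are skipped. A minimal, self-contained sketch of the same look-ahead pattern over a plain array (the class name and array field are illustrative, not part of the changeset):

    import java.util.Iterator;
    import java.util.NoSuchElementException;

    // Look-ahead iterator: cache the next non-null element eagerly so that
    // hasNext() is a simple null check and null slots are skipped, mirroring
    // HeapRegionIterator.positionToNext() above.
    class SkipNullIterator<T> implements Iterator<T> {
        private final T[] elements;   // stands in for G1HeapRegionTable.at(index)
        private int index;
        private T next;

        SkipNullIterator(T[] elements) {
            this.elements = elements;
            positionToNext();          // pre-position, as the constructor above does
        }

        private T positionToNext() {
            T result = next;
            while (index < elements.length && elements[index] == null) {
                index++;
            }
            if (index < elements.length) {
                next = elements[index++];
            } else {
                next = null;
            }
            return result;
        }

        @Override public boolean hasNext() { return next != null; }

        @Override public T next() {
            if (next == null) {
                throw new NoSuchElementException();
            }
            return positionToNext();
        }

        public static void main(String[] args) {
            SkipNullIterator<String> it = new SkipNullIterator<>(new String[]{null, "a", null, "b"});
            while (it.hasNext()) {
                System.out.println(it.next());   // prints "a" then "b"
            }
        }
    }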
--- a/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegion.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegion.java	Fri Apr 10 16:58:26 2015 -0700
@@ -24,23 +24,26 @@
 
 package sun.jvm.hotspot.gc_implementation.g1;
 
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Observable;
 import java.util.Observer;
-
 import sun.jvm.hotspot.debugger.Address;
-import sun.jvm.hotspot.memory.ContiguousSpace;
+import sun.jvm.hotspot.memory.CompactibleSpace;
+import sun.jvm.hotspot.memory.MemRegion;
 import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.types.AddressField;
 import sun.jvm.hotspot.types.CIntegerField;
 import sun.jvm.hotspot.types.Type;
 import sun.jvm.hotspot.types.TypeDataBase;
 
 // Mirror class for HeapRegion. Currently we don't actually include
-// any of its fields but only iterate over it (which we get "for free"
-// as HeapRegion ultimately inherits from ContiguousSpace).
+// any of its fields but only iterate over it.
 
-public class HeapRegion extends ContiguousSpace {
+public class HeapRegion extends CompactibleSpace {
     // static int GrainBytes;
     static private CIntegerField grainBytesField;
+    static private AddressField topField;
 
     static {
         VM.registerVMInitializedObserver(new Observer() {
@@ -54,6 +57,8 @@
         Type type = db.lookupType("HeapRegion");
 
         grainBytesField = type.getCIntegerField("GrainBytes");
+        topField = type.getAddressField("_top");
+
     }
 
     static public long grainBytes() {
@@ -63,4 +68,25 @@
     public HeapRegion(Address addr) {
         super(addr);
     }
+
+    public Address top() {
+        return topField.getValue(addr);
+    }
+
+    @Override
+    public List getLiveRegions() {
+        List res = new ArrayList();
+        res.add(new MemRegion(bottom(), top()));
+        return res;
+    }
+
+    @Override
+    public long used() {
+        return top().minus(bottom());
+    }
+
+    @Override
+    public long free() {
+        return end().minus(top());
+    }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionManager.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Iterator;
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.runtime.VMObjectFactory;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+// Mirror class for HeapRegionManager.
+
+public class HeapRegionManager extends VMObject {
+    // G1HeapRegionTable _regions
+    static private long regionsFieldOffset;
+    // uint _committed_length
+    static private CIntegerField numCommittedField;
+
+    static {
+        VM.registerVMInitializedObserver(new Observer() {
+                public void update(Observable o, Object data) {
+                    initialize(VM.getVM().getTypeDataBase());
+                }
+            });
+    }
+
+    static private synchronized void initialize(TypeDataBase db) {
+        Type type = db.lookupType("HeapRegionManager");
+
+        regionsFieldOffset = type.getField("_regions").getOffset();
+        numCommittedField = type.getCIntegerField("_num_committed");
+    }
+
+    private G1HeapRegionTable regions() {
+        Address regionsAddr = addr.addOffsetTo(regionsFieldOffset);
+        return (G1HeapRegionTable) VMObjectFactory.newObject(G1HeapRegionTable.class,
+                                                             regionsAddr);
+    }
+
+    public long capacity() {
+        return length() * HeapRegion.grainBytes();
+    }
+
+    public long length() {
+        return regions().length();
+    }
+
+    public long committedLength() {
+        return numCommittedField.getValue(addr);
+    }
+
+    public Iterator<HeapRegion> heapRegionIterator() {
+        return regions().heapRegionIterator(length());
+    }
+
+    public HeapRegionManager(Address addr) {
+        super(addr);
+    }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,84 +0,0 @@
-/*
- * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.gc_implementation.g1;
-
-import java.util.Iterator;
-import java.util.Observable;
-import java.util.Observer;
-
-import sun.jvm.hotspot.debugger.Address;
-import sun.jvm.hotspot.runtime.VM;
-import sun.jvm.hotspot.runtime.VMObject;
-import sun.jvm.hotspot.runtime.VMObjectFactory;
-import sun.jvm.hotspot.types.AddressField;
-import sun.jvm.hotspot.types.CIntegerField;
-import sun.jvm.hotspot.types.Type;
-import sun.jvm.hotspot.types.TypeDataBase;
-
-// Mirror class for HeapRegionSeq. It essentially encapsulates the G1HeapRegionTable.
-
-public class HeapRegionSeq extends VMObject {
-    // G1HeapRegionTable _regions
-    static private long regionsFieldOffset;
-    // uint _committed_length
-    static private CIntegerField committedLengthField;
-
-    static {
-        VM.registerVMInitializedObserver(new Observer() {
-                public void update(Observable o, Object data) {
-                    initialize(VM.getVM().getTypeDataBase());
-                }
-            });
-    }
-
-    static private synchronized void initialize(TypeDataBase db) {
-        Type type = db.lookupType("HeapRegionSeq");
-
-        regionsFieldOffset = type.getField("_regions").getOffset();
-        committedLengthField = type.getCIntegerField("_committed_length");
-    }
-
-    private G1HeapRegionTable regions() {
-        Address regionsAddr = addr.addOffsetTo(regionsFieldOffset);
-        return (G1HeapRegionTable) VMObjectFactory.newObject(G1HeapRegionTable.class,
-                                                             regionsAddr);
-    }
-
-    public long length() {
-        return regions().length();
-    }
-
-    public long committedLength() {
-        return committedLengthField.getValue(addr);
-    }
-
-    public Iterator<HeapRegion> heapRegionIterator() {
-        return regions().heapRegionIterator(committedLength());
-    }
-
-    public HeapRegionSeq(Address addr) {
-        super(addr);
-    }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Fri Apr 10 16:58:26 2015 -0700
@@ -152,7 +152,7 @@
 
   private long indexOffset(long index) {
     if (Assert.ASSERTS_ENABLED) {
-      Assert.that(index > 0 && index < getLength(),  "invalid cp index " + index + " " + getLength());
+      Assert.that(index >= 0 && index < getLength(),  "invalid cp index " + index + " " + getLength());
     }
     return (index * getElementSize()) + headerSize;
   }
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java	Fri Apr 10 16:58:26 2015 -0700
@@ -98,11 +98,14 @@
            break;
        default: throw new IllegalArgumentException();
        }
+
        if (cpCache == null) {
           return (short) cpCacheIndex;
        } else if (fmt.indexOf("JJJJ") >= 0) {
-          // change byte-ordering and go via secondary cache entry
-           throw new InternalError("unimplemented");
+          // Invokedynamic requires special handling
+          cpCacheIndex = ~cpCacheIndex;
+          cpCacheIndex = bytes.swapInt(cpCacheIndex);
+          return (short) cpCache.getEntryAt(cpCacheIndex).getConstantPoolIndex();
        } else if (fmt.indexOf("JJ") >= 0) {
           // change byte-ordering and go via cache
           return (short) cpCache.getEntryAt((int) (0xFFFF & bytes.swapShort((short)cpCacheIndex))).getConstantPoolIndex();
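The new invokedynamic branch treats the rewritten 4-byte operand as the bitwise complement of the constant pool cache index in native byte order, so the rewriter undoes it by complementing and byte-swapping before looking up the cache entry. A hedged, self-contained illustration of that invert-and-swap round trip (the class and method names, and the unconditional byte swap, are illustrative simplifications; the SA's bytes.swapInt only swaps when host and target byte order differ):

    public class IndyOperandDemo {
        // How a cache index might be stored in the rewritten operand (assumed layout).
        static int encode(int cpCacheIndex) {
            return Integer.reverseBytes(~cpCacheIndex);
        }

        // Mirrors the decoding above: complement, then byte-swap.
        static int decode(int rawOperand) {
            return Integer.reverseBytes(~rawOperand);
        }

        public static void main(String[] args) {
            int index = 5;
            System.out.println(decode(encode(index)));   // prints 5: the two steps are self-inverse
        }
    }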
--- a/graal/com.oracle.graal.api.meta.test/src/com/oracle/graal/api/meta/test/TestResolvedJavaType.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.api.meta.test/src/com/oracle/graal/api/meta/test/TestResolvedJavaType.java	Fri Apr 10 16:58:26 2015 -0700
@@ -678,6 +678,9 @@
         if (f.getDeclaringClass().equals(metaAccess.lookupJavaType(ConstantPool.class)) && f.getName().equals("constantPoolOop")) {
             return true;
         }
+        if (f.getDeclaringClass().equals(metaAccess.lookupJavaType(Class.class)) && f.getName().equals("classLoader")) {
+            return true;
+        }
         return false;
     }
 
@@ -693,7 +696,7 @@
                 }
                 for (ResolvedJavaField rf : actual) {
                     if (!isHiddenFromReflection(rf)) {
-                        assertEquals(lookupField(expected, rf) != null, !rf.isInternal());
+                        assertEquals(rf.toString(), lookupField(expected, rf) != null, !rf.isInternal());
                     }
                 }
 
--- a/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java	Fri Apr 10 16:58:26 2015 -0700
@@ -73,7 +73,7 @@
 /**
  * This class implements the AMD64 specific portion of the LIR generator.
  */
-public abstract class AMD64LIRGenerator extends LIRGenerator {
+public abstract class AMD64LIRGenerator extends LIRGenerator implements AMD64ArithmeticLIRGenerator {
 
     private static final RegisterValue RCX_I = AMD64.rcx.asValue(LIRKind.value(Kind.Int));
 
--- a/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/Fields.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/Fields.java	Fri Apr 10 16:58:26 2015 -0700
@@ -178,6 +178,38 @@
     }
 
     /**
+     * Gets the value of a field for a given object.
+     *
+     * @param object the object whose field is to be read
+     * @param index the index of the field (between 0 and {@link #getCount()})
+     * @return the raw bits of the specified primitive field, widened to a {@code long}
+     */
+    public long getRawPrimitive(Object object, int index) {
+        long offset = offsets[index];
+        Class<?> type = types[index];
+
+        if (type == Integer.TYPE) {
+            return unsafe.getInt(object, offset);
+        } else if (type == Long.TYPE) {
+            return unsafe.getLong(object, offset);
+        } else if (type == Boolean.TYPE) {
+            return unsafe.getBoolean(object, offset) ? 1 : 0;
+        } else if (type == Float.TYPE) {
+            return Float.floatToRawIntBits(unsafe.getFloat(object, offset));
+        } else if (type == Double.TYPE) {
+            return Double.doubleToRawLongBits(unsafe.getDouble(object, offset));
+        } else if (type == Short.TYPE) {
+            return unsafe.getShort(object, offset);
+        } else if (type == Character.TYPE) {
+            return unsafe.getChar(object, offset);
+        } else if (type == Byte.TYPE) {
+            return unsafe.getByte(object, offset);
+        } else {
+            throw GraalInternalError.shouldNotReachHere();
+        }
+    }
+
+    /**
      * Determines if a field in the domain of this object is the same as the field denoted by the
      * same index in another {@link Fields} object.
      */
@@ -244,6 +276,30 @@
         }
     }
 
+    public void setRawPrimitive(Object object, int index, long value) {
+        long offset = offsets[index];
+        Class<?> type = types[index];
+        if (type == Integer.TYPE) {
+            unsafe.putInt(object, offset, (int) value);
+        } else if (type == Long.TYPE) {
+            unsafe.putLong(object, offset, value);
+        } else if (type == Boolean.TYPE) {
+            unsafe.putBoolean(object, offset, value != 0);
+        } else if (type == Float.TYPE) {
+            unsafe.putFloat(object, offset, Float.intBitsToFloat((int) value));
+        } else if (type == Double.TYPE) {
+            unsafe.putDouble(object, offset, Double.longBitsToDouble(value));
+        } else if (type == Short.TYPE) {
+            unsafe.putShort(object, offset, (short) value);
+        } else if (type == Character.TYPE) {
+            unsafe.putChar(object, offset, (char) value);
+        } else if (type == Byte.TYPE) {
+            unsafe.putByte(object, offset, (byte) value);
+        } else {
+            throw GraalInternalError.shouldNotReachHere();
+        }
+    }
+
     @Override
     public String toString() {
         StringBuilder sb = new StringBuilder(getClass().getSimpleName()).append('[');
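The new getRawPrimitive/setRawPrimitive pair flattens any primitive field into a long of raw bits and back. A minimal sketch of the float case using only the same JDK conversions (no Unsafe; the values are illustrative):

    public class RawBitsDemo {
        public static void main(String[] args) {
            float original = 3.5f;
            // Widen the raw float bits into a long, as getRawPrimitive does for float fields.
            long raw = Float.floatToRawIntBits(original);
            // Narrow back and reinterpret the bits, as setRawPrimitive does.
            float restored = Float.intBitsToFloat((int) raw);
            System.out.println(original == restored);   // true
        }
    }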
--- a/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/GraalInternalError.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/GraalInternalError.java	Fri Apr 10 16:58:26 2015 -0700
@@ -48,6 +48,10 @@
         throw new GraalInternalError("should not reach here: %s", msg);
     }
 
+    public static RuntimeException shouldNotReachHere(Throwable cause) {
+        throw new GraalInternalError(cause);
+    }
+
     /**
      * Checks a given condition and throws a {@link GraalInternalError} if it is false. Guarantees
      * are stronger than assertions in that they are always checked. Error messages for guarantee
--- a/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/GraalOptions.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/GraalOptions.java	Fri Apr 10 16:58:26 2015 -0700
@@ -202,7 +202,7 @@
     public static final OptionValue<Boolean> HotSpotPrintInlining = new OptionValue<>(false);
 
     // Register allocator debugging
-    @Option(help = "Comma separated list of register that the allocation is limited to.", type = OptionType.Debug)
+    @Option(help = "Comma separated list of registers that register allocation is limited to.", type = OptionType.Debug)
     public static final OptionValue<String> RegisterPressure = new OptionValue<>(null);
 
     @Option(help = "", type = OptionType.Debug)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/FrequencyEncoder.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.compiler.common.util;
+
+import java.util.*;
+
+/**
+ * Creates an array of T objects ordered by the occurrence frequency of each object. The most
+ * frequently used object is the first one, the least frequently used the last one. If {@code null}
+ * is added, it is always the first element.
+ *
+ * Either object {@link #createIdentityEncoder() identity} or object
+ * {@link #createEqualityEncoder() equality} can be used to build the array and count the frequency.
+ */
+public class FrequencyEncoder<T> {
+
+    static class Entry<T> {
+        protected final T object;
+        protected int frequency;
+        protected int index;
+
+        protected Entry(T object) {
+            this.object = object;
+            this.index = -1;
+        }
+    }
+
+    protected final Map<T, Entry<T>> map;
+
+    /**
+     * Creates an encoder that uses object identity.
+     */
+    public static <T> FrequencyEncoder<T> createIdentityEncoder() {
+        return new FrequencyEncoder<>(new IdentityHashMap<>());
+    }
+
+    /**
+     * Creates an encoder that uses {@link Object#equals(Object) object equality}.
+     */
+    public static <T> FrequencyEncoder<T> createEqualityEncoder() {
+        return new FrequencyEncoder<>(new HashMap<>());
+    }
+
+    protected FrequencyEncoder(Map<T, Entry<T>> map) {
+        this.map = map;
+    }
+
+    /**
+     * Adds an object to the array.
+     */
+    public void addObject(T object) {
+        Entry<T> entry = map.get(object);
+        if (entry == null) {
+            entry = new Entry<>(object);
+            map.put(object, entry);
+        }
+        if (object == null) {
+            /* null must get index 0, so sort it up. */
+            entry.frequency = Integer.MAX_VALUE;
+        } else {
+            entry.frequency++;
+        }
+    }
+
+    /**
+     * Returns the index of an object in the array. The object must have been
+     * {@link #addObject(Object) added} before.
+     */
+    public int getIndex(Object object) {
+        Entry<T> entry = map.get(object);
+        assert entry != null && entry.index >= 0;
+        return entry.index;
+    }
+
+    /**
+     * Returns the number of distinct objects that have been added, i.e., the length of the array.
+     */
+    public int getLength() {
+        return map.size();
+    }
+
+    /**
+     * Fills the provided array with the added objects. The array must have the {@link #getLength()
+     * correct length}.
+     */
+    public T[] encodeAll(T[] allObjects) {
+        assert allObjects.length == map.size();
+        List<Entry<T>> sortedEntries = new ArrayList<>(map.values());
+        sortedEntries.sort((e1, e2) -> -Integer.compare(e1.frequency, e2.frequency));
+        for (int i = 0; i < sortedEntries.size(); i++) {
+            Entry<T> entry = sortedEntries.get(i);
+            entry.index = i;
+            allObjects[i] = entry.object;
+            assert entry.object != null || entry.index == 0;
+        }
+        return allObjects;
+    }
+}
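A short usage sketch for the encoder above (the input strings are illustrative): objects added more often sort to the front of the encoded array, and a null entry, if added, always ends up at index 0.

    import com.oracle.graal.compiler.common.util.FrequencyEncoder;

    public class FrequencyEncoderDemo {
        public static void main(String[] args) {
            FrequencyEncoder<String> encoder = FrequencyEncoder.createEqualityEncoder();
            encoder.addObject("common");
            encoder.addObject("common");
            encoder.addObject("rare");
            // The target array must have exactly getLength() elements.
            String[] encoded = encoder.encodeAll(new String[encoder.getLength()]);
            System.out.println(encoded[0]);                // "common" (highest frequency)
            System.out.println(encoder.getIndex("rare"));  // 1
        }
    }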
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/TypeConversion.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.compiler.common.util;
+
+/**
+ * Provides low-level value checks and conversion for signed and unsigned values of size 1, 2, and 4
+ * bytes.
+ */
+public class TypeConversion {
+
+    public static boolean isS1(long value) {
+        return value >= Byte.MIN_VALUE && value <= Byte.MAX_VALUE;
+    }
+
+    public static boolean isU1(long value) {
+        return value >= 0 && value <= 0xFF;
+    }
+
+    public static boolean isS2(long value) {
+        return value >= Short.MIN_VALUE && value <= Short.MAX_VALUE;
+    }
+
+    public static boolean isU2(long value) {
+        return value >= 0 && value <= 0xFFFF;
+    }
+
+    public static boolean isS4(long value) {
+        return value >= Integer.MIN_VALUE && value <= Integer.MAX_VALUE;
+    }
+
+    public static boolean isU4(long value) {
+        return value >= 0 && value <= 0xFFFFFFFFL;
+    }
+
+    public static byte asS1(long value) {
+        assert isS1(value);
+        return (byte) value;
+    }
+
+    public static byte asU1(long value) {
+        assert isU1(value);
+        return (byte) value;
+    }
+
+    public static short asS2(long value) {
+        assert isS2(value);
+        return (short) value;
+    }
+
+    public static short asU2(long value) {
+        assert isU2(value);
+        return (short) value;
+    }
+
+    public static int asS4(long value) {
+        assert isS4(value);
+        return (int) value;
+    }
+
+    public static int asU4(long value) {
+        assert isU4(value);
+        return (int) value;
+    }
+}
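A brief sketch of the check-then-narrow pattern these helpers support (the values are illustrative):

    import com.oracle.graal.compiler.common.util.TypeConversion;

    public class TypeConversionDemo {
        public static void main(String[] args) {
            long value = 200;
            System.out.println(TypeConversion.isU1(value));  // true: 200 fits in an unsigned byte
            System.out.println(TypeConversion.isS1(value));  // false: 200 exceeds Byte.MAX_VALUE
            byte narrowed = TypeConversion.asU1(value);      // (byte) 0xC8; the assert guards the narrowing
            System.out.println(narrowed & 0xFF);             // 200 again after masking back to unsigned
        }
    }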
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/TypeReader.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.compiler.common.util;
+
+/**
+ * Provides low-level read access for signed and unsigned values of size 1, 2, 4, and 8 bytes.
+ */
+public interface TypeReader {
+
+    /** Returns the next byte index to be read. */
+    long getByteIndex();
+
+    /** Sets the next byte index to be read. */
+    void setByteIndex(long byteIndex);
+
+    /** Reads a signed 1 byte value. */
+    int getS1();
+
+    /** Reads an unsigned 1 byte value. */
+    int getU1();
+
+    /** Reads a signed 2 byte value. */
+    int getS2();
+
+    /** Reads an unsigned 2 byte value. */
+    int getU2();
+
+    /** Reads a signed 4 byte value. */
+    int getS4();
+
+    /** Reads an unsigned 4 byte value. */
+    long getU4();
+
+    /** Reads a signed 8 byte value. */
+    long getS8();
+
+    /**
+     * Reads a signed value that has been written using {@link TypeWriter#putSV variable byte size
+     * encoding}.
+     */
+    default long getSV() {
+        long result = 0;
+        int shift = 0;
+        long b;
+        do {
+            b = getU1();
+            result |= (b & 0x7f) << shift;
+            shift += 7;
+        } while ((b & 0x80) != 0);
+
+        if ((b & 0x40) != 0 && shift < 64) {
+            result |= -1L << shift;
+        }
+        return result;
+    }
+
+    /**
+     * Reads a signed variable byte size encoded value that is known to fit into the range of int.
+     */
+    default int getSVInt() {
+        return TypeConversion.asS4(getSV());
+    }
+
+    /**
+     * Reads an unsigned value that has been written using {@link TypeWriter#putUV variable byte
+     * size encoding}.
+     */
+    default long getUV() {
+        long result = 0;
+        int shift = 0;
+        long b;
+        do {
+            b = getU1();
+            result |= (b & 0x7f) << shift;
+            shift += 7;
+        } while ((b & 0x80) != 0);
+
+        return result;
+    }
+
+    /**
+     * Reads an unsigned variable byte size encoded value that is known to fit into the range of
+     * int.
+     */
+    default int getUVInt() {
+        return TypeConversion.asS4(getUV());
+    }
+}
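For concreteness, the two bytes 0xAC 0x02 are what TypeWriter.putUV (added later in this changeset) emits for the value 300, and running them through the same loop as getUV recovers 300: the first byte contributes its low 7 bits (44), the second contributes 2 shifted left by 7 (256). A standalone sketch of that decoding loop:

    public class GetUVDemo {
        public static void main(String[] args) {
            int[] encoded = {0xAC, 0x02};            // putUV(300) produces these two bytes
            long result = 0;
            int shift = 0;
            int pos = 0;
            long b;
            do {
                b = encoded[pos++];
                result |= (b & 0x7f) << shift;       // low 7 bits carry payload
                shift += 7;
            } while ((b & 0x80) != 0);               // high bit set means another byte follows
            System.out.println(result);              // 300
        }
    }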
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/TypeWriter.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.compiler.common.util;
+
+/**
+ * Provides low-level sequential write access for signed and unsigned values of size 1, 2, 4, and 8
+ * bytes.
+ */
+public interface TypeWriter {
+
+    /**
+     * Returns the number of bytes that have been written, i.e., the byte index of the next byte to
+     * be written.
+     */
+    long getBytesWritten();
+
+    /** Writes a signed 1 byte value. */
+    void putS1(long value);
+
+    /** Writes an unsigned 1 byte value. */
+    void putU1(long value);
+
+    /** Writes a signed 2 byte value. */
+    void putS2(long value);
+
+    /** Writes an unsigned 2 byte value. */
+    void putU2(long value);
+
+    /** Writes a signed 4 byte value. */
+    void putS4(long value);
+
+    /** Writes an unsigned 4 byte value. */
+    void putU4(long value);
+
+    /** Writes a signed 8 byte value. */
+    void putS8(long value);
+
+    /**
+     * Writes a signed value in a variable byte size encoding.
+     */
+    default void putSV(long value) {
+        long cur = value;
+        while (true) {
+            if (cur >= -64 && cur < 64) {
+                putU1(cur & 0x7f);
+                return;
+            }
+            putU1(0x80 | (cur & 0x7f));
+            cur = cur >> 7;
+        }
+    }
+
+    /**
+     * Writes an unsigned value in a variable byte size encoding.
+     */
+    default void putUV(long value) {
+        long cur = value;
+        while (true) {
+            assert cur >= 0;
+            if (cur < 128) {
+                putU1(cur & 0x7f);
+                return;
+            }
+            putU1(0x80 | (cur & 0x7f));
+            cur = cur >> 7;
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/UnsafeArrayTypeReader.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.compiler.common.util;
+
+import java.nio.*;
+
+import sun.misc.*;
+
+import com.oracle.graal.compiler.common.*;
+
+/**
+ * Provides low-level read access from a byte[] array for signed and unsigned values of size 1, 2,
+ * 4, and 8 bytes.
+ */
+public class UnsafeArrayTypeReader implements TypeReader {
+
+    public static int getS1(byte[] data, long byteIndex) {
+        return UnsafeAccess.unsafe.getByte(data, readOffset(data, byteIndex, Byte.BYTES));
+    }
+
+    public static int getU1(byte[] data, long byteIndex) {
+        return UnsafeAccess.unsafe.getByte(data, readOffset(data, byteIndex, Byte.BYTES)) & 0xFF;
+    }
+
+    public static int getS2(byte[] data, long byteIndex) {
+        if (byteIndex % Short.BYTES == 0) {
+            return UnsafeAccess.unsafe.getShort(data, readOffset(data, byteIndex, Short.BYTES));
+        } else {
+            ByteBuffer buf = ByteBuffer.wrap(new byte[Short.BYTES]).order(ByteOrder.nativeOrder());
+            buf.put((byte) getU1(data, byteIndex));
+            buf.put((byte) getU1(data, byteIndex + Byte.BYTES));
+            return buf.getShort(0);
+        }
+    }
+
+    public static int getU2(byte[] data, long byteIndex) {
+        return getS2(data, byteIndex) & 0xFFFF;
+    }
+
+    public static int getS4(byte[] data, long byteIndex) {
+        if (byteIndex % Integer.BYTES == 0) {
+            return UnsafeAccess.unsafe.getInt(data, readOffset(data, byteIndex, Integer.BYTES));
+        } else {
+            ByteBuffer buf = ByteBuffer.wrap(new byte[Integer.BYTES]).order(ByteOrder.nativeOrder());
+            buf.putShort((short) getS2(data, byteIndex));
+            buf.putShort((short) getS2(data, byteIndex + Short.BYTES));
+            return buf.getInt(0);
+        }
+    }
+
+    public static long getU4(byte[] data, long byteIndex) {
+        return getS4(data, byteIndex) & 0xFFFFFFFFL;
+    }
+
+    public static long getLong(byte[] data, long byteIndex) {
+        if (byteIndex % Long.BYTES == 0) {
+            return UnsafeAccess.unsafe.getLong(data, readOffset(data, byteIndex, Long.BYTES));
+        } else {
+            ByteBuffer buf = ByteBuffer.wrap(new byte[Long.BYTES]).order(ByteOrder.nativeOrder());
+            buf.putInt(getS4(data, byteIndex));
+            buf.putInt(getS4(data, byteIndex + Integer.BYTES));
+            return buf.getLong(0);
+        }
+    }
+
+    private static long readOffset(byte[] data, long byteIndex, int numBytes) {
+        assert byteIndex >= 0;
+        assert numBytes > 0;
+        assert byteIndex + numBytes <= data.length;
+        assert Unsafe.ARRAY_BYTE_INDEX_SCALE == 1;
+
+        return byteIndex + Unsafe.ARRAY_BYTE_BASE_OFFSET;
+    }
+
+    private final byte[] data;
+    private long byteIndex;
+
+    public UnsafeArrayTypeReader(byte[] data, long byteIndex) {
+        this.data = data;
+        this.byteIndex = byteIndex;
+    }
+
+    @Override
+    public long getByteIndex() {
+        return byteIndex;
+    }
+
+    @Override
+    public void setByteIndex(long byteIndex) {
+        this.byteIndex = byteIndex;
+    }
+
+    @Override
+    public int getS1() {
+        int result = getS1(data, byteIndex);
+        byteIndex += Byte.BYTES;
+        return result;
+    }
+
+    @Override
+    public int getU1() {
+        int result = getU1(data, byteIndex);
+        byteIndex += Byte.BYTES;
+        return result;
+    }
+
+    @Override
+    public int getS2() {
+        int result = getS2(data, byteIndex);
+        byteIndex += Short.BYTES;
+        return result;
+    }
+
+    @Override
+    public int getU2() {
+        int result = getU2(data, byteIndex);
+        byteIndex += Short.BYTES;
+        return result;
+    }
+
+    @Override
+    public int getS4() {
+        int result = getS4(data, byteIndex);
+        byteIndex += Integer.BYTES;
+        return result;
+    }
+
+    @Override
+    public long getU4() {
+        long result = getU4(data, byteIndex);
+        byteIndex += Integer.BYTES;
+        return result;
+    }
+
+    @Override
+    public long getS8() {
+        long result = getLong(data, byteIndex);
+        byteIndex += Long.BYTES;
+        return result;
+    }
+}
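The unaligned slow paths in getS2/getS4/getLong above assemble the wider value by copying individual bytes into a small native-order ByteBuffer and reading it back from index 0. A self-contained sketch of that technique for a 2-byte value (the byte values are illustrative):

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    public class UnalignedReadDemo {
        public static void main(String[] args) {
            byte first = 0x34;    // byte at the lower array index
            byte second = 0x12;   // byte at the next array index
            ByteBuffer buf = ByteBuffer.wrap(new byte[Short.BYTES]).order(ByteOrder.nativeOrder());
            buf.put(first);
            buf.put(second);
            short value = buf.getShort(0);
            // 0x3412 on a little-endian host, 0x1234 on a big-endian one.
            System.out.printf("0x%04X%n", value);
        }
    }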
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.compiler.common/src/com/oracle/graal/compiler/common/util/UnsafeArrayTypeWriter.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.compiler.common.util;
+
+import static com.oracle.graal.compiler.common.util.TypeConversion.*;
+
+import java.nio.*;
+
+import sun.misc.*;
+
+import com.oracle.graal.compiler.common.*;
+
+/**
+ * Provides low-level sequential write access to a byte[] array for signed and unsigned values of
+ * size 1, 2, 4, and 8 bytes. To avoid copying an array when the buffer size is no longer
+ * sufficient, the buffer is split into chunks of a fixed size.
+ */
+public class UnsafeArrayTypeWriter implements TypeWriter {
+
+    private static final int CHUNK_SIZE = 4000;
+
+    static class Chunk {
+        protected final byte[] data = new byte[CHUNK_SIZE];
+        protected int size;
+        protected Chunk next;
+    }
+
+    private Chunk firstChunk;
+    private Chunk writeChunk;
+    private int totalSize;
+
+    public UnsafeArrayTypeWriter() {
+        firstChunk = new Chunk();
+        writeChunk = firstChunk;
+    }
+
+    @Override
+    public long getBytesWritten() {
+        return totalSize;
+    }
+
+    /**
+     * Copies the buffer into the provided byte[] array of length {@link #getBytesWritten()}.
+     */
+    public byte[] toArray(byte[] result) {
+        assert result.length == totalSize;
+        int resultIdx = 0;
+        for (Chunk cur = firstChunk; cur != null; cur = cur.next) {
+            System.arraycopy(cur.data, 0, result, resultIdx, cur.size);
+            resultIdx += cur.size;
+        }
+        assert resultIdx == totalSize;
+        return result;
+    }
+
+    @Override
+    public void putS1(long value) {
+        long offset = writeOffset(Byte.BYTES);
+        UnsafeAccess.unsafe.putByte(writeChunk.data, offset, asS1(value));
+        commitWrite(Byte.BYTES);
+    }
+
+    @Override
+    public void putU1(long value) {
+        long offset = writeOffset(Byte.BYTES);
+        UnsafeAccess.unsafe.putByte(writeChunk.data, offset, asU1(value));
+        commitWrite(Byte.BYTES);
+    }
+
+    @Override
+    public void putS2(long value) {
+        long offset = writeOffset(Short.BYTES);
+        if (offset % Short.BYTES == 0) {
+            UnsafeAccess.unsafe.putShort(writeChunk.data, offset, asS2(value));
+            commitWrite(Short.BYTES);
+        } else {
+            ByteBuffer buf = ByteBuffer.wrap(new byte[Short.BYTES]).order(ByteOrder.nativeOrder());
+            buf.putShort(asS2(value));
+            putS1(buf.get(0));
+            putS1(buf.get(Byte.BYTES));
+        }
+    }
+
+    @Override
+    public void putU2(long value) {
+        putS2(asU2(value));
+    }
+
+    @Override
+    public void putS4(long value) {
+        long offset = writeOffset(Integer.BYTES);
+        if (offset % Integer.BYTES == 0) {
+            UnsafeAccess.unsafe.putInt(writeChunk.data, offset, asS4(value));
+            commitWrite(Integer.BYTES);
+        } else {
+            ByteBuffer buf = ByteBuffer.wrap(new byte[Integer.BYTES]).order(ByteOrder.nativeOrder());
+            buf.putInt(asS4(value));
+            if (offset % Short.BYTES == 0) {
+                putS2(buf.getShort(0));
+                putS2(buf.getShort(2));
+            } else {
+                putS1(buf.get(0));
+                putS2(buf.getShort(1));
+                putS1(buf.get(3));
+            }
+        }
+    }
+
+    @Override
+    public void putU4(long value) {
+        putS4(asU4(value));
+    }
+
+    @Override
+    public void putS8(long value) {
+        long offset = writeOffset(Long.BYTES);
+        if (offset % Long.BYTES == 0) {
+            UnsafeAccess.unsafe.putLong(writeChunk.data, offset, value);
+            commitWrite(Long.BYTES);
+        } else {
+            ByteBuffer buf = ByteBuffer.wrap(new byte[Long.BYTES]).order(ByteOrder.nativeOrder());
+            buf.putLong(value);
+            if (offset % Integer.BYTES == 0) {
+                putS4(buf.getInt(0));
+                putS4(buf.getInt(4));
+            } else {
+                putS2(buf.getShort(0));
+                putS4(buf.getInt(2));
+                putS2(buf.getShort(6));
+            }
+        }
+    }
+
+    private long writeOffset(int writeBytes) {
+        if (writeChunk.size + writeBytes >= writeChunk.data.length) {
+            Chunk newChunk = new Chunk();
+            writeChunk.next = newChunk;
+            writeChunk = newChunk;
+        }
+
+        assert Unsafe.ARRAY_BYTE_INDEX_SCALE == 1;
+        long result = writeChunk.size + Unsafe.ARRAY_BYTE_BASE_OFFSET;
+
+        return result;
+    }
+
+    private void commitWrite(int writeBytes) {
+        totalSize += writeBytes;
+        writeChunk.size += writeBytes;
+        assert writeChunk.size <= writeChunk.data.length;
+    }
+}
--- a/graal/com.oracle.graal.compiler.sparc/src/com/oracle/graal/compiler/sparc/SPARCLIRGenerator.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.compiler.sparc/src/com/oracle/graal/compiler/sparc/SPARCLIRGenerator.java	Fri Apr 10 16:58:26 2015 -0700
@@ -44,6 +44,7 @@
 import com.oracle.graal.lir.sparc.SPARCArithmetic.BinaryRegReg;
 import com.oracle.graal.lir.sparc.SPARCArithmetic.MulHighOp;
 import com.oracle.graal.lir.sparc.SPARCArithmetic.RemOp;
+import com.oracle.graal.lir.sparc.SPARCArithmetic.SPARCLMulccOp;
 import com.oracle.graal.lir.sparc.SPARCArithmetic.Unary2Op;
 import com.oracle.graal.lir.sparc.SPARCCompare.CompareOp;
 import com.oracle.graal.lir.sparc.SPARCControlFlow.BranchOp;
@@ -467,34 +468,6 @@
     }
 
     @Override
-    public Value emitMathLog(Value input, boolean base10) {
-        Variable result = newVariable(LIRKind.derive(input));
-        append(new SPARCMathIntrinsicOp(LOG, result, asAllocatable(input)));
-        return result;
-    }
-
-    @Override
-    public Value emitMathCos(Value input) {
-        Variable result = newVariable(LIRKind.derive(input));
-        append(new SPARCMathIntrinsicOp(COS, result, asAllocatable(input)));
-        return result;
-    }
-
-    @Override
-    public Value emitMathSin(Value input) {
-        Variable result = newVariable(LIRKind.derive(input));
-        append(new SPARCMathIntrinsicOp(SIN, result, asAllocatable(input)));
-        return result;
-    }
-
-    @Override
-    public Value emitMathTan(Value input) {
-        Variable result = newVariable(LIRKind.derive(input));
-        append(new SPARCMathIntrinsicOp(TAN, result, asAllocatable(input)));
-        return result;
-    }
-
-    @Override
     public Variable emitByteSwap(Value input) {
         Variable result = newVariable(LIRKind.derive(input));
         append(new SPARCByteSwapOp(this, result, input));
@@ -626,7 +599,7 @@
             case Long:
                 if (setFlags) {
                     Variable result = newVariable(LIRKind.derive(a, b));
-                    append(new SPARCLMulccOp(result, a, b, this));
+                    append(new SPARCLMulccOp(result, load(a), load(b), this));
                     return result;
                 } else {
                     return emitBinary(LMUL, true, a, b);
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/GraalCompilerTest.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/GraalCompilerTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -49,7 +49,8 @@
 import com.oracle.graal.debug.Debug.Scope;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.graphbuilderconf.*;
-import com.oracle.graal.graphbuilderconf.GraphBuilderConfiguration.*;
+import com.oracle.graal.graphbuilderconf.GraphBuilderConfiguration.Plugins;
+import com.oracle.graal.graphbuilderconf.InvocationPlugins.Receiver;
 import com.oracle.graal.java.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.phases.*;
@@ -123,15 +124,6 @@
         return true;
     }
 
-    private static boolean substitutionsInstalled;
-
-    private void installSubstitutions() {
-        if (!substitutionsInstalled) {
-            this.providers.getReplacements().registerSubstitutions(GraalCompilerTest.class, GraalCompilerTestSubstitutions.class);
-            substitutionsInstalled = true;
-        }
-    }
-
     protected static void breakpoint() {
     }
 
@@ -180,7 +172,6 @@
         this.providers = getBackend().getProviders();
         this.suites = new DerivedOptionValue<>(this::createSuites);
         this.lirSuites = new DerivedOptionValue<>(this::createLIRSuites);
-        installSubstitutions();
     }
 
     /**
@@ -201,7 +192,6 @@
         this.providers = backend.getProviders();
         this.suites = new DerivedOptionValue<>(this::createSuites);
         this.lirSuites = new DerivedOptionValue<>(this::createLIRSuites);
-        installSubstitutions();
     }
 
     @BeforeClass
@@ -855,6 +845,13 @@
     }
 
     protected GraphBuilderConfiguration editGraphBuilderConfiguration(GraphBuilderConfiguration conf) {
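+        // Register an invocation plugin that lowers calls to breakpoint() directly to a BreakpointNode,
+        // replacing the GraalCompilerTestSubstitutions mechanism removed above.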
+        InvocationPlugins invocationPlugins = conf.getPlugins().getInvocationPlugins();
+        invocationPlugins.register(new InvocationPlugin() {
+            public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver) {
+                b.add(new BreakpointNode());
+                return true;
+            }
+        }, GraalCompilerTest.class, "breakpoint");
         return conf;
     }
 
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/GraalCompilerTestSubstitutions.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.compiler.test;
-
-import com.oracle.graal.api.replacements.*;
-import com.oracle.graal.nodes.*;
-
-@ClassSubstitution(GraalCompilerTest.class)
-class GraalCompilerTestSubstitutions {
-
-    @MethodSubstitution
-    public static void breakpoint() {
-        BreakpointNode.breakpoint();
-    }
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/GraphEncoderTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.compiler.test;
+
+import java.lang.reflect.*;
+import java.util.*;
+
+import org.junit.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.StructuredGraph.AllowAssumptions;
+import com.oracle.graal.phases.common.*;
+import com.oracle.graal.phases.tiers.*;
+
+public class GraphEncoderTest extends GraalCompilerTest {
+
+    @Test
+    public void test01() {
+        testStringMethods(false);
+    }
+
+    @Test
+    public void test02() {
+        testStringMethods(true);
+    }
+
+    public void testStringMethods(boolean canonicalize) {
+        /* Encode and decode all methods of java.lang.String. */
+        List<StructuredGraph> originalGraphs = new ArrayList<>();
+        for (Method method : String.class.getDeclaredMethods()) {
+            ResolvedJavaMethod javaMethod = getMetaAccess().lookupJavaMethod(method);
+            if (javaMethod.hasBytecodes()) {
+                StructuredGraph originalGraph = parseEager(javaMethod, AllowAssumptions.YES);
+                if (canonicalize) {
+                    PhaseContext context = new PhaseContext(getProviders());
+                    new CanonicalizerPhase().apply(originalGraph, context);
+                }
+                originalGraphs.add(originalGraph);
+            }
+        }
+
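+        /*
+         * Encoding is two-phase: prepare()/finishPrepare() first register every graph, then encode()
+         * writes each graph into one shared encoding and returns its start offset. The object and
+         * node-class pools (getObjects()/getNodeClasses()) are shared across all encoded graphs.
+         */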
+        GraphEncoder encoder = new GraphEncoder();
+        for (StructuredGraph originalGraph : originalGraphs) {
+            encoder.prepare(originalGraph);
+        }
+        encoder.finishPrepare();
+        Map<StructuredGraph, Long> startOffsets = new HashMap<>();
+        for (StructuredGraph originalGraph : originalGraphs) {
+            startOffsets.put(originalGraph, encoder.encode(originalGraph));
+        }
+
+        for (StructuredGraph originalGraph : originalGraphs) {
+            EncodedGraph encodedGraph = new EncodedGraph(encoder.getEncoding(), startOffsets.get(originalGraph), encoder.getObjects(), encoder.getNodeClasses(), originalGraph.getAssumptions(),
+                            originalGraph.getInlinedMethods());
+            GraphEncoder.verifyEncoding(originalGraph, encodedGraph);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/TypeWriterTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.compiler.test;
+
+import org.junit.*;
+
+import com.oracle.graal.compiler.common.util.*;
+
+public class TypeWriterTest {
+
+    private static void putValue(TypeWriter writer, long value) {
+        if (TypeConversion.isS1(value)) {
+            writer.putS1(value);
+        }
+        if (TypeConversion.isU1(value)) {
+            writer.putU1(value);
+        }
+        if (TypeConversion.isS2(value)) {
+            writer.putS2(value);
+        }
+        if (TypeConversion.isU2(value)) {
+            writer.putU2(value);
+        }
+        if (TypeConversion.isS4(value)) {
+            writer.putS4(value);
+        }
+        if (TypeConversion.isU4(value)) {
+            writer.putU4(value);
+        }
+        writer.putS8(value);
+        writer.putSV(value);
+        if (value >= 0) {
+            writer.putUV(value);
+        }
+    }
+
+    private static void checkValue(TypeReader reader, long value) {
+        if (TypeConversion.isS1(value)) {
+            Assert.assertEquals(value, reader.getS1());
+        }
+        if (TypeConversion.isU1(value)) {
+            Assert.assertEquals(value, reader.getU1());
+        }
+        if (TypeConversion.isS2(value)) {
+            Assert.assertEquals(value, reader.getS2());
+        }
+        if (TypeConversion.isU2(value)) {
+            Assert.assertEquals(value, reader.getU2());
+        }
+        if (TypeConversion.isS4(value)) {
+            Assert.assertEquals(value, reader.getS4());
+        }
+        if (TypeConversion.isU4(value)) {
+            Assert.assertEquals(value, reader.getU4());
+        }
+        Assert.assertEquals(value, reader.getS8());
+        Assert.assertEquals(value, reader.getSV());
+        if (value >= 0) {
+            Assert.assertEquals(value, reader.getUV());
+        }
+    }
+
+    private static void putValues(TypeWriter writer) {
+        for (int i = 0; i < 64; i++) {
+            long value = 1L << i;
+            putValue(writer, value - 2);
+            putValue(writer, value - 1);
+            putValue(writer, value);
+            putValue(writer, value + 1);
+            putValue(writer, value + 2);
+
+            putValue(writer, -value - 2);
+            putValue(writer, -value - 1);
+            putValue(writer, -value);
+            putValue(writer, -value + 1);
+            putValue(writer, -value + 2);
+        }
+    }
+
+    private static void checkValues(TypeReader reader) {
+        for (int i = 0; i < 64; i++) {
+            long value = 1L << i;
+            checkValue(reader, value - 2);
+            checkValue(reader, value - 1);
+            checkValue(reader, value);
+            checkValue(reader, value + 1);
+            checkValue(reader, value + 2);
+
+            checkValue(reader, -value - 2);
+            checkValue(reader, -value - 1);
+            checkValue(reader, -value);
+            checkValue(reader, -value + 1);
+            checkValue(reader, -value + 2);
+        }
+    }
+
+    @Test
+    public void test01() {
+        UnsafeArrayTypeWriter writer = new UnsafeArrayTypeWriter();
+        putValues(writer);
+
+        byte[] array = new byte[(int) writer.getBytesWritten()];
+        writer.toArray(array);
+        UnsafeArrayTypeReader reader = new UnsafeArrayTypeReader(array, 0);
+        checkValues(reader);
+    }
+
+    private static void checkSignedSize(TypeWriter writer, long value, int expectedSize) {
+        long sizeBefore = writer.getBytesWritten();
+        writer.putSV(value);
+        Assert.assertEquals(expectedSize, writer.getBytesWritten() - sizeBefore);
+    }
+
+    private static void checkUnsignedSize(TypeWriter writer, long value, int expectedSize) {
+        long sizeBefore = writer.getBytesWritten();
+        writer.putUV(value);
+        Assert.assertEquals(expectedSize, writer.getBytesWritten() - sizeBefore);
+    }
+
+    private static void checkSizes(TypeWriter writer) {
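+        // The expected sizes are consistent with a LEB128-style variable-length encoding (7 payload
+        // bits per byte): signed values in [-64, 63] and unsigned values up to 127 need one byte.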
+        checkSignedSize(writer, 0, 1);
+        checkSignedSize(writer, 63, 1);
+        checkSignedSize(writer, -64, 1);
+        checkSignedSize(writer, 64, 2);
+        checkSignedSize(writer, -65, 2);
+        checkSignedSize(writer, 8191, 2);
+        checkSignedSize(writer, -8192, 2);
+        checkSignedSize(writer, 8192, 3);
+        checkSignedSize(writer, -8193, 3);
+        checkSignedSize(writer, Long.MAX_VALUE, 10);
+        checkSignedSize(writer, Long.MIN_VALUE, 10);
+
+        checkUnsignedSize(writer, 0, 1);
+        checkUnsignedSize(writer, 127, 1);
+        checkUnsignedSize(writer, 128, 2);
+        checkUnsignedSize(writer, 16383, 2);
+        checkUnsignedSize(writer, 16384, 3);
+        checkUnsignedSize(writer, Long.MAX_VALUE, 9);
+    }
+
+    @Test
+    public void test02() {
+        checkSizes(new UnsafeArrayTypeWriter());
+    }
+}
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/target/Backend.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/target/Backend.java	Fri Apr 10 16:58:26 2015 -0700
@@ -44,6 +44,14 @@
 
     private final Providers providers;
 
+    public static final ForeignCallDescriptor ARITHMETIC_SIN = new ForeignCallDescriptor("arithmeticSin", double.class, double.class);
+    public static final ForeignCallDescriptor ARITHMETIC_COS = new ForeignCallDescriptor("arithmeticCos", double.class, double.class);
+    public static final ForeignCallDescriptor ARITHMETIC_TAN = new ForeignCallDescriptor("arithmeticTan", double.class, double.class);
+    public static final ForeignCallDescriptor ARITHMETIC_EXP = new ForeignCallDescriptor("arithmeticExp", double.class, double.class);
+    public static final ForeignCallDescriptor ARITHMETIC_LOG = new ForeignCallDescriptor("arithmeticLog", double.class, double.class);
+    public static final ForeignCallDescriptor ARITHMETIC_LOG10 = new ForeignCallDescriptor("arithmeticLog10", double.class, double.class);
+    public static final ForeignCallDescriptor ARITHMETIC_POW = new ForeignCallDescriptor("arithmeticPow", double.class, double.class, double.class);
+
     protected Backend(Providers providers) {
         this.providers = providers;
     }
--- a/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/Edges.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/Edges.java	Fri Apr 10 16:58:26 2015 -0700
@@ -263,7 +263,7 @@
         update(node, old, value);
     }
 
-    protected abstract void update(Node node, Node oldValue, Node newValue);
+    public abstract void update(Node node, Node oldValue, Node newValue);
 
     public boolean contains(Node node, Node value) {
         final long[] curOffsets = this.offsets;
--- a/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/InputEdges.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/InputEdges.java	Fri Apr 10 16:58:26 2015 -0700
@@ -60,7 +60,7 @@
     }
 
     @Override
-    protected void update(Node node, Node oldValue, Node newValue) {
+    public void update(Node node, Node oldValue, Node newValue) {
         node.updateUsages(oldValue, newValue);
     }
 }
--- a/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/Node.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/Node.java	Fri Apr 10 16:58:26 2015 -0700
@@ -209,12 +209,12 @@
     public static final int NOT_ITERABLE = -1;
 
     public Node(NodeClass<? extends Node> c) {
-        init();
+        init(c);
+    }
+
+    final void init(NodeClass<? extends Node> c) {
         assert c.getJavaClass() == this.getClass();
         this.nodeClass = c;
-    }
-
-    final void init() {
         id = INITIAL_ID;
         extraUsages = NO_NODES;
     }
--- a/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/NodeClass.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/NodeClass.java	Fri Apr 10 16:58:26 2015 -0700
@@ -23,6 +23,7 @@
 package com.oracle.graal.graph;
 
 import static com.oracle.graal.compiler.common.Fields.*;
+import static com.oracle.graal.compiler.common.GraalInternalError.*;
 import static com.oracle.graal.graph.Edges.*;
 import static com.oracle.graal.graph.InputEdges.*;
 import static com.oracle.graal.graph.Node.*;
@@ -663,22 +664,16 @@
     }
 
     /**
-     * Initializes a fresh allocated node for which no constructor is called yet. Needed to
-     * implement node factories in svm.
+     * Returns a newly allocated node for which no subclass-specific constructor has been called.
      */
-    public void initRawNode(Node node) {
-        node.init();
-        initNullEdgeLists(node, Edges.Type.Inputs);
-        initNullEdgeLists(node, Edges.Type.Successors);
-    }
-
-    private void initNullEdgeLists(Node node, Edges.Type type) {
-        Edges edges = getEdges(type);
-        final long[] curOffsets = edges.getOffsets();
-        for (int inputPos = edges.getDirectCount(); inputPos < edges.getCount(); inputPos++) {
-            if (Edges.getNodeList(node, curOffsets, inputPos) == null) {
-                Edges.initializeList(node, curOffsets, inputPos, type == Edges.Type.Inputs ? new NodeInputList<>(node) : new NodeSuccessorList<>(node));
-            }
+    @SuppressWarnings("unchecked")
+    public Node allocateInstance() {
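+        // Unsafe.allocateInstance creates the object without running any constructor; Node.init(...)
+        // then performs the base initialization that Node's constructor would normally do.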
+        try {
+            Node node = (Node) UnsafeAccess.unsafe.allocateInstance(getJavaClass());
+            node.init((NodeClass<? extends Node>) this);
+            return node;
+        } catch (InstantiationException ex) {
+            throw shouldNotReachHere(ex);
         }
     }
 
--- a/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/SuccessorEdges.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/SuccessorEdges.java	Fri Apr 10 16:58:26 2015 -0700
@@ -35,7 +35,7 @@
     }
 
     @Override
-    protected void update(Node node, Node oldValue, Node newValue) {
+    public void update(Node node, Node oldValue, Node newValue) {
         node.updatePredecessor(oldValue, newValue);
     }
 }
--- a/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/spi/CanonicalizerTool.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.graph/src/com/oracle/graal/graph/spi/CanonicalizerTool.java	Fri Apr 10 16:58:26 2015 -0700
@@ -23,6 +23,7 @@
 package com.oracle.graal.graph.spi;
 
 import com.oracle.graal.api.meta.*;
+import com.oracle.graal.graph.*;
 
 public interface CanonicalizerTool {
 
@@ -31,4 +32,11 @@
     ConstantReflectionProvider getConstantReflection();
 
     boolean canonicalizeReads();
+
+    /**
+     * If this method returns false, not all {@link Node#usages() usages of a node} are yet
+     * available. So a node must not be canonicalized based on, e.g., information returned from
+     * {@link Node#hasNoUsages()}.
+     */
+    boolean allUsagesAvailable();
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.graphbuilderconf/src/com/oracle/graal/graphbuilderconf/ForeignCallPlugin.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.graphbuilderconf;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.graphbuilderconf.InvocationPlugins.Receiver;
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.extended.*;
+
+/**
+ * {@link InvocationPlugin} for converting a method call directly to a foreign call.
+ */
+public final class ForeignCallPlugin implements InvocationPlugin {
+    private final ForeignCallsProvider foreignCalls;
+    private final ForeignCallDescriptor descriptor;
+
+    public ForeignCallPlugin(ForeignCallsProvider foreignCalls, ForeignCallDescriptor descriptor) {
+        this.foreignCalls = foreignCalls;
+        this.descriptor = descriptor;
+    }
+
+    public boolean execute(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode[] args) {
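+        // Append a ForeignCallNode for the descriptor and push its result in place of the original invocation.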
+        b.addPush(new ForeignCallNode(foreignCalls, descriptor, args));
+        return true;
+    }
+}
--- a/graal/com.oracle.graal.graphbuilderconf/src/com/oracle/graal/graphbuilderconf/GraphBuilderContext.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.graphbuilderconf/src/com/oracle/graal/graphbuilderconf/GraphBuilderContext.java	Fri Apr 10 16:58:26 2015 -0700
@@ -22,12 +22,18 @@
  */
 package com.oracle.graal.graphbuilderconf;
 
+import static com.oracle.graal.api.meta.DeoptimizationAction.*;
+import static com.oracle.graal.api.meta.DeoptimizationReason.*;
+import static com.oracle.graal.compiler.common.type.StampFactory.*;
+
 import com.oracle.graal.api.code.*;
 import com.oracle.graal.api.meta.*;
-import com.oracle.graal.api.replacements.*;
+import com.oracle.graal.compiler.common.type.*;
 import com.oracle.graal.nodes.CallTargetNode.InvokeKind;
 import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.calc.*;
 import com.oracle.graal.nodes.spi.*;
+import com.oracle.graal.nodes.type.*;
 
 /**
  * Used by a {@link GraphBuilderPlugin} to interface with a graph builder object.
@@ -161,6 +167,16 @@
      */
     void handleReplacedInvoke(InvokeKind invokeKind, ResolvedJavaMethod targetMethod, ValueNode[] args);
 
+    /**
+     * Intrinsifies an invocation of a given method by inlining the bytecodes of a given
+     * substitution method.
+     *
+     * @param targetMethod the method being intrinsified
+     * @param substitute the intrinsic implementation
+     * @param args the arguments with which to inline the invocation
+     */
+    void intrinsify(ResolvedJavaMethod targetMethod, ResolvedJavaMethod substitute, ValueNode[] args);
+
     StampProvider getStampProvider();
 
     MetaAccessProvider getMetaAccess();
@@ -171,8 +187,6 @@
 
     ConstantReflectionProvider getConstantReflection();
 
-    SnippetReflectionProvider getSnippetReflection();
-
     /**
      * Gets the graph being constructed.
      */
@@ -222,7 +236,7 @@
      * Determines if the current parsing context is a snippet or method substitution.
      */
     default boolean parsingReplacement() {
-        return getReplacement() == null;
+        return getReplacement() != null;
     }
 
     /**
@@ -232,4 +246,27 @@
     Replacement getReplacement();
 
     BailoutException bailout(String string);
+
+    /**
+     * Gets a version of a given value that has a {@linkplain StampTool#isPointerNonNull(ValueNode)
+     * non-null} stamp.
+     */
+    default ValueNode nullCheckedValue(ValueNode value) {
+        if (!StampTool.isPointerNonNull(value.stamp())) {
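+            // Build an explicit null check: a FixedGuardNode on the (negated) IsNullNode deoptimizes
+            // if the value is null, and a PiNode anchored to that guard carries the joined non-null stamp.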
+            IsNullNode condition = getGraph().unique(new IsNullNode(value));
+            ObjectStamp receiverStamp = (ObjectStamp) value.stamp();
+            Stamp stamp = receiverStamp.join(objectNonNull());
+            FixedGuardNode fixedGuard = append(new FixedGuardNode(condition, NullCheckException, InvalidateReprofile, true));
+            PiNode nonNullReceiver = getGraph().unique(new PiNode(value, stamp));
+            nonNullReceiver.setGuard(fixedGuard);
+            // TODO: Propagating the non-null into the frame state would
+            // remove subsequent null-checks on the same value. However,
+            // it currently causes an assertion failure when merging states.
+            //
+            // frameState.replace(value, nonNullReceiver);
+            return nonNullReceiver;
+        }
+        return value;
+    }
+
 }
--- a/graal/com.oracle.graal.graphbuilderconf/src/com/oracle/graal/graphbuilderconf/InvocationPlugin.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.graphbuilderconf/src/com/oracle/graal/graphbuilderconf/InvocationPlugin.java	Fri Apr 10 16:58:26 2015 -0700
@@ -97,60 +97,57 @@
         return defaultHandler(b, targetMethod, receiver, arg1, arg2, arg3, arg4, arg5);
     }
 
-    default ResolvedJavaMethod getSubstitute() {
-        return null;
-    }
-
     /**
-     * Executes a given plugin against a set of invocation arguments by dispatching to the
-     * {@code apply(...)} method that matches the number of arguments or to
-     * {@link #applyPolymorphic} if {@code plugin} is {@linkplain #isSignaturePolymorphic()
-     * signature polymorphic}.
+     * Executes this plugin against a set of invocation arguments.
      *
-     * @param targetMethod the method for which plugin is being applied
+     * The default implementation in {@link InvocationPlugin} dispatches to the {@code apply(...)}
+     * method that matches the number of arguments or to {@link #applyPolymorphic} if this plugin
+     * is {@linkplain #isSignaturePolymorphic() signature polymorphic}.
+     *
+     * @param targetMethod the method for which this plugin is being applied
      * @param receiver access to the receiver, {@code null} if {@code targetMethod} is static
      * @param argsIncludingReceiver all arguments to the invocation include the receiver in position
      *            0 if {@code targetMethod} is not static
-     * @return {@code true} if the plugin handled the invocation of {@code targetMethod}
+     * @return {@code true} if this plugin handled the invocation of {@code targetMethod},
      *         {@code false} if the graph builder should process the invoke further (e.g., by
      *         inlining it or creating an {@link Invoke} node). A plugin that does not handle an
      *         invocation must not modify the graph being constructed.
      */
-    static boolean execute(GraphBuilderContext b, ResolvedJavaMethod targetMethod, InvocationPlugin plugin, Receiver receiver, ValueNode[] argsIncludingReceiver) {
-        if (plugin.isSignaturePolymorphic()) {
-            return plugin.applyPolymorphic(b, targetMethod, receiver, argsIncludingReceiver);
+    default boolean execute(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode[] argsIncludingReceiver) {
+        if (isSignaturePolymorphic()) {
+            return applyPolymorphic(b, targetMethod, receiver, argsIncludingReceiver);
         } else if (receiver != null) {
             assert !targetMethod.isStatic();
             assert argsIncludingReceiver.length > 0;
             if (argsIncludingReceiver.length == 1) {
-                return plugin.apply(b, targetMethod, receiver);
+                return apply(b, targetMethod, receiver);
             } else if (argsIncludingReceiver.length == 2) {
-                return plugin.apply(b, targetMethod, receiver, argsIncludingReceiver[1]);
+                return apply(b, targetMethod, receiver, argsIncludingReceiver[1]);
             } else if (argsIncludingReceiver.length == 3) {
-                return plugin.apply(b, targetMethod, receiver, argsIncludingReceiver[1], argsIncludingReceiver[2]);
+                return apply(b, targetMethod, receiver, argsIncludingReceiver[1], argsIncludingReceiver[2]);
             } else if (argsIncludingReceiver.length == 4) {
-                return plugin.apply(b, targetMethod, receiver, argsIncludingReceiver[1], argsIncludingReceiver[2], argsIncludingReceiver[3]);
+                return apply(b, targetMethod, receiver, argsIncludingReceiver[1], argsIncludingReceiver[2], argsIncludingReceiver[3]);
             } else if (argsIncludingReceiver.length == 5) {
-                return plugin.apply(b, targetMethod, receiver, argsIncludingReceiver[1], argsIncludingReceiver[2], argsIncludingReceiver[3], argsIncludingReceiver[4]);
+                return apply(b, targetMethod, receiver, argsIncludingReceiver[1], argsIncludingReceiver[2], argsIncludingReceiver[3], argsIncludingReceiver[4]);
             } else {
-                return plugin.defaultHandler(b, targetMethod, receiver, argsIncludingReceiver);
+                return defaultHandler(b, targetMethod, receiver, argsIncludingReceiver);
             }
         } else {
             assert targetMethod.isStatic();
             if (argsIncludingReceiver.length == 0) {
-                return plugin.apply(b, targetMethod, null);
+                return apply(b, targetMethod, null);
             } else if (argsIncludingReceiver.length == 1) {
-                return plugin.apply(b, targetMethod, null, argsIncludingReceiver[0]);
+                return apply(b, targetMethod, null, argsIncludingReceiver[0]);
             } else if (argsIncludingReceiver.length == 2) {
-                return plugin.apply(b, targetMethod, null, argsIncludingReceiver[0], argsIncludingReceiver[1]);
+                return apply(b, targetMethod, null, argsIncludingReceiver[0], argsIncludingReceiver[1]);
             } else if (argsIncludingReceiver.length == 3) {
-                return plugin.apply(b, targetMethod, null, argsIncludingReceiver[0], argsIncludingReceiver[1], argsIncludingReceiver[2]);
+                return apply(b, targetMethod, null, argsIncludingReceiver[0], argsIncludingReceiver[1], argsIncludingReceiver[2]);
             } else if (argsIncludingReceiver.length == 4) {
-                return plugin.apply(b, targetMethod, null, argsIncludingReceiver[0], argsIncludingReceiver[1], argsIncludingReceiver[2], argsIncludingReceiver[3]);
+                return apply(b, targetMethod, null, argsIncludingReceiver[0], argsIncludingReceiver[1], argsIncludingReceiver[2], argsIncludingReceiver[3]);
             } else if (argsIncludingReceiver.length == 5) {
-                return plugin.apply(b, targetMethod, null, argsIncludingReceiver[0], argsIncludingReceiver[1], argsIncludingReceiver[2], argsIncludingReceiver[3], argsIncludingReceiver[4]);
+                return apply(b, targetMethod, null, argsIncludingReceiver[0], argsIncludingReceiver[1], argsIncludingReceiver[2], argsIncludingReceiver[3], argsIncludingReceiver[4]);
             } else {
-                return plugin.defaultHandler(b, targetMethod, receiver, argsIncludingReceiver);
+                return defaultHandler(b, targetMethod, receiver, argsIncludingReceiver);
             }
 
         }
--- a/graal/com.oracle.graal.graphbuilderconf/src/com/oracle/graal/graphbuilderconf/InvocationPlugins.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.graphbuilderconf/src/com/oracle/graal/graphbuilderconf/InvocationPlugins.java	Fri Apr 10 16:58:26 2015 -0700
@@ -63,6 +63,42 @@
         }
     }
 
+    public static class InvocationPluginReceiver implements Receiver {
+        private final GraphBuilderContext parser;
+        private ValueNode[] args;
+        private ValueNode value;
+
+        public InvocationPluginReceiver(GraphBuilderContext parser) {
+            this.parser = parser;
+        }
+
+        @Override
+        public ValueNode get() {
+            assert args != null : "Cannot get the receiver of a static method";
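+            // The receiver is null-checked lazily on first access; the strengthened value also
+            // replaces args[0] so that subsequent uses see the non-null version.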
+            if (value == null) {
+                value = parser.nullCheckedValue(args[0]);
+                if (value != args[0]) {
+                    args[0] = value;
+                }
+            }
+            return value;
+        }
+
+        @Override
+        public boolean isConstant() {
+            return args[0].isConstant();
+        }
+
+        public InvocationPluginReceiver init(ResolvedJavaMethod targetMethod, ValueNode[] newArgs) {
+            if (!targetMethod.isStatic()) {
+                this.args = newArgs;
+                this.value = null;
+                return this;
+            }
+            return null;
+        }
+    }
+
     /**
      * Utility for
      * {@linkplain InvocationPlugins#register(InvocationPlugin, Class, String, Class...)
@@ -145,6 +181,18 @@
         public void register5(String name, Class<?> arg1, Class<?> arg2, Class<?> arg3, Class<?> arg4, Class<?> arg5, InvocationPlugin plugin) {
             plugins.register(plugin, declaringClass, name, arg1, arg2, arg3, arg4, arg5);
         }
+
+        /**
+         * Registers a plugin that implements a method based on the bytecode of a substitute method.
+         *
+         * @param substituteDeclaringClass the class declaring the substitute method
+         * @param name the name of both the original and substitute method
+         * @param argumentTypes the parameter types of the substitute
+         */
+        public void registerMethodSubstitution(Class<?> substituteDeclaringClass, String name, Class<?>... argumentTypes) {
+            MethodSubstitutionPlugin plugin = new MethodSubstitutionPlugin(substituteDeclaringClass, name, argumentTypes);
+            plugins.register(plugin, declaringClass, name, argumentTypes);
+        }
     }
 
     static final class MethodInfo {
@@ -165,6 +213,7 @@
             if (!isStatic) {
                 argumentTypes[0] = declaringClass;
             }
+            assert resolveJava() != null;
         }
 
         @Override
@@ -185,16 +234,20 @@
         }
 
         ResolvedJavaMethod resolve(MetaAccessProvider metaAccess) {
+            return metaAccess.lookupJavaMethod(resolveJava());
+        }
+
+        Executable resolveJava() {
             try {
-                ResolvedJavaMethod method;
+                Executable res;
                 Class<?>[] parameterTypes = isStatic ? argumentTypes : Arrays.copyOfRange(argumentTypes, 1, argumentTypes.length);
                 if (name.equals("<init>")) {
-                    method = metaAccess.lookupJavaMethod(declaringClass.getDeclaredConstructor(parameterTypes));
+                    res = declaringClass.getDeclaredConstructor(parameterTypes);
                 } else {
-                    method = metaAccess.lookupJavaMethod(declaringClass.getDeclaredMethod(name, parameterTypes));
+                    res = declaringClass.getDeclaredMethod(name, parameterTypes);
                 }
-                assert method.isStatic() == isStatic;
-                return method;
+                assert Modifier.isStatic(res.getModifiers()) == isStatic;
+                return res;
             } catch (NoSuchMethodException | SecurityException e) {
                 throw new GraalInternalError(e);
             }
@@ -373,6 +426,14 @@
                 assert !p.registrations.contains(method) : "a plugin is already registered for " + method;
                 p = p.parent;
             }
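+            // ForeignCallPlugin and MethodSubstitutionPlugin implement the generic execute(...) entry
+            // point, so the per-arity apply(...) signature check below does not apply to them; resolving
+            // the substitute eagerly verifies that it exists.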
+            if (plugin instanceof ForeignCallPlugin) {
+                return true;
+            }
+            if (plugin instanceof MethodSubstitutionPlugin) {
+                MethodSubstitutionPlugin msplugin = (MethodSubstitutionPlugin) plugin;
+                msplugin.getJavaSubstitute();
+                return true;
+            }
             int arguments = method.isStatic ? method.argumentTypes.length : method.argumentTypes.length - 1;
             assert arguments < SIGS.length : format("need to extend %s to support method with %d arguments: %s", InvocationPlugin.class.getSimpleName(), arguments, method);
             for (Method m : plugin.getClass().getDeclaredMethods()) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.graphbuilderconf/src/com/oracle/graal/graphbuilderconf/MethodSubstitutionPlugin.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.graphbuilderconf;
+
+import java.lang.reflect.*;
+import java.util.*;
+import java.util.stream.*;
+
+import sun.misc.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.compiler.common.*;
+import com.oracle.graal.graphbuilderconf.InvocationPlugins.Receiver;
+import com.oracle.graal.nodes.*;
+
+/**
+ * An {@link InvocationPlugin} for a method where the implementation of the method is provided by a
+ * {@linkplain #getSubstitute(MetaAccessProvider) substitute} method. A substitute method must be
+ * static even if the substituted method is not.
+ */
+public final class MethodSubstitutionPlugin implements InvocationPlugin {
+
+    private ResolvedJavaMethod cachedSubstitute;
+    private final Class<?> declaringClass;
+    private final String name;
+    private final Class<?>[] parameters;
+    private final boolean originalIsStatic;
+
+    /**
+     * Creates a method substitution plugin.
+     *
+     * @param declaringClass the class in which the substitute method is declared
+     * @param name the name of the substitute method
+     * @param parameters the parameter types of the substitute method. If the original method is not
+     *            static, then {@code parameters[0]} must be the {@link Class} value denoting
+     *            {@link Receiver}
+     */
+    public MethodSubstitutionPlugin(Class<?> declaringClass, String name, Class<?>... parameters) {
+        this.declaringClass = declaringClass;
+        this.name = name;
+        this.parameters = parameters;
+        this.originalIsStatic = parameters.length == 0 || parameters[0] != Receiver.class;
+    }
+
+    /**
+     * Gets the substitute method, resolving it first if necessary.
+     */
+    public ResolvedJavaMethod getSubstitute(MetaAccessProvider metaAccess) {
+        if (cachedSubstitute == null) {
+            cachedSubstitute = metaAccess.lookupJavaMethod(getJavaSubstitute());
+        }
+        return cachedSubstitute;
+    }
+
+    /**
+     * Gets the reflection API version of the substitution method.
+     */
+    Method getJavaSubstitute() throws GraalInternalError {
+        Method substituteMethod = lookupSubstitute();
+        int modifiers = substituteMethod.getModifiers();
+        if (Modifier.isAbstract(modifiers) || Modifier.isNative(modifiers)) {
+            throw new GraalInternalError("Substitution method must not be abstract or native: " + substituteMethod);
+        }
+        if (!Modifier.isStatic(modifiers)) {
+            throw new GraalInternalError("Substitution method must be static: " + substituteMethod);
+        }
+        return substituteMethod;
+    }
+
+    /**
+     * Determines if a given method is the substitute method of this plugin.
+     */
+    private boolean isSubstitute(Method m) {
+        if (Modifier.isStatic(m.getModifiers()) && m.getName().equals(name)) {
+            if (parameters.length == m.getParameterCount()) {
+                Class<?>[] mparams = m.getParameterTypes();
+                int start = 0;
+                if (!originalIsStatic) {
+                    start = 1;
+                    if (!mparams[0].isAssignableFrom(parameters[0])) {
+                        return false;
+                    }
+                }
+                for (int i = start; i < mparams.length; i++) {
+                    if (mparams[i] != parameters[i]) {
+                        return false;
+                    }
+                }
+            }
+            return true;
+        }
+        return false;
+    }
+
+    /**
+     * Gets the substitute method of this plugin.
+     */
+    private Method lookupSubstitute() {
+        for (Method m : declaringClass.getDeclaredMethods()) {
+            if (isSubstitute(m)) {
+                return m;
+            }
+        }
+        throw new GraalInternalError("No method found in %s compatible with the signature (%s)", declaringClass.getName(), Arrays.asList(parameters).stream().map(c -> c.getSimpleName()).collect(
+                        Collectors.joining(",")));
+    }
+
+    /**
+     * Resolves a name to a class.
+     *
+     * @param className the name of the class to resolve
+     * @param optional if true, resolution failure returns null
+     * @return the resolved class or null if resolution fails and {@code optional} is true
+     */
+    public static Class<?> resolveClass(String className, boolean optional) {
+        try {
+            // Need to use launcher class path to handle classes
+            // that are not on the boot class path
+            ClassLoader cl = Launcher.getLauncher().getClassLoader();
+            return Class.forName(className, false, cl);
+        } catch (ClassNotFoundException e) {
+            if (optional) {
+                return null;
+            }
+            throw new GraalInternalError("Could not resolve type " + className);
+        }
+    }
+
+    @Override
+    public boolean execute(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode[] argsIncludingReceiver) {
+        ResolvedJavaMethod subst = getSubstitute(b.getMetaAccess());
+        if (receiver != null) {
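+            // Forcing receiver.get() materializes the receiver null check before the substitute is
+            // inlined in place of the original invocation.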
+            receiver.get();
+        }
+        b.intrinsify(targetMethod, subst, argsIncludingReceiver);
+        return true;
+    }
+
+    public StackTraceElement getApplySourceLocation(MetaAccessProvider metaAccess) {
+        Class<?> c = getClass();
+        for (Method m : c.getDeclaredMethods()) {
+            if (m.getName().equals("execute")) {
+                return metaAccess.lookupJavaMethod(m).asStackTraceElement(0);
+            }
+        }
+        throw new GraalInternalError("could not find method named \"execute\" in " + c.getName());
+    }
+}
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java	Fri Apr 10 16:58:26 2015 -0700
@@ -41,7 +41,6 @@
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.hotspot.meta.HotSpotCodeCacheProvider.MarkId;
 import com.oracle.graal.hotspot.meta.*;
-import com.oracle.graal.hotspot.nfi.*;
 import com.oracle.graal.hotspot.stubs.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
@@ -50,7 +49,6 @@
 import com.oracle.graal.lir.gen.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.nodes.spi.*;
-import com.oracle.nfi.api.*;
 
 /**
  * HotSpot AMD64 specific backend.
@@ -326,18 +324,4 @@
         }
     }
 
-    /**
-     * Called from the VM.
-     */
-    public static NativeFunctionInterface createNativeFunctionInterface() {
-        HotSpotVMConfig config = HotSpotGraalRuntime.runtime().getConfig();
-        RawNativeCallNodeFactory factory = new RawNativeCallNodeFactory() {
-            public FixedWithNextNode createRawCallNode(Kind returnType, JavaConstant functionPointer, ValueNode... args) {
-                return new AMD64RawNativeCallNode(returnType, functionPointer, args);
-            }
-        };
-        Backend backend = HotSpotGraalRuntime.runtime().getHostBackend();
-        return new HotSpotNativeFunctionInterface(HotSpotGraalRuntime.runtime().getHostProviders(), factory, backend, config.dllLoad, config.dllLookup, config.rtldDefault);
-    }
-
 }
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackendFactory.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackendFactory.java	Fri Apr 10 16:58:26 2015 -0700
@@ -36,6 +36,7 @@
 import com.oracle.graal.hotspot.meta.*;
 import com.oracle.graal.hotspot.word.*;
 import com.oracle.graal.phases.util.*;
+import com.oracle.graal.replacements.amd64.*;
 
 @ServiceProvider(HotSpotBackendFactory.class)
 public class AMD64HotSpotBackendFactory implements HotSpotBackendFactory {
@@ -187,7 +188,9 @@
     protected Plugins createGraphBuilderPlugins(HotSpotGraalRuntimeProvider runtime, TargetDescription target, HotSpotConstantReflectionProvider constantReflection,
                     HotSpotHostForeignCallsProvider foreignCalls, HotSpotMetaAccessProvider metaAccess, HotSpotSnippetReflectionProvider snippetReflection, HotSpotReplacementsImpl replacements,
                     HotSpotWordTypes wordTypes, HotSpotStampProvider stampProvider) {
-        return HotSpotGraphBuilderPlugins.create(runtime.getConfig(), wordTypes, metaAccess, constantReflection, snippetReflection, foreignCalls, stampProvider, replacements, target.arch);
+        Plugins plugins = HotSpotGraphBuilderPlugins.create(runtime.getConfig(), wordTypes, metaAccess, constantReflection, snippetReflection, foreignCalls, stampProvider, replacements);
+        AMD64GraphBuilderPlugins.register(plugins, foreignCalls, (AMD64) target.arch);
+        return plugins;
     }
 
     protected AMD64HotSpotBackend createBackend(HotSpotGraalRuntimeProvider runtime, HotSpotProviders providers) {
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotBackendFactory.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotBackendFactory.java	Fri Apr 10 16:58:26 2015 -0700
@@ -33,6 +33,7 @@
 import com.oracle.graal.hotspot.word.*;
 import com.oracle.graal.nodes.spi.*;
 import com.oracle.graal.phases.util.*;
+import com.oracle.graal.replacements.sparc.*;
 import com.oracle.graal.sparc.*;
 import com.oracle.graal.sparc.SPARC.CPUFeature;
 
@@ -68,7 +69,7 @@
         HotSpotReplacementsImpl replacements = new HotSpotReplacementsImpl(p, snippetReflection, runtime.getConfig(), target);
         HotSpotDisassemblerProvider disassembler = new HotSpotDisassemblerProvider(runtime);
         HotSpotWordTypes wordTypes = new HotSpotWordTypes(metaAccess, target.wordKind);
-        Plugins plugins = createGraphBuilderPlugins(runtime, target, metaAccess, constantReflection, foreignCalls, stampProvider, snippetReflection, replacements, wordTypes);
+        Plugins plugins = createGraphBuilderPlugins(runtime, metaAccess, constantReflection, foreignCalls, stampProvider, snippetReflection, replacements, wordTypes);
         replacements.setGraphBuilderPlugins(plugins);
         HotSpotSuitesProvider suites = createSuites(runtime, plugins);
         HotSpotProviders providers = new HotSpotProviders(metaAccess, codeCache, constantReflection, foreignCalls, lowerer, replacements, disassembler, suites, registers, snippetReflection,
@@ -77,10 +78,12 @@
         return createBackend(runtime, providers);
     }
 
-    protected Plugins createGraphBuilderPlugins(HotSpotGraalRuntimeProvider runtime, TargetDescription target, HotSpotMetaAccessProvider metaAccess,
-                    HotSpotConstantReflectionProvider constantReflection, HotSpotForeignCallsProvider foreignCalls, HotSpotStampProvider stampProvider,
-                    HotSpotSnippetReflectionProvider snippetReflection, HotSpotReplacementsImpl replacements, HotSpotWordTypes wordTypes) {
-        return HotSpotGraphBuilderPlugins.create(runtime.getConfig(), wordTypes, metaAccess, constantReflection, snippetReflection, foreignCalls, stampProvider, replacements, target.arch);
+    protected Plugins createGraphBuilderPlugins(HotSpotGraalRuntimeProvider runtime, HotSpotMetaAccessProvider metaAccess, HotSpotConstantReflectionProvider constantReflection,
+                    HotSpotForeignCallsProvider foreignCalls, HotSpotStampProvider stampProvider, HotSpotSnippetReflectionProvider snippetReflection, HotSpotReplacementsImpl replacements,
+                    HotSpotWordTypes wordTypes) {
+        Plugins plugins = HotSpotGraphBuilderPlugins.create(runtime.getConfig(), wordTypes, metaAccess, constantReflection, snippetReflection, foreignCalls, stampProvider, replacements);
+        SPARCGraphBuilderPlugins.register(plugins, foreignCalls);
+        return plugins;
     }
 
     protected HotSpotSuitesProvider createSuites(HotSpotGraalRuntimeProvider runtime, Plugins plugins) {
--- a/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLIRGenerator.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLIRGenerator.java	Fri Apr 10 16:58:26 2015 -0700
@@ -129,7 +129,7 @@
             assert deoptInfo != null || getStub() != null;
         }
 
-        if (hotspotLinkage.needsJavaFrameAnchor()) {
+        if (linkage.destroysRegisters() || hotspotLinkage.needsJavaFrameAnchor()) {
             HotSpotRegistersProvider registers = getProviders().getRegisters();
             Register thread = registers.getThreadRegister();
             Value threadTemp = newVariable(LIRKind.value(Kind.Long));
--- a/graal/com.oracle.graal.hotspot.test/src/com/oracle/graal/hotspot/test/HotSpotCryptoSubstitutionTest.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot.test/src/com/oracle/graal/hotspot/test/HotSpotCryptoSubstitutionTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -22,6 +22,8 @@
  */
 package com.oracle.graal.hotspot.test;
 
+import static com.oracle.graal.java.AbstractBytecodeParser.IntrinsicContext.*;
+
 import java.io.*;
 import java.lang.reflect.*;
 import java.security.*;
@@ -125,13 +127,13 @@
             Method method = lookup(className, methodName);
             if (method != null) {
                 ResolvedJavaMethod installedCodeOwner = getMetaAccess().lookupJavaMethod(method);
-                StructuredGraph subst = getReplacements().getSubstitution(installedCodeOwner, true);
+                StructuredGraph subst = getReplacements().getSubstitution(installedCodeOwner);
                 ResolvedJavaMethod substMethod = subst == null ? null : subst.method();
                 if (substMethod != null) {
                     StructuredGraph graph = new StructuredGraph(substMethod, AllowAssumptions.YES);
                     Plugins plugins = new Plugins(((HotSpotProviders) getProviders()).getGraphBuilderPlugins());
                     GraphBuilderConfiguration config = GraphBuilderConfiguration.getSnippetDefault(plugins);
-                    IntrinsicContext initialReplacementContext = new IntrinsicContext(installedCodeOwner, substMethod, null, -2);
+                    IntrinsicContext initialReplacementContext = new IntrinsicContext(installedCodeOwner, substMethod, null, ROOT_COMPILATION_BCI);
                     new GraphBuilderPhase.Instance(getMetaAccess(), getProviders().getStampProvider(), getConstantReflection(), config, OptimisticOptimizations.NONE, initialReplacementContext).apply(graph);
                     Assert.assertNotNull(getCode(installedCodeOwner, graph, true));
                     atLeastOneCompiled = true;
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/CompileTheWorld.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/CompileTheWorld.java	Fri Apr 10 16:58:26 2015 -0700
@@ -322,6 +322,10 @@
                         continue;
                     }
 
+                    if (dottedClassName.startsWith("jdk.management.") || dottedClassName.startsWith("jdk.internal.cmm.")) {
+                        continue;
+                    }
+
                     try {
                         // Load and initialize class
                         Class<?> javaClass = Class.forName(dottedClassName, true, loader);
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotVMConfig.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotVMConfig.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1255,8 +1255,8 @@
     @HotSpotVMType(name = "BasicLock", get = HotSpotVMType.Type.SIZE) @Stable public int basicLockSize;
     @HotSpotVMField(name = "BasicLock::_displaced_header", type = "markOop", get = HotSpotVMField.Type.OFFSET) @Stable public int basicLockDisplacedHeaderOffset;
 
-    @HotSpotVMValue(expression = "Universe::heap()->end_addr()", get = HotSpotVMValue.Type.ADDRESS) @Stable public long heapEndAddress;
-    @HotSpotVMValue(expression = "Universe::heap()->top_addr()", get = HotSpotVMValue.Type.ADDRESS) @Stable public long heapTopAddress;
+    @HotSpotVMValue(expression = "Universe::heap()->supports_inline_contig_alloc() ? Universe::heap()->end_addr() : (HeapWord**)-1", get = HotSpotVMValue.Type.ADDRESS) @Stable public long heapEndAddress;
+    @HotSpotVMValue(expression = "Universe::heap()->supports_inline_contig_alloc() ? Universe::heap()->top_addr() : (HeapWord**)-1", get = HotSpotVMValue.Type.ADDRESS) @Stable public long heapTopAddress;
 
     @HotSpotVMField(name = "Thread::_allocated_bytes", type = "jlong", get = HotSpotVMField.Type.OFFSET) @Stable public int threadAllocatedBytesOffset;
 
@@ -1444,6 +1444,9 @@
     @HotSpotVMValue(expression = "SharedRuntime::dsin", get = HotSpotVMValue.Type.ADDRESS) @Stable public long arithmeticSinAddress;
     @HotSpotVMValue(expression = "SharedRuntime::dcos", get = HotSpotVMValue.Type.ADDRESS) @Stable public long arithmeticCosAddress;
     @HotSpotVMValue(expression = "SharedRuntime::dtan", get = HotSpotVMValue.Type.ADDRESS) @Stable public long arithmeticTanAddress;
+    @HotSpotVMValue(expression = "SharedRuntime::dexp", get = HotSpotVMValue.Type.ADDRESS) @Stable public long arithmeticExpAddress;
+    @HotSpotVMValue(expression = "SharedRuntime::dlog", get = HotSpotVMValue.Type.ADDRESS) @Stable public long arithmeticLogAddress;
+    @HotSpotVMValue(expression = "SharedRuntime::dlog10", get = HotSpotVMValue.Type.ADDRESS) @Stable public long arithmeticLog10Address;
     @HotSpotVMValue(expression = "SharedRuntime::dpow", get = HotSpotVMValue.Type.ADDRESS) @Stable public long arithmeticPowAddress;
 
     @HotSpotVMValue(expression = "(jint) GraalCounterSize") @Stable public int graalCountersSize;
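Note on the heap-address change above: the two fields now export (HeapWord**)-1 when the collector does not support inline contiguous allocation, presumably so compiler-side consumers can treat -1 as a "not available" sentinel instead of dereferencing a bogus address. A minimal sketch of such a guard, using plain fields as stand-ins for the real HotSpotVMConfig values (the guard method name is an assumption, not Graal API):

    public class InlineAllocConfigSketch {
        // Stand-ins for the @HotSpotVMValue fields; in the VM they are real addresses or -1.
        long heapTopAddress;
        long heapEndAddress;

        // Hypothetical guard: only use inline (TLAB-style) allocation when both addresses are valid.
        boolean inlineContiguousAllocationSupported() {
            return heapTopAddress != -1L && heapEndAddress != -1L;
        }

        public static void main(String[] args) {
            InlineAllocConfigSketch cfg = new InlineAllocConfigSketch();
            cfg.heapTopAddress = -1L;  // e.g. a GC without a contiguous eden
            cfg.heapEndAddress = -1L;
            System.out.println("inline allocation usable: " + cfg.inlineContiguousAllocationSupported());
        }
    }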
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/CompilerToVM.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/CompilerToVM.java	Fri Apr 10 16:58:26 2015 -0700
@@ -23,11 +23,14 @@
 
 package com.oracle.graal.hotspot.bridge;
 
+import sun.misc.*;
+
 import com.oracle.graal.api.code.*;
 import com.oracle.graal.api.meta.*;
 import com.oracle.graal.compiler.common.*;
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.hotspot.meta.*;
+import com.oracle.graal.hotspotvmconfig.*;
 
 /**
  * Calls from Java into HotSpot.
@@ -290,7 +293,19 @@
 
     long readUnsafeKlassPointer(Object o);
 
-    Object readUnsafeOop(Object base, long displacement, boolean compressed);
+    /**
+     * Reads an object pointer within a VM data structure. That is, any {@link HotSpotVMField}
+     * whose {@link HotSpotVMField#type() type} is {@code "oop"} (e.g.,
+     * {@code ArrayKlass::_component_mirror}, {@code Klass::_java_mirror},
+     * {@code JavaThread::_threadObj}).
+     *
+     * Note that {@link Unsafe#getObject(Object, long)} cannot be used for this since it does a
+     * {@code narrowOop} read if the VM is using compressed oops, whereas oops within VM data
+     * structures are (currently) always uncompressed.
+     *
+     * @param address address of an oop field within a VM data structure
+     */
+    Object readUncompressedOop(long address);
 
     void doNotInlineOrCompile(long metaspaceMethod);
 
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/CompilerToVMImpl.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/bridge/CompilerToVMImpl.java	Fri Apr 10 16:58:26 2015 -0700
@@ -160,7 +160,7 @@
     public native long readUnsafeKlassPointer(Object o);
 
     @Override
-    public native Object readUnsafeOop(Object base, long displacement, boolean compressed);
+    public native Object readUncompressedOop(long address);
 
     @Override
     public native void doNotInlineOrCompile(long metaspaceMethod);
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotGraphBuilderPlugins.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotGraphBuilderPlugins.java	Fri Apr 10 16:58:26 2015 -0700
@@ -23,6 +23,7 @@
 package com.oracle.graal.hotspot.meta;
 
 import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.*;
+import static com.oracle.graal.hotspot.replacements.SystemSubstitutions.*;
 
 import java.lang.invoke.*;
 
@@ -31,9 +32,8 @@
 import com.oracle.graal.api.code.*;
 import com.oracle.graal.api.meta.*;
 import com.oracle.graal.api.replacements.*;
-import com.oracle.graal.compiler.common.type.*;
+import com.oracle.graal.graphbuilderconf.*;
 import com.oracle.graal.graphbuilderconf.GraphBuilderConfiguration.Plugins;
-import com.oracle.graal.graphbuilderconf.*;
 import com.oracle.graal.graphbuilderconf.InvocationPlugins.Receiver;
 import com.oracle.graal.graphbuilderconf.InvocationPlugins.Registration;
 import com.oracle.graal.hotspot.*;
@@ -65,7 +65,7 @@
      * @param stampProvider
      */
     public static Plugins create(HotSpotVMConfig config, HotSpotWordTypes wordTypes, MetaAccessProvider metaAccess, ConstantReflectionProvider constantReflection,
-                    SnippetReflectionProvider snippetReflection, ForeignCallsProvider foreignCalls, StampProvider stampProvider, ReplacementsImpl replacements, Architecture arch) {
+                    SnippetReflectionProvider snippetReflection, ForeignCallsProvider foreignCalls, StampProvider stampProvider, ReplacementsImpl replacements) {
         InvocationPlugins invocationPlugins = new HotSpotInvocationPlugins(config, metaAccess, constantReflection.getMethodHandleAccess());
 
         Plugins plugins = new Plugins(invocationPlugins);
@@ -85,7 +85,8 @@
         registerCallSitePlugins(invocationPlugins);
         registerReflectionPlugins(invocationPlugins);
         registerStableOptionPlugins(invocationPlugins);
-        StandardGraphBuilderPlugins.registerInvocationPlugins(metaAccess, arch, invocationPlugins, !config.useHeapProfiler);
+        registerAESPlugins(invocationPlugins, config);
+        StandardGraphBuilderPlugins.registerInvocationPlugins(metaAccess, invocationPlugins, !config.useHeapProfiler);
 
         return plugins;
     }
@@ -99,6 +100,7 @@
                 return true;
             }
         });
+        r.registerMethodSubstitution(ObjectSubstitutions.class, "hashCode", Receiver.class);
     }
 
     private static void registerClassPlugins(InvocationPlugins plugins) {
@@ -162,21 +164,11 @@
 
     private static void registerSystemPlugins(InvocationPlugins plugins, ForeignCallsProvider foreignCalls) {
         Registration r = new Registration(plugins, System.class);
-        r.register0("currentTimeMillis", new InvocationPlugin() {
-            public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver) {
-                b.addPush(Kind.Long, new ForeignCallNode(foreignCalls, SystemSubstitutions.JAVA_TIME_MILLIS, StampFactory.forKind(Kind.Long)));
-                return true;
-            }
-        });
-        r.register0("nanoTime", new InvocationPlugin() {
-            public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver) {
-                b.addPush(Kind.Long, new ForeignCallNode(foreignCalls, SystemSubstitutions.JAVA_TIME_NANOS, StampFactory.forKind(Kind.Long)));
-                return true;
-            }
-        });
+        r.register0("currentTimeMillis", new ForeignCallPlugin(foreignCalls, JAVA_TIME_MILLIS));
+        r.register0("nanoTime", new ForeignCallPlugin(foreignCalls, JAVA_TIME_NANOS));
         r.register1("identityHashCode", Object.class, new InvocationPlugin() {
             public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode object) {
-                b.addPush(new SystemIdentityHashCodeNode(b.getInvokeKind(), targetMethod, b.bci(), b.getInvokeReturnType(), object));
+                b.addPush(new IdentityHashCodeNode(b.getInvokeKind(), targetMethod, b.bci(), b.getInvokeReturnType(), object));
                 return true;
             }
         });
@@ -218,4 +210,25 @@
             }
         });
     }
+
+    private static void registerAESPlugins(InvocationPlugins plugins, HotSpotVMConfig config) {
+        if (config.useAESIntrinsics) {
+            assert config.aescryptEncryptBlockStub != 0L;
+            assert config.aescryptDecryptBlockStub != 0L;
+            assert config.cipherBlockChainingEncryptAESCryptStub != 0L;
+            assert config.cipherBlockChainingDecryptAESCryptStub != 0L;
+            Class<?> c = MethodSubstitutionPlugin.resolveClass("com.sun.crypto.provider.CipherBlockChaining", true);
+            if (c != null) {
+                Registration r = new Registration(plugins, c);
+                r.registerMethodSubstitution(CipherBlockChainingSubstitutions.class, "encrypt", Receiver.class, byte[].class, int.class, int.class, byte[].class, int.class);
+                r.registerMethodSubstitution(CipherBlockChainingSubstitutions.class, "decrypt", Receiver.class, byte[].class, int.class, int.class, byte[].class, int.class);
+            }
+            c = MethodSubstitutionPlugin.resolveClass("com.sun.crypto.provider.AESCrypt", true);
+            if (c != null) {
+                Registration r = new Registration(plugins, c);
+                r.registerMethodSubstitution(AESCryptSubstitutions.class, "encryptBlock", Receiver.class, byte[].class, int.class, byte[].class, int.class);
+                r.registerMethodSubstitution(AESCryptSubstitutions.class, "decryptBlock", Receiver.class, byte[].class, int.class, byte[].class, int.class);
+            }
+        }
+    }
 }
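registerAESPlugins above replaces the old @ClassSubstitution/defaultGuard mechanism: the crypto provider classes are resolved reflectively and the substitutions are registered only when the VM actually has the AES stubs. A minimal sketch of that optional-registration pattern, with a hypothetical Registry interface standing in for InvocationPlugins.Registration:

    public class OptionalIntrinsicRegistrationSketch {
        interface Registry {
            void registerSubstitution(Class<?> declaringClass, String methodName);
        }

        static Class<?> resolveOptional(String className) {
            try {
                return Class.forName(className);
            } catch (ClassNotFoundException e) {
                return null; // the class is optional, so absence is not an error
            }
        }

        static void registerAes(Registry registry, boolean useAesIntrinsics) {
            if (!useAesIntrinsics) {
                return; // no VM stubs available, so register nothing
            }
            Class<?> aesCrypt = resolveOptional("com.sun.crypto.provider.AESCrypt");
            if (aesCrypt != null) {
                registry.registerSubstitution(aesCrypt, "encryptBlock");
                registry.registerSubstitution(aesCrypt, "decryptBlock");
            }
        }
    }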
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotHostForeignCallsProvider.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotHostForeignCallsProvider.java	Fri Apr 10 16:58:26 2015 -0700
@@ -44,7 +44,6 @@
 import static com.oracle.graal.hotspot.stubs.UnwindExceptionToCallerStub.*;
 import static com.oracle.graal.nodes.java.ForeignCallDescriptors.*;
 import static com.oracle.graal.replacements.Log.*;
-import static com.oracle.graal.replacements.MathSubstitutionsX86.*;
 
 import java.util.*;
 
@@ -150,6 +149,9 @@
         registerForeignCall(ARITHMETIC_SIN, c.arithmeticSinAddress, NativeCall, DESTROYS_REGISTERS, LEAF, REEXECUTABLE, NO_LOCATIONS);
         registerForeignCall(ARITHMETIC_COS, c.arithmeticCosAddress, NativeCall, DESTROYS_REGISTERS, LEAF, REEXECUTABLE, NO_LOCATIONS);
         registerForeignCall(ARITHMETIC_TAN, c.arithmeticTanAddress, NativeCall, DESTROYS_REGISTERS, LEAF, REEXECUTABLE, NO_LOCATIONS);
+        registerForeignCall(ARITHMETIC_EXP, c.arithmeticExpAddress, NativeCall, DESTROYS_REGISTERS, LEAF, REEXECUTABLE, NO_LOCATIONS);
+        registerForeignCall(ARITHMETIC_LOG, c.arithmeticLogAddress, NativeCall, DESTROYS_REGISTERS, LEAF, REEXECUTABLE, NO_LOCATIONS);
+        registerForeignCall(ARITHMETIC_LOG10, c.arithmeticLog10Address, NativeCall, DESTROYS_REGISTERS, LEAF, REEXECUTABLE, NO_LOCATIONS);
         registerForeignCall(ARITHMETIC_POW, c.arithmeticPowAddress, NativeCall, DESTROYS_REGISTERS, LEAF, REEXECUTABLE, NO_LOCATIONS);
         registerForeignCall(LOAD_AND_CLEAR_EXCEPTION, c.loadAndClearExceptionAddress, NativeCall, DESTROYS_REGISTERS, LEAF_NOFP, NOT_REEXECUTABLE, any());
 
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotInvocationPlugins.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotInvocationPlugins.java	Fri Apr 10 16:58:26 2015 -0700
@@ -61,19 +61,6 @@
                 return;
             }
         }
-        if (!config.useCountLeadingZerosInstruction) {
-            if (name.equals("numberOfLeadingZeros")) {
-                assert declaringClass.equals(Integer.class) || declaringClass.equals(Long.class);
-                return;
-            }
-        }
-        if (!config.useCountTrailingZerosInstruction) {
-            if (name.equals("numberOfTrailingZeros")) {
-                assert declaringClass.equals(Integer.class);
-                return;
-            }
-        }
-
         if (config.useHeapProfiler) {
             if (plugin instanceof BoxPlugin) {
                 // The heap profiler wants to see all allocations related to boxing
@@ -150,7 +137,7 @@
     public void checkNewNodes(GraphBuilderContext b, InvocationPlugin plugin, NodeIterable<Node> newNodes) {
         if (GraalOptions.ImmutableCode.getValue()) {
             for (Node node : newNodes) {
-                if (node instanceof ConstantNode) {
+                if (node.hasUsages() && node instanceof ConstantNode) {
                     ConstantNode c = (ConstantNode) node;
                     if (c.getKind() == Kind.Object && !AheadOfTimeVerificationPhase.isLegalObjectConstant(c)) {
                         throw new AssertionError("illegal constant node in AOT: " + node);
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotMemoryAccessProviderImpl.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotMemoryAccessProviderImpl.java	Fri Apr 10 16:58:26 2015 -0700
@@ -133,13 +133,17 @@
     private Object readRawObject(Constant baseConstant, long initialDisplacement, boolean compressed) {
         long displacement = initialDisplacement;
 
+        Object ret;
         Object base = asObject(baseConstant);
         if (base == null) {
+            assert !compressed;
             displacement += asRawPointer(baseConstant);
+            ret = runtime.getCompilerToVM().readUncompressedOop(displacement);
+        } else {
+            assert runtime.getConfig().useCompressedOops == compressed;
+            ret = unsafe.getObject(base, displacement);
         }
-        Object ret = runtime.getCompilerToVM().readUnsafeOop(base, displacement, compressed);
         assert verifyReadRawObject(ret, baseConstant, initialDisplacement, compressed);
-
         return ret;
     }
 
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotResolvedJavaFieldImpl.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotResolvedJavaFieldImpl.java	Fri Apr 10 16:58:26 2015 -0700
@@ -149,9 +149,13 @@
 
     @Override
     public JavaType getType() {
-        if (type instanceof HotSpotUnresolvedJavaType) {
+        // Pull field into local variable to prevent a race causing
+        // a ClassCastException below
+        JavaType currentType = type;
+        if (currentType instanceof HotSpotUnresolvedJavaType) {
             // Don't allow unresolved types to hang around forever
-            ResolvedJavaType resolved = ((HotSpotUnresolvedJavaType) type).reresolve(holder);
+            HotSpotUnresolvedJavaType unresolvedType = (HotSpotUnresolvedJavaType) currentType;
+            ResolvedJavaType resolved = unresolvedType.reresolve(holder);
             if (resolved != null) {
                 type = resolved;
             }
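The getType() change above is a classic read-once fix: the mutable type field is read into a local before the instanceof test and the cast, so another thread replacing it with a resolved type in between can no longer cause a ClassCastException. A minimal sketch of the pattern with stand-in types (not the Graal classes):

    public class ReadOnceSketch {
        interface JavaType { }
        static final class UnresolvedType implements JavaType {
            ResolvedType reresolve() { return new ResolvedType(); }
        }
        static final class ResolvedType implements JavaType { }

        private volatile JavaType type = new UnresolvedType();

        JavaType getType() {
            JavaType currentType = type; // read the racy field exactly once
            if (currentType instanceof UnresolvedType) {
                // Safe: the cast is applied to the value that was just tested, not to a fresh read.
                ResolvedType resolved = ((UnresolvedType) currentType).reresolve();
                if (resolved != null) {
                    type = resolved; // cache the resolved type for later calls
                }
                return resolved;
            }
            return currentType;
        }
    }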
@@ -198,12 +202,18 @@
         return null;
     }
 
+    private Field toJavaCache;
+
     private Field toJava() {
+        if (toJavaCache != null) {
+            return toJavaCache;
+        }
+
         if (isInternal()) {
             return null;
         }
         try {
-            return holder.mirror().getDeclaredField(name);
+            return toJavaCache = holder.mirror().getDeclaredField(name);
         } catch (NoSuchFieldException | NoClassDefFoundError e) {
             return null;
         }
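The toJavaCache field added above memoizes a reflective getDeclaredField lookup; the lookup is idempotent, so a benign race on the cache field is acceptable. A self-contained sketch of the same idiom using only standard JDK reflection:

    import java.lang.reflect.Field;

    public class FieldLookupCacheSketch {
        private final Class<?> holder;
        private final String name;
        private Field toJavaCache; // benign race: every thread computes the same Field

        FieldLookupCacheSketch(Class<?> holder, String name) {
            this.holder = holder;
            this.name = name;
        }

        Field toJava() {
            if (toJavaCache != null) {
                return toJavaCache; // fast path once the lookup has succeeded
            }
            try {
                return toJavaCache = holder.getDeclaredField(name);
            } catch (NoSuchFieldException | NoClassDefFoundError e) {
                return null; // mirror the original: missing fields are reported as null
            }
        }

        public static void main(String[] args) {
            System.out.println(new FieldLookupCacheSketch(String.class, "hash").toJava());
        }
    }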
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nfi/HotSpotNativeFunctionHandle.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.hotspot.nfi;
-
-import java.util.*;
-
-import com.oracle.graal.api.code.*;
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.compiler.common.*;
-import com.oracle.graal.debug.*;
-import com.oracle.graal.debug.Debug.Scope;
-import com.oracle.nfi.api.*;
-
-public class HotSpotNativeFunctionHandle implements NativeFunctionHandle {
-
-    private final InstalledCode code;
-    private final String name;
-    private final Class<?>[] argumentTypes;
-
-    public HotSpotNativeFunctionHandle(InstalledCode code, String name, Class<?>... argumentTypes) {
-        this.argumentTypes = argumentTypes;
-        this.name = name;
-        this.code = code;
-    }
-
-    private void traceCall(Object... args) {
-        try (Scope s = Debug.scope("GNFI")) {
-            if (Debug.isLogEnabled()) {
-                Debug.log("[GNFI] %s%s", name, Arrays.toString(args));
-            }
-        }
-    }
-
-    private void traceResult(Object result) {
-        try (Scope s = Debug.scope("GNFI")) {
-            if (Debug.isLogEnabled()) {
-                Debug.log("[GNFI] %s --> %s", name, result);
-            }
-        }
-    }
-
-    @Override
-    public Object call(Object... args) {
-        assert checkArgs(args);
-        try {
-            traceCall(args);
-            Object res = code.executeVarargs(args, null, null);
-            traceResult(res);
-            return res;
-        } catch (InvalidInstalledCodeException e) {
-            throw GraalInternalError.shouldNotReachHere("Execution of GNFI Callstub failed: " + name);
-        }
-    }
-
-    private boolean checkArgs(Object... args) {
-        assert args.length == argumentTypes.length : this + " expected " + argumentTypes.length + " args, got " + args.length;
-        for (int i = 0; i < argumentTypes.length; i++) {
-            Object arg = args[i];
-            assert arg != null;
-            Class<?> expectedType = argumentTypes[i];
-            if (expectedType.isPrimitive()) {
-                Kind kind = Kind.fromJavaClass(expectedType);
-                expectedType = kind.toBoxedJavaClass();
-            }
-            assert expectedType == arg.getClass() : this + " expected arg " + i + " to be " + expectedType.getName() + ", not " + arg.getClass().getName();
-
-        }
-        return true;
-    }
-
-    @Override
-    public String toString() {
-        return name + Arrays.toString(argumentTypes);
-    }
-}
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nfi/HotSpotNativeFunctionInterface.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,201 +0,0 @@
-/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.hotspot.nfi;
-
-import static com.oracle.graal.api.code.CodeUtil.*;
-import static com.oracle.graal.compiler.common.UnsafeAccess.*;
-import static com.oracle.graal.hotspot.nfi.NativeCallStubGraphBuilder.*;
-
-import com.oracle.graal.api.code.*;
-import com.oracle.graal.api.code.CallingConvention.Type;
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.compiler.*;
-import com.oracle.graal.compiler.target.*;
-import com.oracle.graal.debug.*;
-import com.oracle.graal.debug.Debug.Scope;
-import com.oracle.graal.hotspot.*;
-import com.oracle.graal.hotspot.meta.*;
-import com.oracle.graal.lir.asm.*;
-import com.oracle.graal.lir.phases.*;
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.phases.*;
-import com.oracle.graal.phases.tiers.*;
-import com.oracle.nfi.api.*;
-
-public class HotSpotNativeFunctionInterface implements NativeFunctionInterface {
-
-    private final HotSpotProviders providers;
-    private final Backend backend;
-    private final HotSpotNativeLibraryHandle rtldDefault;
-    private final HotSpotNativeFunctionPointer libraryLoadFunctionPointer;
-    private final HotSpotNativeFunctionPointer functionLookupFunctionPointer;
-    private final RawNativeCallNodeFactory factory;
-
-    private HotSpotNativeFunctionHandle libraryLookupFunctionHandle;
-    private HotSpotNativeFunctionHandle dllLookupFunctionHandle;
-
-    public HotSpotNativeFunctionInterface(HotSpotProviders providers, RawNativeCallNodeFactory factory, Backend backend, long dlopen, long dlsym, long rtldDefault) {
-        this.rtldDefault = rtldDefault == HotSpotVMConfig.INVALID_RTLD_DEFAULT_HANDLE ? null : new HotSpotNativeLibraryHandle("RTLD_DEFAULT", rtldDefault);
-        this.providers = providers;
-        assert backend != null;
-        this.backend = backend;
-        this.factory = factory;
-        this.libraryLoadFunctionPointer = new HotSpotNativeFunctionPointer(dlopen, "os::dll_load");
-        this.functionLookupFunctionPointer = new HotSpotNativeFunctionPointer(dlsym, "os::dll_lookup");
-    }
-
-    @Override
-    public HotSpotNativeLibraryHandle getLibraryHandle(String libPath) {
-        if (libraryLookupFunctionHandle == null) {
-            libraryLookupFunctionHandle = createHandle(libraryLoadFunctionPointer, long.class, long.class, long.class, int.class);
-        }
-
-        int ebufLen = 1024;
-        // Allocating a single chunk for both the error message buffer and the
-        // file name simplifies deallocation below.
-        long buffer = unsafe.allocateMemory(ebufLen + libPath.length() + 1);
-        long ebuf = buffer;
-        long libPathCString = writeCString(libPath, buffer + ebufLen);
-        try {
-            long handle = (long) libraryLookupFunctionHandle.call(libPathCString, ebuf, ebufLen);
-            if (handle == 0) {
-                throw new UnsatisfiedLinkError(libPath);
-            }
-            return new HotSpotNativeLibraryHandle(libPath, handle);
-        } finally {
-            unsafe.freeMemory(buffer);
-        }
-    }
-
-    @Override
-    public HotSpotNativeFunctionHandle getFunctionHandle(NativeLibraryHandle library, String name, Class<?> returnType, Class<?>... argumentTypes) {
-        HotSpotNativeFunctionPointer functionPointer = lookupFunctionPointer(name, library, false);
-        return createHandle(functionPointer, returnType, argumentTypes);
-    }
-
-    @Override
-    public HotSpotNativeFunctionHandle getFunctionHandle(NativeLibraryHandle[] libraries, String name, Class<?> returnType, Class<?>... argumentTypes) {
-        HotSpotNativeFunctionPointer functionPointer = null;
-        for (NativeLibraryHandle libraryHandle : libraries) {
-            functionPointer = lookupFunctionPointer(name, libraryHandle, false);
-            if (functionPointer != null) {
-                return createHandle(functionPointer, returnType, argumentTypes);
-            }
-        }
-        // Fall back to default library path
-        return getFunctionHandle(name, returnType, argumentTypes);
-    }
-
-    @Override
-    public HotSpotNativeFunctionHandle getFunctionHandle(String name, Class<?> returnType, Class<?>... argumentTypes) {
-        if (rtldDefault == null) {
-            throw new UnsatisfiedLinkError(name);
-        }
-        return getFunctionHandle(rtldDefault, name, returnType, argumentTypes);
-    }
-
-    private HotSpotNativeFunctionPointer lookupFunctionPointer(String name, NativeLibraryHandle library, boolean linkageErrorIfMissing) {
-        if (name == null || library == null) {
-            throw new NullPointerException();
-        }
-
-        if (dllLookupFunctionHandle == null) {
-            dllLookupFunctionHandle = createHandle(functionLookupFunctionPointer, long.class, long.class, long.class);
-        }
-        long nameCString = createCString(name);
-        try {
-            long functionPointer = (long) dllLookupFunctionHandle.call(((HotSpotNativeLibraryHandle) library).value, nameCString);
-            if (functionPointer == 0L) {
-                if (!linkageErrorIfMissing) {
-                    return null;
-                }
-                throw new UnsatisfiedLinkError(name);
-            }
-            return new HotSpotNativeFunctionPointer(functionPointer, name);
-        } finally {
-            unsafe.freeMemory(nameCString);
-        }
-    }
-
-    @Override
-    public HotSpotNativeFunctionHandle getFunctionHandle(NativeFunctionPointer functionPointer, Class<?> returnType, Class<?>... argumentTypes) {
-        if (!(functionPointer instanceof HotSpotNativeFunctionPointer)) {
-            throw new UnsatisfiedLinkError(functionPointer.getName());
-        }
-        return createHandle(functionPointer, returnType, argumentTypes);
-    }
-
-    private HotSpotNativeFunctionHandle createHandle(NativeFunctionPointer functionPointer, Class<?> returnType, Class<?>... argumentTypes) {
-        HotSpotNativeFunctionPointer hs = (HotSpotNativeFunctionPointer) functionPointer;
-        if (hs != null) {
-            InstalledCode code = installNativeFunctionStub(hs.value, returnType, argumentTypes);
-            return new HotSpotNativeFunctionHandle(code, hs.name, argumentTypes);
-        } else {
-            return null;
-        }
-    }
-
-    /**
-     * Creates and installs a stub for calling a native function.
-     */
-    private InstalledCode installNativeFunctionStub(long functionPointer, Class<?> returnType, Class<?>... argumentTypes) {
-        StructuredGraph g = getGraph(providers, factory, functionPointer, returnType, argumentTypes);
-        Suites suites = providers.getSuites().createSuites();
-        LIRSuites lirSuites = providers.getSuites().createLIRSuites();
-        PhaseSuite<HighTierContext> phaseSuite = backend.getSuites().getDefaultGraphBuilderSuite().copy();
-        CallingConvention cc = getCallingConvention(providers.getCodeCache(), Type.JavaCallee, g.method(), false);
-        CompilationResult compResult = GraalCompiler.compileGraph(g, cc, g.method(), providers, backend, backend.getTarget(), phaseSuite, OptimisticOptimizations.ALL,
-                        DefaultProfilingInfo.get(TriState.UNKNOWN), null, suites, lirSuites, new CompilationResult(), CompilationResultBuilderFactory.Default);
-        InstalledCode installedCode;
-        try (Scope s = Debug.scope("CodeInstall", providers.getCodeCache(), g.method())) {
-            installedCode = providers.getCodeCache().addMethod(g.method(), compResult, null, null);
-        } catch (Throwable e) {
-            throw Debug.handle(e);
-        }
-        return installedCode;
-    }
-
-    @Override
-    public HotSpotNativeFunctionPointer getFunctionPointer(NativeLibraryHandle[] libraries, String name) {
-        for (NativeLibraryHandle libraryHandle : libraries) {
-            HotSpotNativeFunctionPointer functionPointer = lookupFunctionPointer(name, libraryHandle, false);
-            if (functionPointer != null) {
-                return functionPointer;
-            }
-        }
-        // Fall back to default library path
-        if (rtldDefault == null) {
-            throw new UnsatisfiedLinkError(name);
-        }
-        return lookupFunctionPointer(name, rtldDefault, false);
-    }
-
-    public boolean isDefaultLibrarySearchSupported() {
-        return rtldDefault != null;
-    }
-
-    @Override
-    public NativeFunctionPointer getNativeFunctionPointerFromRawValue(long rawValue) {
-        return new HotSpotNativeFunctionPointer(rawValue, null);
-    }
-}
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nfi/HotSpotNativeFunctionPointer.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.hotspot.nfi;
-
-import com.oracle.nfi.api.*;
-
-public class HotSpotNativeFunctionPointer implements NativeFunctionPointer {
-
-    final long value;
-    final String name;
-
-    public HotSpotNativeFunctionPointer(long value, String name) {
-        if (value == 0) {
-            throw new UnsatisfiedLinkError(name);
-        }
-        this.value = value;
-        this.name = name;
-    }
-
-    public String getName() {
-        return name;
-    }
-
-    public long getRawValue() {
-        return value;
-    }
-
-    @Override
-    public String toString() {
-        return name + "@0x" + Long.toHexString(value);
-    }
-}
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nfi/HotSpotNativeLibraryHandle.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.hotspot.nfi;
-
-import com.oracle.nfi.api.*;
-
-public class HotSpotNativeLibraryHandle implements NativeLibraryHandle {
-
-    final long value;
-    final String name;
-
-    public HotSpotNativeLibraryHandle(String name, long handle) {
-        this.name = name;
-        this.value = handle;
-    }
-
-    public String getName() {
-        return name;
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-        if (obj instanceof HotSpotNativeLibraryHandle) {
-            HotSpotNativeLibraryHandle that = (HotSpotNativeLibraryHandle) obj;
-            return that.value == value;
-        }
-        return false;
-    }
-
-    @Override
-    public int hashCode() {
-        return (int) value;
-    }
-
-    @Override
-    public String toString() {
-        return name + "@" + value;
-    }
-}
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nfi/NativeCallStubGraphBuilder.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,178 +0,0 @@
-/*
- * Copyright (c) 2014, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.hotspot.nfi;
-
-import static com.oracle.graal.hotspot.HotSpotGraalRuntime.*;
-
-import java.util.*;
-
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.compiler.common.*;
-import com.oracle.graal.compiler.common.type.*;
-import com.oracle.graal.hotspot.meta.*;
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.nodes.StructuredGraph.AllowAssumptions;
-import com.oracle.graal.nodes.extended.*;
-import com.oracle.graal.nodes.java.*;
-import com.oracle.graal.nodes.virtual.*;
-
-/**
- * Utility creating a graph for a stub used to call a native function.
- */
-public class NativeCallStubGraphBuilder {
-
-    /**
-     * Creates a graph for a stub used to call a native function.
-     *
-     * @param functionPointer a native function pointer
-     * @param returnType the type of the return value
-     * @param argumentTypes the types of the arguments
-     * @return the graph that represents the call stub
-     */
-    public static StructuredGraph getGraph(HotSpotProviders providers, RawNativeCallNodeFactory factory, long functionPointer, Class<?> returnType, Class<?>... argumentTypes) {
-        try {
-            ResolvedJavaMethod method = providers.getMetaAccess().lookupJavaMethod(NativeCallStubGraphBuilder.class.getMethod("libCall", Object.class, Object.class, Object.class));
-            StructuredGraph g = new StructuredGraph(method, AllowAssumptions.NO);
-            ParameterNode arg0 = g.unique(new ParameterNode(0, StampFactory.forKind(Kind.Object)));
-            ParameterNode arg1 = g.unique(new ParameterNode(1, StampFactory.forKind(Kind.Object)));
-            ParameterNode arg2 = g.unique(new ParameterNode(2, StampFactory.forKind(Kind.Object)));
-            FrameState frameState = g.add(new FrameState(null, method, 0, Arrays.asList(new ValueNode[]{arg0, arg1, arg2}), 3, 0, false, false, null, new ArrayList<EscapeObjectState>()));
-            g.start().setStateAfter(frameState);
-            List<ValueNode> parameters = new ArrayList<>();
-            FixedWithNextNode fixedWithNext = getParameters(g, arg0, argumentTypes.length, argumentTypes, parameters, providers);
-            JavaConstant functionPointerNode = JavaConstant.forLong(functionPointer);
-
-            ValueNode[] arguments = new ValueNode[parameters.size()];
-
-            for (int i = 0; i < arguments.length; i++) {
-                arguments[i] = parameters.get(i);
-            }
-
-            FixedWithNextNode callNode = g.add(factory.createRawCallNode(getKind(returnType), functionPointerNode, arguments));
-
-            if (fixedWithNext == null) {
-                g.start().setNext(callNode);
-            } else {
-                fixedWithNext.setNext(callNode);
-            }
-
-            // box result
-            BoxNode boxedResult;
-            if (callNode.getKind() != Kind.Void) {
-                if (callNode.getKind() == Kind.Object) {
-                    throw new IllegalArgumentException("Return type not supported: " + returnType.getName());
-                }
-                ResolvedJavaType type = providers.getMetaAccess().lookupJavaType(callNode.getKind().toBoxedJavaClass());
-                boxedResult = g.unique(new BoxNode(callNode, type, callNode.getKind()));
-            } else {
-                boxedResult = g.unique(new BoxNode(ConstantNode.forLong(0, g), providers.getMetaAccess().lookupJavaType(Long.class), Kind.Long));
-            }
-
-            ReturnNode returnNode = g.add(new ReturnNode(boxedResult));
-            callNode.setNext(returnNode);
-            return g;
-        } catch (NoSuchMethodException e) {
-            throw GraalInternalError.shouldNotReachHere("Call Stub method not found");
-        }
-    }
-
-    private static FixedWithNextNode getParameters(StructuredGraph g, ParameterNode argumentsArray, int numArgs, Class<?>[] argumentTypes, List<ValueNode> args, HotSpotProviders providers) {
-        assert numArgs == argumentTypes.length;
-        FixedWithNextNode last = null;
-        for (int i = 0; i < numArgs; i++) {
-            // load boxed array element:
-            LoadIndexedNode boxedElement = g.add(new LoadIndexedNode(argumentsArray, ConstantNode.forInt(i, g), Kind.Object));
-            if (i == 0) {
-                g.start().setNext(boxedElement);
-                last = boxedElement;
-            } else {
-                last.setNext(boxedElement);
-                last = boxedElement;
-            }
-            Class<?> type = argumentTypes[i];
-            Kind kind = getKind(type);
-            if (kind == Kind.Object) {
-                // array value
-                Kind arrayElementKind = getElementKind(type);
-                LocationIdentity locationIdentity = NamedLocationIdentity.getArrayLocation(arrayElementKind);
-                int displacement = runtime().getArrayBaseOffset(arrayElementKind);
-                ConstantNode index = ConstantNode.forInt(0, g);
-                int indexScaling = runtime().getArrayIndexScale(arrayElementKind);
-                IndexedLocationNode locationNode = g.unique(new IndexedLocationNode(locationIdentity, displacement, index, indexScaling));
-                Stamp wordStamp = StampFactory.forKind(providers.getWordTypes().getWordKind());
-                ComputeAddressNode arrayAddress = g.unique(new ComputeAddressNode(boxedElement, locationNode, wordStamp));
-                args.add(arrayAddress);
-            } else {
-                // boxed primitive value
-                try {
-                    ResolvedJavaField field = providers.getMetaAccess().lookupJavaField(kind.toBoxedJavaClass().getDeclaredField("value"));
-                    LoadFieldNode loadFieldNode = g.add(new LoadFieldNode(boxedElement, field));
-                    last.setNext(loadFieldNode);
-                    last = loadFieldNode;
-                    args.add(loadFieldNode);
-                } catch (NoSuchFieldException e) {
-                    throw new GraalInternalError(e);
-                }
-            }
-        }
-        return last;
-    }
-
-    public static Kind getElementKind(Class<?> clazz) {
-        Class<?> componentType = clazz.getComponentType();
-        if (componentType == null) {
-            throw new IllegalArgumentException("Parameter type not supported: " + clazz);
-        }
-        if (componentType.isPrimitive()) {
-            return Kind.fromJavaClass(componentType);
-        }
-        throw new IllegalArgumentException("Parameter type not supported: " + clazz);
-    }
-
-    private static Kind getKind(Class<?> clazz) {
-        if (clazz == int.class || clazz == Integer.class) {
-            return Kind.Int;
-        } else if (clazz == long.class || clazz == Long.class) {
-            return Kind.Long;
-        } else if (clazz == char.class || clazz == Character.class) {
-            return Kind.Char;
-        } else if (clazz == byte.class || clazz == Byte.class) {
-            return Kind.Byte;
-        } else if (clazz == float.class || clazz == Float.class) {
-            return Kind.Float;
-        } else if (clazz == double.class || clazz == Double.class) {
-            return Kind.Double;
-        } else if (clazz == int[].class || clazz == long[].class || clazz == char[].class || clazz == byte[].class || clazz == float[].class || clazz == double[].class) {
-            return Kind.Object;
-        } else if (clazz == void.class) {
-            return Kind.Void;
-        } else {
-            throw new IllegalArgumentException("Type not supported: " + clazz);
-        }
-    }
-
-    @SuppressWarnings("unused")
-    public static Object libCall(Object argLoc, Object unused1, Object unused2) {
-        throw GraalInternalError.shouldNotReachHere("GNFI libCall method must not be called");
-    }
-}
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nfi/RawNativeCallNodeFactory.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2014, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.hotspot.nfi;
-
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.nodes.*;
-
-/**
- * Factory for creating a node that makes a direct call to a native function pointer.
- */
-public interface RawNativeCallNodeFactory {
-    FixedWithNextNode createRawCallNode(Kind returnType, JavaConstant functionPointer, ValueNode... args);
-}
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/SnippetAnchorNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/SnippetAnchorNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -42,7 +42,7 @@
         AbstractBeginNode prevBegin = AbstractBeginNode.prevBegin(this);
         replaceAtUsages(InputType.Anchor, prevBegin);
         replaceAtUsages(InputType.Guard, prevBegin);
-        if (hasNoUsages()) {
+        if (tool.allUsagesAvailable() && hasNoUsages()) {
             graph().removeFixed(this);
         }
     }
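The same guard is added to SnippetAnchorNode here and to ClassGetHubNode, HubGetClassNode and KlassLayoutHelperNode below: a node may only remove itself as unused when the canonicalizer guarantees that its usage list is complete. A minimal sketch of the rule with stand-in interfaces (not the real CanonicalizerTool/Node API):

    public class UsageGuardSketch {
        interface CanonicalizerTool {
            boolean allUsagesAvailable();
        }

        interface Node {
            boolean hasNoUsages();

            default Node canonical(CanonicalizerTool tool) {
                if (tool.allUsagesAvailable() && hasNoUsages()) {
                    return null; // safe to drop: no consumer can still appear later
                }
                return this; // usages present, or not all usages known yet: keep the node
            }
        }
    }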
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/AESCryptSubstitutions.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/AESCryptSubstitutions.java	Fri Apr 10 16:58:26 2015 -0700
@@ -28,11 +28,9 @@
 import sun.misc.*;
 
 import com.oracle.graal.api.meta.*;
-import com.oracle.graal.api.replacements.*;
 import com.oracle.graal.compiler.common.*;
 import com.oracle.graal.graph.Node.ConstantNodeParameter;
 import com.oracle.graal.graph.Node.NodeIntrinsic;
-import com.oracle.graal.hotspot.*;
 import com.oracle.graal.hotspot.nodes.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.nodes.extended.*;
@@ -41,27 +39,8 @@
 /**
  * Substitutions for {@code com.sun.crypto.provider.AESCrypt} methods.
  */
-@ClassSubstitution(className = "com.sun.crypto.provider.AESCrypt", optional = true, defaultGuard = AESCryptSubstitutions.Guard.class)
 public class AESCryptSubstitutions {
 
-    public static class Guard implements SubstitutionGuard {
-        private HotSpotVMConfig config;
-
-        public Guard(HotSpotVMConfig config) {
-            this.config = config;
-        }
-
-        public boolean execute() {
-            if (config.useAESIntrinsics) {
-                assert config.aescryptEncryptBlockStub != 0L;
-                assert config.aescryptDecryptBlockStub != 0L;
-                assert config.cipherBlockChainingEncryptAESCryptStub != 0L;
-                assert config.cipherBlockChainingDecryptAESCryptStub != 0L;
-            }
-            return config.useAESIntrinsics;
-        }
-    }
-
     static final long kOffset;
     static final Class<?> AESCryptClass;
 
@@ -77,12 +56,10 @@
         }
     }
 
-    @MethodSubstitution(isStatic = false)
     static void encryptBlock(Object rcvr, byte[] in, int inOffset, byte[] out, int outOffset) {
         crypt(rcvr, in, inOffset, out, outOffset, true);
     }
 
-    @MethodSubstitution(isStatic = false)
     static void decryptBlock(Object rcvr, byte[] in, int inOffset, byte[] out, int outOffset) {
         crypt(rcvr, in, inOffset, out, outOffset, false);
     }
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/CheckCastDynamicSnippets.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/CheckCastDynamicSnippets.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -67,7 +67,7 @@
 
     public static class Templates extends AbstractTemplates {
 
-        private final SnippetInfo dynamic = snippet(CheckCastDynamicSnippets.class, "checkcastDynamic");
+        private final SnippetInfo dynamic = snippet(CheckCastDynamicSnippets.class, "checkcastDynamic", SECONDARY_SUPER_CACHE_LOCATION);
 
         public Templates(HotSpotProviders providers, TargetDescription target) {
             super(providers, providers.getSnippetReflection(), target);
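Here and in the InstanceOfSnippets, LoadExceptionObjectSnippets and NewObjectSnippets hunks below, the snippet(...) calls now name the memory locations the snippet touches (for example SECONDARY_SUPER_CACHE_LOCATION), presumably so those side effects can be accounted for when the snippet is instantiated. A minimal sketch of passing such location identities alongside the snippet name, with stand-in types rather than the Graal SnippetTemplate API:

    import java.util.Arrays;
    import java.util.List;

    public class SnippetInfoSketch {
        static final class SnippetInfo {
            final String methodName;
            final List<String> privateLocations;

            SnippetInfo(String methodName, List<String> privateLocations) {
                this.methodName = methodName;
                this.privateLocations = privateLocations;
            }

            @Override
            public String toString() {
                return methodName + " touches " + privateLocations;
            }
        }

        static SnippetInfo snippet(String methodName, String... privateLocations) {
            return new SnippetInfo(methodName, Arrays.asList(privateLocations));
        }

        public static void main(String[] args) {
            System.out.println(snippet("checkcastDynamic", "SECONDARY_SUPER_CACHE_LOCATION"));
        }
    }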
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/CipherBlockChainingSubstitutions.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/CipherBlockChainingSubstitutions.java	Fri Apr 10 16:58:26 2015 -0700
@@ -39,7 +39,6 @@
 /**
  * Substitutions for {@code com.sun.crypto.provider.CipherBlockChaining} methods.
  */
-@ClassSubstitution(className = "com.sun.crypto.provider.CipherBlockChaining", optional = true, defaultGuard = AESCryptSubstitutions.Guard.class)
 public class CipherBlockChainingSubstitutions {
 
     private static final long embeddedCipherOffset;
@@ -67,7 +66,6 @@
         return AESCryptSubstitutions.AESCryptClass;
     }
 
-    @MethodSubstitution(isStatic = false)
     static int encrypt(Object rcvr, byte[] in, int inOffset, int inLength, byte[] out, int outOffset) {
         Object realReceiver = PiNode.piCastNonNull(rcvr, cipherBlockChainingClass);
         Object embeddedCipher = UnsafeLoadNode.load(realReceiver, embeddedCipherOffset, Kind.Object, LocationIdentity.any());
@@ -80,7 +78,6 @@
         }
     }
 
-    @MethodSubstitution(isStatic = false)
     static int decrypt(Object rcvr, byte[] in, int inOffset, int inLength, byte[] out, int outOffset) {
         Object realReceiver = PiNode.piCastNonNull(rcvr, cipherBlockChainingClass);
         Object embeddedCipher = UnsafeLoadNode.load(realReceiver, embeddedCipherOffset, Kind.Object, LocationIdentity.any());
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/ClassGetHubNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/ClassGetHubNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -57,7 +57,7 @@
 
     @Override
     public Node canonical(CanonicalizerTool tool) {
-        if (hasNoUsages()) {
+        if (tool.allUsagesAvailable() && hasNoUsages()) {
             return null;
         } else {
             if (clazz.isConstant()) {
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/HotSpotSubstitutions.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/HotSpotSubstitutions.java	Fri Apr 10 16:58:26 2015 -0700
@@ -22,7 +22,6 @@
  */
 package com.oracle.graal.hotspot.replacements;
 
-import java.lang.reflect.*;
 import java.util.zip.*;
 
 import sun.misc.*;
@@ -39,22 +38,8 @@
 @ServiceProvider(ReplacementsProvider.class)
 public class HotSpotSubstitutions implements ReplacementsProvider {
 
-    static class NamedType implements Type {
-        private final String name;
-
-        public NamedType(String name) {
-            this.name = name;
-        }
-
-        @Override
-        public String toString() {
-            return name;
-        }
-    }
-
     @Override
     public void registerReplacements(MetaAccessProvider metaAccess, LoweringProvider loweringProvider, SnippetReflectionProvider snippetReflection, Replacements replacements, TargetDescription target) {
-        replacements.registerSubstitutions(Object.class, ObjectSubstitutions.class);
         replacements.registerSubstitutions(System.class, SystemSubstitutions.class);
         replacements.registerSubstitutions(Thread.class, ThreadSubstitutions.class);
         replacements.registerSubstitutions(Unsafe.class, UnsafeSubstitutions.class);
@@ -62,7 +47,5 @@
         replacements.registerSubstitutions(CRC32.class, CRC32Substitutions.class);
         replacements.registerSubstitutions(Reflection.class, ReflectionSubstitutions.class);
         replacements.registerSubstitutions(CompilerToVMImpl.class, CompilerToVMImplSubstitutions.class);
-        replacements.registerSubstitutions(new NamedType("com.sun.crypto.provider.AESCrypt"), AESCryptSubstitutions.class);
-        replacements.registerSubstitutions(new NamedType("com.sun.crypto.provider.CipherBlockChaining"), CipherBlockChainingSubstitutions.class);
     }
 }
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/HubGetClassNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/HubGetClassNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -53,7 +53,7 @@
 
     @Override
     public Node canonical(CanonicalizerTool tool) {
-        if (hasNoUsages()) {
+        if (tool.allUsagesAvailable() && hasNoUsages()) {
             return null;
         } else {
             MetaAccessProvider metaAccess = tool.getMetaAccess();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/IdentityHashCodeNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.hotspot.replacements;
+
+import static com.oracle.graal.compiler.common.GraalOptions.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.graph.*;
+import com.oracle.graal.hotspot.meta.*;
+import com.oracle.graal.nodeinfo.*;
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.CallTargetNode.InvokeKind;
+import com.oracle.graal.replacements.nodes.*;
+
+@NodeInfo
+public final class IdentityHashCodeNode extends PureFunctionMacroNode {
+
+    public static final NodeClass<IdentityHashCodeNode> TYPE = NodeClass.create(IdentityHashCodeNode.class);
+
+    public IdentityHashCodeNode(InvokeKind invokeKind, ResolvedJavaMethod targetMethod, int bci, JavaType returnType, ValueNode object) {
+        super(TYPE, invokeKind, targetMethod, bci, returnType, object);
+    }
+
+    @Override
+    protected JavaConstant evaluate(JavaConstant param, MetaAccessProvider metaAccess) {
+        if (ImmutableCode.getValue() || param.isNull()) {
+            return null;
+        }
+        HotSpotObjectConstant c = (HotSpotObjectConstant) param;
+        return JavaConstant.forInt(c.getIdentityHashCode());
+    }
+}
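IdentityHashCodeNode.evaluate above folds the identity hash only when the argument is a non-null compile-time constant and the code is not compiled as immutable/AOT code, presumably because a baked-in identity hash would not be valid across VM runs. A standalone sketch of the same folding rule in plain Java:

    import java.util.OptionalInt;

    public class IdentityHashFoldSketch {
        static OptionalInt tryFold(Object compileTimeConstant, boolean immutableCode) {
            if (immutableCode || compileTimeConstant == null) {
                return OptionalInt.empty(); // keep the runtime call
            }
            return OptionalInt.of(System.identityHashCode(compileTimeConstant));
        }

        public static void main(String[] args) {
            System.out.println(tryFold("constant receiver", false)); // folds to a concrete value
            System.out.println(tryFold(null, false));                // stays a runtime call
        }
    }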
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/InstanceOfSnippets.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/InstanceOfSnippets.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -216,9 +216,9 @@
         private final SnippetInfo instanceofWithProfile = snippet(InstanceOfSnippets.class, "instanceofWithProfile");
         private final SnippetInfo instanceofExact = snippet(InstanceOfSnippets.class, "instanceofExact");
         private final SnippetInfo instanceofPrimary = snippet(InstanceOfSnippets.class, "instanceofPrimary");
-        private final SnippetInfo instanceofSecondary = snippet(InstanceOfSnippets.class, "instanceofSecondary");
-        private final SnippetInfo instanceofDynamic = snippet(InstanceOfSnippets.class, "instanceofDynamic");
-        private final SnippetInfo isAssignableFrom = snippet(InstanceOfSnippets.class, "isAssignableFrom");
+        private final SnippetInfo instanceofSecondary = snippet(InstanceOfSnippets.class, "instanceofSecondary", SECONDARY_SUPER_CACHE_LOCATION);
+        private final SnippetInfo instanceofDynamic = snippet(InstanceOfSnippets.class, "instanceofDynamic", SECONDARY_SUPER_CACHE_LOCATION);
+        private final SnippetInfo isAssignableFrom = snippet(InstanceOfSnippets.class, "isAssignableFrom", SECONDARY_SUPER_CACHE_LOCATION);
 
         public Templates(HotSpotProviders providers, TargetDescription target) {
             super(providers, providers.getSnippetReflection(), target);
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/KlassLayoutHelperNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/KlassLayoutHelperNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -80,7 +80,7 @@
 
     @Override
     public Node canonical(CanonicalizerTool tool) {
-        if (hasNoUsages()) {
+        if (tool.allUsagesAvailable() && hasNoUsages()) {
             return null;
         } else {
             if (klass.isConstant()) {
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/LoadExceptionObjectSnippets.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/LoadExceptionObjectSnippets.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -69,7 +69,7 @@
 
     public static class Templates extends AbstractTemplates {
 
-        private final SnippetInfo loadException = snippet(LoadExceptionObjectSnippets.class, "loadException");
+        private final SnippetInfo loadException = snippet(LoadExceptionObjectSnippets.class, "loadException", EXCEPTION_OOP_LOCATION, EXCEPTION_PC_LOCATION);
 
         public Templates(HotSpotProviders providers, TargetDescription target) {
             super(providers, providers.getSnippetReflection(), target);
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/NewObjectSnippets.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/NewObjectSnippets.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -401,11 +401,13 @@
 
     public static class Templates extends AbstractTemplates {
 
-        private final SnippetInfo allocateInstance = snippet(NewObjectSnippets.class, "allocateInstance");
-        private final SnippetInfo allocateArray = snippet(NewObjectSnippets.class, "allocateArray");
-        private final SnippetInfo allocateArrayDynamic = snippet(NewObjectSnippets.class, "allocateArrayDynamic");
-        private final SnippetInfo allocateInstanceDynamic = snippet(NewObjectSnippets.class, "allocateInstanceDynamic");
-        private final SnippetInfo newmultiarray = snippet(NewObjectSnippets.class, "newmultiarray");
+        private final SnippetInfo allocateInstance = snippet(NewObjectSnippets.class, "allocateInstance", INIT_LOCATION, MARK_WORD_LOCATION, HUB_WRITE_LOCATION, TLAB_TOP_LOCATION, TLAB_END_LOCATION);
+        private final SnippetInfo allocateArray = snippet(NewObjectSnippets.class, "allocateArray", INIT_LOCATION, MARK_WORD_LOCATION, HUB_WRITE_LOCATION, TLAB_TOP_LOCATION, TLAB_END_LOCATION);
+        private final SnippetInfo allocateArrayDynamic = snippet(NewObjectSnippets.class, "allocateArrayDynamic", INIT_LOCATION, MARK_WORD_LOCATION, HUB_WRITE_LOCATION, TLAB_TOP_LOCATION,
+                        TLAB_END_LOCATION);
+        private final SnippetInfo allocateInstanceDynamic = snippet(NewObjectSnippets.class, "allocateInstanceDynamic", INIT_LOCATION, MARK_WORD_LOCATION, HUB_WRITE_LOCATION, TLAB_TOP_LOCATION,
+                        TLAB_END_LOCATION);
+        private final SnippetInfo newmultiarray = snippet(NewObjectSnippets.class, "newmultiarray", INIT_LOCATION, TLAB_TOP_LOCATION, TLAB_END_LOCATION);
         private final SnippetInfo verifyHeap = snippet(NewObjectSnippets.class, "verifyHeap");
 
         public Templates(HotSpotProviders providers, TargetDescription target) {
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/ObjectSubstitutions.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/ObjectSubstitutions.java	Fri Apr 10 16:58:26 2015 -0700
@@ -24,30 +24,12 @@
 
 import static com.oracle.graal.hotspot.replacements.HotSpotReplacementsUtil.*;
 
-import com.oracle.graal.api.replacements.*;
-import com.oracle.graal.hotspot.word.*;
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.nodes.java.*;
-
 /**
  * Substitutions for {@link java.lang.Object} methods.
  */
-@ClassSubstitution(java.lang.Object.class)
 public class ObjectSubstitutions {
 
-    @MethodSubstitution(isStatic = false, forced = true)
-    public static Class<?> getClass(final Object thisObj) {
-        KlassPointer hub = loadHub(GuardingPiNode.guardingNonNull(thisObj));
-        return HubGetClassNode.readClass(hub);
-    }
-
-    @MethodSubstitution(isStatic = false)
     public static int hashCode(final Object thisObj) {
         return computeHashCode(thisObj);
     }
-
-    @MethodSubstitution(value = "<init>", isStatic = false, forced = true)
-    public static void init(Object thisObj) {
-        RegisterFinalizerNode.register(thisObj);
-    }
 }
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/SystemIdentityHashCodeNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.hotspot.replacements;
-
-import static com.oracle.graal.compiler.common.GraalOptions.*;
-
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.graph.*;
-import com.oracle.graal.hotspot.meta.*;
-import com.oracle.graal.nodeinfo.*;
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.nodes.CallTargetNode.InvokeKind;
-import com.oracle.graal.replacements.nodes.*;
-
-@NodeInfo
-public final class SystemIdentityHashCodeNode extends PureFunctionMacroNode {
-
-    public static final NodeClass<SystemIdentityHashCodeNode> TYPE = NodeClass.create(SystemIdentityHashCodeNode.class);
-
-    public SystemIdentityHashCodeNode(InvokeKind invokeKind, ResolvedJavaMethod targetMethod, int bci, JavaType returnType, ValueNode object) {
-        super(TYPE, invokeKind, targetMethod, bci, returnType, object);
-    }
-
-    @Override
-    protected JavaConstant evaluate(JavaConstant param, MetaAccessProvider metaAccess) {
-        if (ImmutableCode.getValue() || param.isNull()) {
-            return null;
-        }
-        HotSpotObjectConstant c = (HotSpotObjectConstant) param;
-        return JavaConstant.forInt(c.getIdentityHashCode());
-    }
-}
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/WriteBarrierSnippets.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/WriteBarrierSnippets.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -334,13 +334,13 @@
 
     public static class Templates extends AbstractTemplates {
 
-        private final SnippetInfo serialWriteBarrier = snippet(WriteBarrierSnippets.class, "serialWriteBarrier");
+        private final SnippetInfo serialWriteBarrier = snippet(WriteBarrierSnippets.class, "serialWriteBarrier", GC_CARD_LOCATION);
         private final SnippetInfo serialArrayRangeWriteBarrier = snippet(WriteBarrierSnippets.class, "serialArrayRangeWriteBarrier");
-        private final SnippetInfo g1PreWriteBarrier = snippet(WriteBarrierSnippets.class, "g1PreWriteBarrier");
-        private final SnippetInfo g1ReferentReadBarrier = snippet(WriteBarrierSnippets.class, "g1PreWriteBarrier");
-        private final SnippetInfo g1PostWriteBarrier = snippet(WriteBarrierSnippets.class, "g1PostWriteBarrier");
-        private final SnippetInfo g1ArrayRangePreWriteBarrier = snippet(WriteBarrierSnippets.class, "g1ArrayRangePreWriteBarrier");
-        private final SnippetInfo g1ArrayRangePostWriteBarrier = snippet(WriteBarrierSnippets.class, "g1ArrayRangePostWriteBarrier");
+        private final SnippetInfo g1PreWriteBarrier = snippet(WriteBarrierSnippets.class, "g1PreWriteBarrier", GC_INDEX_LOCATION, GC_LOG_LOCATION);
+        private final SnippetInfo g1ReferentReadBarrier = snippet(WriteBarrierSnippets.class, "g1PreWriteBarrier", GC_INDEX_LOCATION, GC_LOG_LOCATION);
+        private final SnippetInfo g1PostWriteBarrier = snippet(WriteBarrierSnippets.class, "g1PostWriteBarrier", GC_CARD_LOCATION, GC_INDEX_LOCATION, GC_LOG_LOCATION);
+        private final SnippetInfo g1ArrayRangePreWriteBarrier = snippet(WriteBarrierSnippets.class, "g1ArrayRangePreWriteBarrier", GC_INDEX_LOCATION, GC_LOG_LOCATION);
+        private final SnippetInfo g1ArrayRangePostWriteBarrier = snippet(WriteBarrierSnippets.class, "g1ArrayRangePostWriteBarrier", GC_CARD_LOCATION, GC_INDEX_LOCATION, GC_LOG_LOCATION);
 
         private final CompressEncoding oopEncoding;
 
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/arraycopy/UnsafeArrayCopyNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/arraycopy/UnsafeArrayCopyNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -34,7 +34,7 @@
 import com.oracle.graal.replacements.SnippetTemplate.Arguments;
 
 @NodeInfo(allowedUsageTypes = {InputType.Memory})
-public final class UnsafeArrayCopyNode extends ArrayRangeWriteNode implements Lowerable, MemoryCheckpoint.Single {
+public final class UnsafeArrayCopyNode extends ArrayRangeWriteNode implements Lowerable, MemoryCheckpoint.Single, MemoryAccess {
 
     public static final NodeClass<UnsafeArrayCopyNode> TYPE = NodeClass.create(UnsafeArrayCopyNode.class);
     @Input ValueNode src;
@@ -44,6 +44,8 @@
     @Input ValueNode length;
     @OptionalInput ValueNode layoutHelper;
 
+    @OptionalInput(InputType.Memory) MemoryNode lastLocationAccess;
+
     protected Kind elementKind;
 
     public UnsafeArrayCopyNode(ValueNode src, ValueNode srcPos, ValueNode dest, ValueNode destPos, ValueNode length, ValueNode layoutHelper, Kind elementKind) {
@@ -122,6 +124,15 @@
         return any();
     }
 
+    public MemoryNode getLastLocationAccess() {
+        return lastLocationAccess;
+    }
+
+    public void setLastLocationAccess(MemoryNode lla) {
+        updateUsagesInterface(lastLocationAccess, lla);
+        lastLocationAccess = lla;
+    }
+
     @NodeIntrinsic
     public static native void arraycopy(Object src, int srcPos, Object dest, int destPos, int length, @ConstantNodeParameter Kind elementKind);
 
--- a/graal/com.oracle.graal.hotspotvmconfig/src/com/oracle/graal/hotspotvmconfig/HotSpotVMConfigProcessor.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.hotspotvmconfig/src/com/oracle/graal/hotspotvmconfig/HotSpotVMConfigProcessor.java	Fri Apr 10 16:58:26 2015 -0700
@@ -130,11 +130,11 @@
         "}",
         "",
         "#define set_boolean(name, value) vmconfig_oop->bool_field_put(fs.offset(), value)",
-        "#define set_byte(name, value) vmconfig_oop->byte_field_put(fs.offset(), (jbyte)value)",
-        "#define set_short(name, value) vmconfig_oop->short_field_put(fs.offset(), (jshort)value)",
-        "#define set_int(name, value) vmconfig_oop->int_field_put(fs.offset(), (int)value)",
+        "#define set_byte(name, value) vmconfig_oop->byte_field_put(fs.offset(), (jbyte)(value))",
+        "#define set_short(name, value) vmconfig_oop->short_field_put(fs.offset(), (jshort)(value))",
+        "#define set_int(name, value) vmconfig_oop->int_field_put(fs.offset(), (int)(value))",
         "#define set_long(name, value) vmconfig_oop->long_field_put(fs.offset(), value)",
-        "#define set_address(name, value) do { set_long(name, (jlong) value); } while (0)",
+        "#define set_address(name, value) do { set_long(name, (jlong)(value)); } while (0)",
         "",
         "#define set_optional_boolean_flag(varName, flagName) do { bool flagValue; if (boolAt((char*) flagName, &flagValue)) { set_boolean(varName, flagValue); } } while (0)",
         "#define set_optional_int_flag(varName, flagName) do { intx flagValue; if (intxAt((char*) flagName, &flagValue)) { set_int(varName, flagValue); } } while (0)",
--- a/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPhase.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPhase.java	Fri Apr 10 16:58:26 2015 -0700
@@ -37,7 +37,6 @@
 
 import com.oracle.graal.api.code.*;
 import com.oracle.graal.api.meta.*;
-import com.oracle.graal.api.replacements.*;
 import com.oracle.graal.bytecode.*;
 import com.oracle.graal.compiler.common.*;
 import com.oracle.graal.compiler.common.calc.*;
@@ -50,7 +49,7 @@
 import com.oracle.graal.graph.iterators.*;
 import com.oracle.graal.graphbuilderconf.*;
 import com.oracle.graal.graphbuilderconf.InlineInvokePlugin.InlineInfo;
-import com.oracle.graal.graphbuilderconf.InvocationPlugins.Receiver;
+import com.oracle.graal.graphbuilderconf.InvocationPlugins.InvocationPluginReceiver;
 import com.oracle.graal.java.AbstractBytecodeParser.ReplacementContext;
 import com.oracle.graal.java.BciBlockMapping.BciBlock;
 import com.oracle.graal.java.BciBlockMapping.ExceptionDispatchBlock;
@@ -79,7 +78,7 @@
 
     @Override
     protected void run(StructuredGraph graph, HighTierContext context) {
-        new Instance(context.getMetaAccess(), context.getStampProvider(), null, context.getConstantReflection(), graphBuilderConfig, context.getOptimisticOptimizations(), null).run(graph);
+        new Instance(context.getMetaAccess(), context.getStampProvider(), context.getConstantReflection(), graphBuilderConfig, context.getOptimisticOptimizations(), null).run(graph);
     }
 
     public GraphBuilderConfiguration getGraphBuilderConfig() {
@@ -99,26 +98,19 @@
         private final OptimisticOptimizations optimisticOpts;
         private final StampProvider stampProvider;
         private final ConstantReflectionProvider constantReflection;
-        private final SnippetReflectionProvider snippetReflectionProvider;
 
-        public Instance(MetaAccessProvider metaAccess, StampProvider stampProvider, SnippetReflectionProvider snippetReflectionProvider, ConstantReflectionProvider constantReflection,
-                        GraphBuilderConfiguration graphBuilderConfig, OptimisticOptimizations optimisticOpts, ReplacementContext initialReplacementContext) {
+        public Instance(MetaAccessProvider metaAccess, StampProvider stampProvider, ConstantReflectionProvider constantReflection, GraphBuilderConfiguration graphBuilderConfig,
+                        OptimisticOptimizations optimisticOpts, ReplacementContext initialReplacementContext) {
             this.graphBuilderConfig = graphBuilderConfig;
             this.optimisticOpts = optimisticOpts;
             this.metaAccess = metaAccess;
             this.stampProvider = stampProvider;
             this.constantReflection = constantReflection;
-            this.snippetReflectionProvider = snippetReflectionProvider;
             this.initialReplacementContext = initialReplacementContext;
 
             assert metaAccess != null;
         }
 
-        public Instance(MetaAccessProvider metaAccess, StampProvider stampProvider, ConstantReflectionProvider constantReflection, GraphBuilderConfiguration graphBuilderConfig,
-                        OptimisticOptimizations optimisticOpts, ReplacementContext initialReplacementContext) {
-            this(metaAccess, stampProvider, null, constantReflection, graphBuilderConfig, optimisticOpts, initialReplacementContext);
-        }
-
         @Override
         protected void run(@SuppressWarnings("hiding") StructuredGraph graph) {
             ResolvedJavaMethod method = graph.method();
@@ -133,7 +125,7 @@
                 frameState.initializeForMethodStart(graphBuilderConfig.eagerResolving() || replacementContext != null, graphBuilderConfig.getPlugins().getParameterPlugin());
                 parser.build(graph.start(), frameState);
 
-                parser.connectLoopEndToBegin();
+                connectLoopEndToBegin(graph);
 
                 // Remove dead parameters.
                 for (ParameterNode param : graph.getNodes(ParameterNode.TYPE)) {
@@ -228,42 +220,6 @@
             }
         }
 
-        static class InvocationPluginReceiver implements Receiver {
-            final BytecodeParser parser;
-            ValueNode[] args;
-            ValueNode value;
-
-            public InvocationPluginReceiver(BytecodeParser parser) {
-                this.parser = parser;
-            }
-
-            @Override
-            public ValueNode get() {
-                assert args != null : "Cannot get the receiver of a static method";
-                if (value == null) {
-                    value = parser.nullCheckedValue(args[0]);
-                    if (value != args[0]) {
-                        args[0] = value;
-                    }
-                }
-                return value;
-            }
-
-            @Override
-            public boolean isConstant() {
-                return args[0].isConstant();
-            }
-
-            InvocationPluginReceiver init(ResolvedJavaMethod targetMethod, ValueNode[] newArgs) {
-                if (!targetMethod.isStatic()) {
-                    this.args = newArgs;
-                    this.value = null;
-                    return this;
-                }
-                return null;
-            }
-        }
-
         public class BytecodeParser extends AbstractBytecodeParser implements GraphBuilderContext {
 
             private BciBlockMapping blockMap;
@@ -417,28 +373,6 @@
                 }
             }
 
-            /**
-             * Gets a version of a given value that has a
-             * {@linkplain StampTool#isPointerNonNull(ValueNode) non-null} stamp.
-             */
-            ValueNode nullCheckedValue(ValueNode value) {
-                if (!StampTool.isPointerNonNull(value.stamp())) {
-                    IsNullNode condition = graph.unique(new IsNullNode(value));
-                    ObjectStamp receiverStamp = (ObjectStamp) value.stamp();
-                    Stamp stamp = receiverStamp.join(objectNonNull());
-                    FixedGuardNode fixedGuard = append(new FixedGuardNode(condition, NullCheckException, InvalidateReprofile, true));
-                    PiNode nonNullReceiver = graph.unique(new PiNode(value, stamp));
-                    nonNullReceiver.setGuard(fixedGuard);
-                    // TODO: Propogating the non-null into the frame state would
-                    // remove subsequent null-checks on the same value. However,
-                    // it currently causes an assertion failure when merging states.
-                    //
-                    // frameState.replace(value, nonNullReceiver);
-                    return nonNullReceiver;
-                }
-                return value;
-            }
-
             private void detectLoops(FixedNode startInstruction) {
                 NodeBitMap visited = graph.createNodeBitMap();
                 NodeBitMap active = graph.createNodeBitMap();
@@ -1194,15 +1128,17 @@
                         return;
                     }
 
-                    if (tryInvocationPlugin(args, targetMethod, resultType)) {
-                        if (TraceParserPlugins.getValue()) {
-                            traceWithContext("used invocation plugin for %s", targetMethod.format("%h.%n(%p)"));
+                    if (invokeKind.isDirect()) {
+                        if (tryInvocationPlugin(args, targetMethod, resultType)) {
+                            if (TraceParserPlugins.getValue()) {
+                                traceWithContext("used invocation plugin for %s", targetMethod.format("%h.%n(%p)"));
+                            }
+                            return;
                         }
-                        return;
-                    }
 
-                    if (tryInline(args, targetMethod, invokeKind, returnType)) {
-                        return;
+                        if (tryInline(args, targetMethod, returnType)) {
+                            return;
+                        }
                     }
                 } finally {
                     currentInvokeReturnType = null;
@@ -1301,7 +1237,7 @@
                     }
 
                     InvocationPluginAssertions assertions = assertionsEnabled() ? new InvocationPluginAssertions(plugin, args, targetMethod, resultType) : null;
-                    if (InvocationPlugin.execute(this, targetMethod, plugin, invocationPluginReceiver.init(targetMethod, args), args)) {
+                    if (plugin.execute(this, targetMethod, invocationPluginReceiver.init(targetMethod, args), args)) {
                         assert assertions.check(true);
                         return true;
                     }
@@ -1315,22 +1251,26 @@
                 return plugin != null && plugin.apply(this, targetMethod, args);
             }
 
-            private boolean tryInline(ValueNode[] args, ResolvedJavaMethod targetMethod, InvokeKind invokeKind, JavaType returnType) {
+            private boolean tryInline(ValueNode[] args, ResolvedJavaMethod targetMethod, JavaType returnType) {
                 InlineInvokePlugin plugin = graphBuilderConfig.getPlugins().getInlineInvokePlugin();
                 boolean canBeInlined = parsingReplacement() || targetMethod.canBeInlined();
-                if (plugin == null || !invokeKind.isDirect() || !canBeInlined) {
+                if (plugin == null || !canBeInlined) {
                     return false;
                 }
                 InlineInfo inlineInfo = plugin.getInlineInfo(this, targetMethod, args, returnType);
                 if (inlineInfo != null) {
-                    return inline(plugin, targetMethod, inlineInfo, args);
+                    return inline(plugin, targetMethod, inlineInfo.methodToInline, inlineInfo.isReplacement, inlineInfo.isIntrinsic, args);
                 }
                 return false;
             }
 
-            public boolean inline(InlineInvokePlugin plugin, ResolvedJavaMethod targetMethod, InlineInfo inlineInfo, ValueNode[] args) {
+            public void intrinsify(ResolvedJavaMethod targetMethod, ResolvedJavaMethod substitute, ValueNode[] args) {
+                boolean res = inline(null, targetMethod, substitute, true, true, args);
+                assert res : "failed to inline " + substitute;
+            }
+
+            private boolean inline(InlineInvokePlugin plugin, ResolvedJavaMethod targetMethod, ResolvedJavaMethod inlinedMethod, boolean isReplacement, boolean isIntrinsic, ValueNode[] args) {
                 int bci = bci();
-                ResolvedJavaMethod inlinedMethod = inlineInfo.methodToInline;
                 if (TraceInlineDuringParsing.getValue() || TraceParserPlugins.getValue()) {
                     if (targetMethod.equals(inlinedMethod)) {
                         traceWithContext("inlining call to %s", inlinedMethod.format("%h.%n(%p)"));
@@ -1366,9 +1306,9 @@
                         }
                     }
                 } else {
-                    if (context == null && inlineInfo.isReplacement) {
+                    if (context == null && isReplacement) {
                         assert !inlinedMethod.equals(targetMethod);
-                        if (inlineInfo.isIntrinsic) {
+                        if (isIntrinsic) {
                             context = new IntrinsicContext(targetMethod, inlinedMethod, args, bci);
                         } else {
                             context = new ReplacementContext(targetMethod, inlinedMethod);
@@ -2023,30 +1963,6 @@
                 }
             }
 
-            /**
-             * Remove loop header without loop ends. This can happen with degenerated loops like
-             * this one:
-             *
-             * <pre>
-             * for (;;) {
-             *     try {
-             *         break;
-             *     } catch (UnresolvedException iioe) {
-             *     }
-             * }
-             * </pre>
-             */
-            private void connectLoopEndToBegin() {
-                for (LoopBeginNode begin : graph.getNodes(LoopBeginNode.TYPE)) {
-                    if (begin.loopEnds().isEmpty()) {
-                        assert begin.forwardEndCount() == 1;
-                        graph.reduceDegenerateLoopBegin(begin);
-                    } else {
-                        GraphUtil.normalizeLoopBegin(begin);
-                    }
-                }
-            }
-
             private void createUnwind() {
                 assert frameState.stackSize() == 1 : frameState;
                 ValueNode exception = frameState.apop();
@@ -2447,10 +2363,6 @@
                 return constantReflection;
             }
 
-            public SnippetReflectionProvider getSnippetReflection() {
-                return snippetReflectionProvider;
-            }
-
             /**
              * Gets the graph being processed by this builder.
              */
@@ -2513,4 +2425,27 @@
         assert assertionsEnabled = true;
         return assertionsEnabled;
     }
+
+    /**
+     * Remove loop header without loop ends. This can happen with degenerated loops like this one:
+     *
+     * <pre>
+     * for (;;) {
+     *     try {
+     *         break;
+     *     } catch (UnresolvedException iioe) {
+     *     }
+     * }
+     * </pre>
+     */
+    public static void connectLoopEndToBegin(StructuredGraph graph) {
+        for (LoopBeginNode begin : graph.getNodes(LoopBeginNode.TYPE)) {
+            if (begin.loopEnds().isEmpty()) {
+                assert begin.forwardEndCount() == 1;
+                graph.reduceDegenerateLoopBegin(begin);
+            } else {
+                GraphUtil.normalizeLoopBegin(begin);
+            }
+        }
+    }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.jtt/src/com/oracle/graal/jtt/lang/Math_exact.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.jtt.lang;
+
+import org.junit.*;
+
+import com.oracle.graal.jtt.*;
+
+/*
+ */
+public class Math_exact extends JTTTest {
+
+    public static int testIntAddExact(int a, int b) {
+        return Math.addExact(a, b);
+    }
+
+    @Test
+    public void runTestIntAddExact() throws Throwable {
+        runTest("testIntAddExact", 1, 2);
+        runTest("testIntAddExact", 1, Integer.MAX_VALUE);
+        runTest("testIntAddExact", -1, Integer.MIN_VALUE);
+    }
+
+    public static long testLongAddExact(long a, long b) {
+        return Math.addExact(a, b);
+    }
+
+    @Test
+    public void runTestLongAddExact() throws Throwable {
+        runTest("testLongAddExact", 1L, 2L);
+        runTest("testLongAddExact", 1L, Long.MAX_VALUE);
+        runTest("testLongAddExact", -1L, Long.MIN_VALUE);
+    }
+
+    public static int testIntSubExact(int a, int b) {
+        return Math.subtractExact(a, b);
+    }
+
+    @Test
+    public void runTestIntSubExact() throws Throwable {
+        runTest("testIntSubExact", 1, 2);
+        runTest("testIntSubExact", -2, Integer.MAX_VALUE);
+        runTest("testIntSubExact", 2, Integer.MIN_VALUE);
+    }
+
+    public static long testLongSubExact(long a, long b) {
+        return Math.subtractExact(a, b);
+    }
+
+    @Test
+    public void runTestLongSubExact() throws Throwable {
+        runTest("testLongSubExact", 1L, 2L);
+        runTest("testLongSubExact", -2L, Long.MAX_VALUE);
+        runTest("testLongSubExact", 2L, Long.MIN_VALUE);
+    }
+
+    public static int testIntMulExact(int a, int b) {
+        return Math.multiplyExact(a, b);
+    }
+
+    @Test
+    public void runTestIntMulExact() throws Throwable {
+        runTest("testIntMulExact", 1, 2);
+        runTest("testIntMulExact", -2, Integer.MAX_VALUE);
+        runTest("testIntMulExact", 2, Integer.MIN_VALUE);
+    }
+
+    public static long testLongMulExact(long a, long b) {
+        return Math.multiplyExact(a, b);
+    }
+
+    @Test
+    public void runTestLongMulExact() throws Throwable {
+        runTest("testLongMulExact", 1L, 2L);
+        runTest("testLongMulExact", 2L, Long.MAX_VALUE);
+        runTest("testLongMulExact", -2L, Long.MIN_VALUE);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.jtt/src/com/oracle/graal/jtt/lang/Object_hashCode02.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+/*
+ */
+package com.oracle.graal.jtt.lang;
+
+import java.time.*;
+import java.util.*;
+
+import org.junit.*;
+
+import com.oracle.graal.jtt.*;
+
+public final class Object_hashCode02 extends JTTTest {
+
+    public static final Object obj1 = new Object();
+    public static final Object obj2 = DayOfWeek.FRIDAY;
+    public static final Object obj3 = new HashMap<>();
+
+    public static int test(int a) {
+        if (a == 1) {
+            return obj1.hashCode();
+        }
+        if (a == 2) {
+            return obj2.hashCode();
+        }
+        return obj3.hashCode();
+    }
+
+    @Test
+    public void run0() throws Throwable {
+        runTest("test", 1);
+    }
+
+    @Test
+    public void run1() throws Throwable {
+        runTest("test", 2);
+    }
+
+    @Test
+    public void run2() throws Throwable {
+        runTest("test", 3);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ArithmeticLIRGenerator.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.lir.amd64;
+
+import com.oracle.graal.api.meta.*;
+
+/**
+ * This interface can be used to generate AMD64 LIR for arithmetic operations.
+ */
+public interface AMD64ArithmeticLIRGenerator {
+
+    Value emitMathLog(Value input, boolean base10);
+
+    Value emitMathCos(Value input);
+
+    Value emitMathSin(Value input);
+
+    Value emitMathTan(Value input);
+}
--- a/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCArithmetic.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCArithmetic.java	Fri Apr 10 16:58:26 2015 -0700
@@ -264,9 +264,11 @@
             case IMULCC:
                 throw GraalInternalError.unimplemented();
             case IDIV:
+                masm.sra(asRegister(src1), 0, asRegister(src1));
                 masm.sdivx(asIntReg(src1), constant, asIntReg(dst));
                 break;
             case IUDIV:
+                masm.srl(asRegister(src1), 0, asRegister(src1));
                 masm.udivx(asIntReg(src1), constant, asIntReg(dst));
                 break;
             case IAND:
@@ -398,8 +400,8 @@
                 masm.sdivx(asIntReg(src1), asIntReg(src2), asIntReg(dst));
                 break;
             case IUDIV:
-                masm.signx(asIntReg(src1), asIntReg(src1));
-                masm.signx(asIntReg(src2), asIntReg(src2));
+                masm.srl(asIntReg(src1), 0, asIntReg(src1));
+                masm.srl(asIntReg(src2), 0, asIntReg(src2));
                 delaySlotLir.emitControlTransfer(crb, masm);
                 exceptionOffset = masm.position();
                 masm.udivx(asIntReg(src1), asIntReg(src2), asIntReg(dst));
@@ -874,6 +876,8 @@
             assert isRegister(x) && isRegister(y) && isRegister(result) && isRegister(scratch);
             switch (opcode) {
                 case IMUL:
+                    masm.sra(asRegister(x), 0, asRegister(x));
+                    masm.sra(asRegister(y), 0, asRegister(y));
                     masm.mulx(asIntReg(x), asIntReg(y), asIntReg(result));
                     masm.srax(asIntReg(result), 32, asIntReg(result));
                     break;
--- a/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCMathIntrinsicOp.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCMathIntrinsicOp.java	Fri Apr 10 16:58:26 2015 -0700
@@ -35,11 +35,6 @@
 
     public enum IntrinsicOpcode {
         SQRT,
-        SIN,
-        COS,
-        TAN,
-        LOG,
-        LOG10,
         ABS
     }
 
@@ -83,11 +78,6 @@
                         GraalInternalError.shouldNotReachHere();
                 }
                 break;
-            case LOG:
-            case LOG10:
-            case SIN:
-            case COS:
-            case TAN:
             default:
                 throw GraalInternalError.shouldNotReachHere();
         }
--- a/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/gen/ArithmeticLIRGenerator.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.lir/src/com/oracle/graal/lir/gen/ArithmeticLIRGenerator.java	Fri Apr 10 16:58:26 2015 -0700
@@ -87,12 +87,4 @@
     Value emitMathAbs(Value input);
 
     Value emitMathSqrt(Value input);
-
-    Value emitMathLog(Value input, boolean base10);
-
-    Value emitMathCos(Value input);
-
-    Value emitMathSin(Value input);
-
-    Value emitMathTan(Value input);
 }
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/AbstractMergeNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/AbstractMergeNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -227,7 +227,7 @@
                 end.safeDelete();
             }
             for (PhiNode phi : phis) {
-                if (phi.isAlive() && phi.hasNoUsages()) {
+                if (tool.allUsagesAvailable() && phi.isAlive() && phi.hasNoUsages()) {
                     GraphUtil.killWithUnusedFloatingInputs(phi);
                 }
             }
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/BreakpointNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/BreakpointNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -51,7 +51,7 @@
     public static final NodeClass<BreakpointNode> TYPE = NodeClass.create(BreakpointNode.class);
     @Input NodeInputList<ValueNode> arguments;
 
-    public BreakpointNode(ValueNode[] arguments) {
+    public BreakpointNode(ValueNode... arguments) {
         super(TYPE, StampFactory.forVoid());
         this.arguments = new NodeInputList<>(this, arguments);
     }
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/ConditionAnchorNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/ConditionAnchorNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -76,7 +76,7 @@
                 return new ValueAnchorNode(null);
             }
         }
-        if (this.hasNoUsages()) {
+        if (tool.allUsagesAvailable() && this.hasNoUsages()) {
             return null;
         }
         return this;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/EncodedGraph.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.nodes;
+
+import java.util.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.graph.*;
+
+/**
+ * A {@link StructuredGraph} encoded in a compact binary representation as a byte[] array. See
+ * {@link GraphEncoder} for a description of the encoding format. Use {@link GraphDecoder} for
+ * decoding.
+ */
+public class EncodedGraph {
+
+    private final byte[] encoding;
+    private final long startOffset;
+    private final Object[] objects;
+    private final NodeClass<?>[] types;
+    private final Assumptions assumptions;
+    private final Set<ResolvedJavaMethod> inlinedMethods;
+
+    public EncodedGraph(byte[] encoding, long startOffset, Object[] objects, NodeClass<?>[] types, Assumptions assumptions, Set<ResolvedJavaMethod> inlinedMethods) {
+        this.encoding = encoding;
+        this.startOffset = startOffset;
+        this.objects = objects;
+        this.types = types;
+        this.assumptions = assumptions;
+        this.inlinedMethods = inlinedMethods;
+    }
+
+    public byte[] getEncoding() {
+        return encoding;
+    }
+
+    public long getStartOffset() {
+        return startOffset;
+    }
+
+    public Object[] getObjects() {
+        return objects;
+    }
+
+    public NodeClass<?>[] getNodeClasses() {
+        return types;
+    }
+
+    public Assumptions getAssumptions() {
+        return assumptions;
+    }
+
+    public Set<ResolvedJavaMethod> getInlinedMethods() {
+        return inlinedMethods;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/GraphDecoder.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,1033 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.nodes;
+
+import static com.oracle.graal.compiler.common.GraalInternalError.*;
+
+import java.util.*;
+
+import com.oracle.graal.compiler.common.*;
+import com.oracle.graal.compiler.common.util.*;
+import com.oracle.graal.debug.*;
+import com.oracle.graal.graph.*;
+import com.oracle.graal.nodes.util.*;
+
+/**
+ * Decoder for {@link EncodedGraph encoded graphs} produced by {@link GraphEncoder}. Support for
+ * loop explosion during decoding is built into this class, because it requires many interactions
+ * with the decoding process. Subclasses can provide canonicalization and simplification of nodes,
+ * as well as method inlining, during decoding.
+ */
+public class GraphDecoder {
+
+    public enum LoopExplosionKind {
+        /**
+         * No loop explosion.
+         */
+        NONE,
+        /**
+         * Fully unroll all loops. The loops must have a known finite number of iterations. If a
+         * loop has multiple loop ends, they are merged so that the subsequent loop iteration is
+         * processed only once. For example, a loop with 4 iterations and 2 loop ends leads to
+         * 1+1+1+1 = 4 copies of the loop body. The merge can introduce phi functions.
+         */
+        FULL_UNROLL,
+        /**
+         * Fully explode all loops. The loops must have a known finite number of iterations. If a
+         * loop has multiple loop ends, they are not merged so that subsequent loop iterations are
+         * processed multiple times. For example, a loop with 4 iterations and 2 loop ends leads to
+         * 1+2+4+8 = 15 copies of the loop body.
+         */
+        FULL_EXPLODE,
+        /**
+         * Like {@link #FULL_EXPLODE}, but copies of the loop body that have the exact same state
+         * are merged. This reduces the number of copies necessary, but can introduce loops again.
+         */
+        MERGE_EXPLODE
+    }
+
+    /** Decoding state maintained for each encoded graph. */
+    protected static class MethodScope {
+        /** The target graph where decoded nodes are added to. */
+        public final StructuredGraph graph;
+        /** The state of the caller method. Only non-null during method inlining. */
+        public final MethodScope caller;
+        /** The encoded graph that is decoded. */
+        public final EncodedGraph encodedGraph;
+        /** Access to the encoded graph. */
+        public final TypeReader reader;
+        /**
+         * The "table of contents" of the encoded graph, i.e., the mapping from orderId numbers to
+         * the offset in the encoded byte[] array.
+         */
+        public final long[] nodeStartOffsets;
+        /** The kind of loop explosion to be performed during decoding. */
+        public final LoopExplosionKind loopExplosion;
+
+        /**
+         * The start node of the decoded graph. This is a temporary node for inlined graphs that
+         * needs to be deleted after inlining.
+         */
+        public StartNode startNode;
+        /** All return nodes encountered during decoding. */
+        public final List<ReturnNode> returnNodes;
+        /** The exception unwind node encountered during decoding, or null. */
+        public UnwindNode unwindNode;
+
+        protected MethodScope(StructuredGraph graph, MethodScope caller, EncodedGraph encodedGraph, LoopExplosionKind loopExplosion) {
+            this.graph = graph;
+            this.caller = caller;
+            this.encodedGraph = encodedGraph;
+            this.loopExplosion = loopExplosion;
+            this.returnNodes = new ArrayList<>();
+
+            reader = new UnsafeArrayTypeReader(encodedGraph.getEncoding(), encodedGraph.getStartOffset());
+            int nodeCount = reader.getUVInt();
+            nodeStartOffsets = new long[nodeCount];
+            for (int i = 0; i < nodeCount; i++) {
+                nodeStartOffsets[i] = encodedGraph.getStartOffset() - reader.getUV();
+            }
+        }
+
+        public boolean isInlinedMethod() {
+            return caller != null;
+        }
+    }
+
+    /** Decoding state maintained for each loop in the encoded graph. */
+    protected static class LoopScope {
+        public final LoopScope outer;
+        public final int loopDepth;
+        public final int loopIteration;
+        /**
+         * Upcoming loop iterations during loop explosion that have not been processed yet. Only
+         * used when {@link MethodScope#loopExplosion} is not {@link LoopExplosionKind#NONE}.
+         */
+        public Deque<LoopScope> nextIterations;
+        /**
+         * Information about already processed loop iterations for state merging during loop
+         * explosion. Only used when {@link MethodScope#loopExplosion} is
+         * {@link LoopExplosionKind#MERGE_EXPLODE}.
+         */
+        public final Map<LoopExplosionState, LoopExplosionState> iterationStates;
+        public final int loopBeginOrderId;
+        /**
+         * The worklist of fixed nodes to process. Since we already know the correct processing order
+         * from the orderId, we just set the orderId bit in the bitset when a node is ready for
+         * processing. The lowest set bit is the next node to process.
+         */
+        public final BitSet nodesToProcess;
+        /** Nodes that have been created, indexed by the orderId. */
+        public final Node[] createdNodes;
+        /**
+         * Nodes that have been created in outer loop scopes and existed before starting to process
+         * this loop, indexed by the orderId.
+         */
+        public final Node[] initialCreatedNodes;
+
+        protected LoopScope(MethodScope methodScope) {
+            this.outer = null;
+            this.nextIterations = null;
+            this.loopDepth = 0;
+            this.loopIteration = 0;
+            this.iterationStates = null;
+            this.loopBeginOrderId = -1;
+
+            int nodeCount = methodScope.nodeStartOffsets.length;
+            this.nodesToProcess = new BitSet(nodeCount);
+            this.initialCreatedNodes = new Node[nodeCount];
+            this.createdNodes = new Node[nodeCount];
+        }
+
+        protected LoopScope(LoopScope outer, int loopDepth, int loopIteration, int loopBeginOrderId, Node[] initialCreatedNodes, Deque<LoopScope> nextIterations,
+                        Map<LoopExplosionState, LoopExplosionState> iterationStates) {
+            this.outer = outer;
+            this.loopDepth = loopDepth;
+            this.loopIteration = loopIteration;
+            this.nextIterations = nextIterations;
+            this.iterationStates = iterationStates;
+            this.loopBeginOrderId = loopBeginOrderId;
+            this.nodesToProcess = new BitSet(initialCreatedNodes.length);
+            this.initialCreatedNodes = initialCreatedNodes;
+            this.createdNodes = Arrays.copyOf(initialCreatedNodes, initialCreatedNodes.length);
+        }
+
+        @Override
+        public String toString() {
+            return loopDepth + "," + loopIteration + (loopBeginOrderId == -1 ? "" : "#" + loopBeginOrderId);
+        }
+    }
+
+    protected static class LoopExplosionState {
+        public final FrameState state;
+        public final MergeNode merge;
+        public final int hashCode;
+
+        protected LoopExplosionState(FrameState state, MergeNode merge) {
+            this.state = state;
+            this.merge = merge;
+
+            int h = 0;
+            for (ValueNode value : state.values()) {
+                if (value == null) {
+                    h = h * 31 + 1234;
+                } else {
+                    h = h * 31 + value.hashCode();
+                }
+            }
+            this.hashCode = h;
+        }
+
+        @Override
+        public boolean equals(Object obj) {
+            if (!(obj instanceof LoopExplosionState)) {
+                return false;
+            }
+
+            FrameState otherState = ((LoopExplosionState) obj).state;
+            FrameState thisState = state;
+            assert thisState.outerFrameState() == otherState.outerFrameState();
+
+            Iterator<ValueNode> thisIter = thisState.values().iterator();
+            Iterator<ValueNode> otherIter = otherState.values().iterator();
+            while (thisIter.hasNext() && otherIter.hasNext()) {
+                ValueNode thisValue = thisIter.next();
+                ValueNode otherValue = otherIter.next();
+                if (thisValue != otherValue) {
+                    return false;
+                }
+            }
+            return thisIter.hasNext() == otherIter.hasNext();
+        }
+
+        @Override
+        public int hashCode() {
+            return hashCode;
+        }
+    }
+
+    public final void decode(StructuredGraph graph, EncodedGraph encodedGraph) {
+        MethodScope methodScope = new MethodScope(graph, null, encodedGraph, LoopExplosionKind.NONE);
+        decode(methodScope);
+        cleanupGraph(methodScope);
+        methodScope.graph.verify();
+    }
+
+    protected final void decode(MethodScope methodScope) {
+        LoopScope loopScope = new LoopScope(methodScope);
+        if (methodScope.isInlinedMethod()) {
+            methodScope.startNode = methodScope.graph.add(new StartNode());
+            methodScope.startNode.setNext(makeStubNode(methodScope, loopScope, GraphEncoder.FIRST_NODE_ORDER_ID));
+            loopScope.nodesToProcess.set(GraphEncoder.FIRST_NODE_ORDER_ID);
+        } else {
+            methodScope.startNode = methodScope.graph.start();
+            registerNode(loopScope, GraphEncoder.START_NODE_ORDER_ID, methodScope.startNode, false, false);
+            loopScope.nodesToProcess.set(GraphEncoder.START_NODE_ORDER_ID);
+        }
+
+        while (loopScope != null) {
+            while (!loopScope.nodesToProcess.isEmpty()) {
+                loopScope = processNextNode(methodScope, loopScope);
+            }
+
+            if (loopScope.nextIterations != null && !loopScope.nextIterations.isEmpty()) {
+                /* Loop explosion: process the next loop iteration. */
+                assert loopScope.nextIterations.peekFirst().loopIteration == loopScope.loopIteration + 1;
+                loopScope = loopScope.nextIterations.removeFirst();
+            } else {
+                loopScope = loopScope.outer;
+            }
+        }
+
+        if (methodScope.loopExplosion == LoopExplosionKind.MERGE_EXPLODE) {
+            cleanupGraph(methodScope);
+            Debug.dump(methodScope.graph, "Before loop detection");
+            detectLoops(methodScope.graph, methodScope.startNode);
+        }
+    }
+
+    protected LoopScope processNextNode(MethodScope methodScope, LoopScope loopScope) {
+        int nodeOrderId = loopScope.nodesToProcess.nextSetBit(0);
+        loopScope.nodesToProcess.clear(nodeOrderId);
+
+        FixedNode node = (FixedNode) lookupNode(loopScope, nodeOrderId);
+        if (node.isDeleted()) {
+            return loopScope;
+        }
+
+        LoopScope successorAddScope = loopScope;
+        boolean updatePredecessors = true;
+        if (node instanceof LoopExitNode) {
+            successorAddScope = loopScope.outer;
+            updatePredecessors = methodScope.loopExplosion == LoopExplosionKind.NONE;
+        }
+
+        methodScope.reader.setByteIndex(methodScope.nodeStartOffsets[nodeOrderId]);
+        int typeId = methodScope.reader.getUVInt();
+        assert node.getNodeClass() == methodScope.encodedGraph.getNodeClasses()[typeId];
+        readProperties(methodScope, node);
+        makeSuccessorStubs(methodScope, successorAddScope, node, updatePredecessors);
+        makeInputNodes(methodScope, loopScope, node, true);
+
+        LoopScope resultScope = loopScope;
+        if (node instanceof LoopBeginNode) {
+            if (methodScope.loopExplosion != LoopExplosionKind.NONE) {
+                handleLoopExplosionBegin(methodScope, loopScope, (LoopBeginNode) node);
+            }
+
+        } else if (node instanceof LoopExitNode) {
+            if (methodScope.loopExplosion != LoopExplosionKind.NONE) {
+                handleLoopExplosionProxyNodes(methodScope, loopScope, (LoopExitNode) node, nodeOrderId);
+            } else {
+                handleProxyNodes(methodScope, loopScope);
+            }
+
+        } else if (node instanceof AbstractEndNode) {
+            LoopScope phiInputScope = loopScope;
+            LoopScope phiNodeScope = loopScope;
+
+            if (methodScope.loopExplosion != LoopExplosionKind.NONE && node instanceof LoopEndNode) {
+                node = handleLoopExplosionEnd(methodScope, loopScope, (LoopEndNode) node);
+                phiNodeScope = loopScope.nextIterations.getLast();
+            }
+
+            int mergeOrderId = methodScope.reader.getUVInt();
+            AbstractMergeNode merge = (AbstractMergeNode) lookupNode(phiNodeScope, mergeOrderId);
+            if (merge == null) {
+                merge = (AbstractMergeNode) makeStubNode(methodScope, phiNodeScope, mergeOrderId);
+
+                if (merge instanceof LoopBeginNode) {
+                    assert phiNodeScope == phiInputScope && phiNodeScope == loopScope;
+                    resultScope = new LoopScope(loopScope, loopScope.loopDepth + 1, 0, mergeOrderId, Arrays.copyOf(loopScope.createdNodes, loopScope.createdNodes.length), //
+                                    methodScope.loopExplosion != LoopExplosionKind.NONE ? new ArrayDeque<>() : null, //
+                                    methodScope.loopExplosion == LoopExplosionKind.MERGE_EXPLODE ? new HashMap<>() : null);
+                    phiNodeScope = resultScope;
+
+                    registerNode(phiInputScope, mergeOrderId, null, true, true);
+                    phiInputScope.nodesToProcess.clear(mergeOrderId);
+                    phiNodeScope.nodesToProcess.set(mergeOrderId);
+                }
+            }
+
+            handlePhiFunctions(methodScope, phiInputScope, phiNodeScope, (AbstractEndNode) node, merge);
+
+        } else if (node instanceof Invoke) {
+            simplifyInvoke(methodScope, loopScope, nodeOrderId, (Invoke) node);
+
+        } else if (node instanceof ReturnNode) {
+            methodScope.returnNodes.add((ReturnNode) node);
+        } else if (node instanceof UnwindNode) {
+            assert methodScope.unwindNode == null : "graph can have only one UnwindNode";
+            methodScope.unwindNode = (UnwindNode) node;
+
+        } else {
+            simplifyFixedNode(methodScope, loopScope, nodeOrderId, node);
+        }
+
+        return resultScope;
+    }
+
+    /**
+     * Hook for subclasses.
+     *
+     * @param methodScope The current method.
+     * @param loopScope The current loop.
+     * @param invokeOrderId The orderId of the method invocation node.
+     * @param invoke The invocation node.
+     */
+    protected void simplifyInvoke(MethodScope methodScope, LoopScope loopScope, int invokeOrderId, Invoke invoke) {
+    }
+
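+    /**
+     * Replaces a loop begin with a plain merge during loop explosion. For MERGE_EXPLODE, an
+     * already-seen iteration state is merged into the existing merge node instead of creating a
+     * new one.
+     */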
+    protected void handleLoopExplosionBegin(MethodScope methodScope, LoopScope loopScope, LoopBeginNode loopBegin) {
+        checkLoopExplosionIteration(methodScope, loopScope);
+
+        List<EndNode> predecessors = loopBegin.forwardEnds().snapshot();
+        FixedNode successor = loopBegin.next();
+        FrameState frameState = loopBegin.stateAfter();
+
+        if (methodScope.loopExplosion == LoopExplosionKind.MERGE_EXPLODE) {
+            LoopExplosionState queryState = new LoopExplosionState(frameState, null);
+            LoopExplosionState existingState = loopScope.iterationStates.get(queryState);
+            if (existingState != null) {
+                loopBegin.replaceAtUsages(existingState.merge);
+                loopBegin.safeDelete();
+                successor.safeDelete();
+                for (EndNode predecessor : predecessors) {
+                    existingState.merge.addForwardEnd(predecessor);
+                }
+                return;
+            }
+        }
+
+        MergeNode merge = methodScope.graph.add(new MergeNode());
+        loopBegin.replaceAtUsages(merge);
+        loopBegin.safeDelete();
+        merge.setStateAfter(frameState);
+        merge.setNext(successor);
+        for (EndNode predecessor : predecessors) {
+            merge.addForwardEnd(predecessor);
+        }
+
+        if (methodScope.loopExplosion == LoopExplosionKind.MERGE_EXPLODE) {
+            LoopExplosionState explosionState = new LoopExplosionState(frameState, merge);
+            loopScope.iterationStates.put(explosionState, explosionState);
+        }
+    }
+
+    /**
+     * Hook for subclasses.
+     *
+     * @param methodScope The current method.
+     * @param loopScope The current loop.
+     */
+    protected void checkLoopExplosionIteration(MethodScope methodScope, LoopScope loopScope) {
+        throw shouldNotReachHere("when subclass uses loop explosion, it needs to implement this method");
+    }
+
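+    /**
+     * Replaces a loop end with a plain end node during loop explosion and, when necessary,
+     * registers the loop scope for the next unrolled iteration.
+     */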
+    protected FixedNode handleLoopExplosionEnd(MethodScope methodScope, LoopScope loopScope, LoopEndNode loopEnd) {
+        EndNode replacementNode = methodScope.graph.add(new EndNode());
+        loopEnd.replaceAtPredecessor(replacementNode);
+        loopEnd.safeDelete();
+
+        assert methodScope.loopExplosion != LoopExplosionKind.NONE;
+        if (methodScope.loopExplosion != LoopExplosionKind.FULL_UNROLL || loopScope.nextIterations.isEmpty()) {
+            int nextIterationNumber = loopScope.nextIterations.isEmpty() ? loopScope.loopIteration + 1 : loopScope.nextIterations.getLast().loopIteration + 1;
+            LoopScope nextIterationScope = new LoopScope(loopScope.outer, loopScope.loopDepth, nextIterationNumber, loopScope.loopBeginOrderId, loopScope.initialCreatedNodes,
+                            loopScope.nextIterations, loopScope.iterationStates);
+            loopScope.nextIterations.addLast(nextIterationScope);
+            registerNode(nextIterationScope, loopScope.loopBeginOrderId, null, true, true);
+            makeStubNode(methodScope, nextIterationScope, loopScope.loopBeginOrderId);
+        }
+        return replacementNode;
+    }
+
+    /**
+     * Hook for subclasses.
+     *
+     * @param methodScope The current method.
+     * @param loopScope The current loop.
+     * @param nodeOrderId The orderId of the node.
+     * @param node The node to be simplified.
+     */
+    protected void simplifyFixedNode(MethodScope methodScope, LoopScope loopScope, int nodeOrderId, FixedNode node) {
+    }
+
+    protected void handleProxyNodes(MethodScope methodScope, LoopScope loopScope) {
+        int numProxies = methodScope.reader.getUVInt();
+        for (int i = 0; i < numProxies; i++) {
+            int proxyOrderId = methodScope.reader.getUVInt();
+            ProxyNode proxy = (ProxyNode) ensureNodeCreated(methodScope, loopScope, proxyOrderId);
+            /*
+             * The ProxyNode transports a value from the loop to the outer scope. We therefore
+             * register it in the outer scope.
+             */
+            registerNode(loopScope.outer, proxyOrderId, proxy, false, false);
+        }
+    }
+
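+    /**
+     * Converts a loop exit of an exploded loop: the exploded iterations share one merge at the
+     * exit, and the original proxy nodes become phi functions (or plain values) at that merge.
+     */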
+    protected void handleLoopExplosionProxyNodes(MethodScope methodScope, LoopScope loopScope, LoopExitNode loopExit, int loopExitOrderId) {
+        BeginNode begin = methodScope.graph.add(new BeginNode());
+
+        FrameState loopExitState = loopExit.stateAfter();
+        FixedNode loopExitSuccessor = loopExit.next();
+        loopExit.replaceAtPredecessor(begin);
+
+        /*
+         * In the original graph, the loop exit is not a merge node. Multiple exploded loop
+         * iterations now take the same loop exit, so we have to introduce a new merge node to
+         * handle the merge.
+         */
+        MergeNode merge;
+        if (lookupNode(loopScope.outer, loopExitOrderId) == null) {
+            merge = methodScope.graph.add(new MergeNode());
+            registerNode(loopScope.outer, loopExitOrderId, merge, false, false);
+            merge.setNext(loopExitSuccessor);
+        } else {
+            merge = (MergeNode) loopScope.outer.createdNodes[loopExitOrderId];
+        }
+
+        FrameState oldStateAfter = merge.stateAfter();
+        merge.setStateAfter(loopExitState);
+        if (oldStateAfter != null) {
+            oldStateAfter.safeDelete();
+        }
+
+        EndNode end = methodScope.graph.add(new EndNode());
+        begin.setNext(end);
+        merge.addForwardEnd(end);
+
+        /*
+         * Possibly create phi nodes for the original proxy nodes that flow out of the loop. Note
+         * that we definitely do not need a proxy node itself anymore, since the loop was exploded
+         * and is no longer present.
+         */
+        int numProxies = methodScope.reader.getUVInt();
+        for (int i = 0; i < numProxies; i++) {
+            int proxyOrderId = methodScope.reader.getUVInt();
+            ProxyNode proxy = (ProxyNode) ensureNodeCreated(methodScope, loopScope, proxyOrderId);
+            ValueNode phiInput = proxy.value();
+            ValueNode replacement;
+
+            ValueNode existing = (ValueNode) loopScope.outer.createdNodes[proxyOrderId];
+            if (existing == null || existing == phiInput) {
+                /*
+                 * We are at the first loop exit, or the proxy carries the same value for all exits.
+                 * We do not need a phi node yet.
+                 */
+                registerNode(loopScope.outer, proxyOrderId, phiInput, true, false);
+                replacement = phiInput;
+
+            } else if (!merge.isPhiAtMerge(existing)) {
+                /* Now we have two different values, so we need to create a phi node. */
+                PhiNode phi = methodScope.graph.addWithoutUnique(new ValuePhiNode(proxy.stamp(), merge));
+                /* Add the inputs from all previous exits. */
+                for (int j = 0; j < merge.phiPredecessorCount() - 1; j++) {
+                    phi.addInput(existing);
+                }
+                /* Add the input from this exit. */
+                phi.addInput(phiInput);
+                registerNode(loopScope.outer, proxyOrderId, phi, true, false);
+                replacement = phi;
+
+            } else {
+                /* Phi node has been created before, so just add the new input. */
+                PhiNode phi = (PhiNode) existing;
+                phi.addInput(phiInput);
+                replacement = phi;
+            }
+
+            methodScope.graph.replaceFloating(proxy, replacement);
+        }
+
+        loopExit.safeDelete();
+        assert loopExitSuccessor.predecessor() == null;
+        merge.getNodeClass().getSuccessorEdges().update(merge, null, loopExitSuccessor);
+    }
+
+    protected void handlePhiFunctions(MethodScope methodScope, LoopScope phiInputScope, LoopScope phiNodeScope, AbstractEndNode end, AbstractMergeNode merge) {
+
+        if (end instanceof LoopEndNode) {
+            /*
+             * Fix the loop end index and the number of loop ends. When we do canonicalization
+             * during decoding, we can end up with fewer ends than the encoded graph had. And the
+             * order of loop ends can be different.
+             */
+            int numEnds = ((LoopBeginNode) merge).loopEnds().count();
+            ((LoopBeginNode) merge).nextEndIndex = numEnds;
+            ((LoopEndNode) end).endIndex = numEnds - 1;
+
+        } else {
+            if (merge.ends == null) {
+                merge.ends = new NodeInputList<>(merge);
+            }
+            merge.addForwardEnd((EndNode) end);
+        }
+
+        /*
+         * We create most phi functions lazily. Canonicalization and simplification during decoding
+         * can lead to dead branches that are not decoded, so we might not need all phi functions
+         * that the original graph contained. Since we process all predecessors before actually
+         * processing the merge node, the phi functions are complete by the time the merge node
+         * itself is processed. The only exception is loop headers of non-exploded loops: since
+         * backward branches are not processed yet when processing the loop body, we need to create
+         * all phi functions upfront.
+         */
+        boolean lazyPhi = !(merge instanceof LoopBeginNode) || methodScope.loopExplosion != LoopExplosionKind.NONE;
+        int numPhis = methodScope.reader.getUVInt();
+        for (int i = 0; i < numPhis; i++) {
+            int phiInputOrderId = methodScope.reader.getUVInt();
+            int phiNodeOrderId = methodScope.reader.getUVInt();
+
+            ValueNode phiInput = (ValueNode) ensureNodeCreated(methodScope, phiInputScope, phiInputOrderId);
+
+            ValueNode existing = (ValueNode) lookupNode(phiNodeScope, phiNodeOrderId);
+            if (lazyPhi && (existing == null || existing == phiInput)) {
+                /* Phi function not yet necessary. */
+                registerNode(phiNodeScope, phiNodeOrderId, phiInput, true, false);
+
+            } else if (!merge.isPhiAtMerge(existing)) {
+                /*
+                 * Phi function is necessary. Create it and fill it with existing inputs as well as
+                 * the new input.
+                 */
+                registerNode(phiNodeScope, phiNodeOrderId, null, true, true);
+                PhiNode phi = (PhiNode) ensureNodeCreated(methodScope, phiNodeScope, phiNodeOrderId);
+
+                phi.setMerge(merge);
+                for (int j = 0; j < merge.phiPredecessorCount() - 1; j++) {
+                    phi.addInput(existing);
+                }
+                phi.addInput(phiInput);
+
+            } else {
+                /* Phi node has been created before, so just add the new input. */
+                PhiNode phi = (PhiNode) existing;
+                phi.addInput(phiInput);
+            }
+        }
+    }
+
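+    /**
+     * Allocates an instance of the node class encoded at the node's start offset. Properties and
+     * edges are not read here.
+     */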
+    protected Node instantiateNode(MethodScope methodScope, int nodeOrderId) {
+        methodScope.reader.setByteIndex(methodScope.nodeStartOffsets[nodeOrderId]);
+        NodeClass<?> nodeClass = methodScope.encodedGraph.getNodeClasses()[methodScope.reader.getUVInt()];
+        return nodeClass.allocateInstance();
+    }
+
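+    /**
+     * Reads the data fields of a node: primitive values are stored inline, object values are
+     * indices into the shared object array of the encoded graph.
+     */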
+    protected void readProperties(MethodScope methodScope, Node node) {
+        Fields fields = node.getNodeClass().getData();
+        for (int pos = 0; pos < fields.getCount(); pos++) {
+            if (fields.getType(pos).isPrimitive()) {
+                long primitive = methodScope.reader.getSV();
+                fields.setRawPrimitive(node, pos, primitive);
+            } else {
+                int objectId = methodScope.reader.getUVInt();
+                Object value = methodScope.encodedGraph.getObjects()[objectId];
+                fields.set(node, pos, value);
+            }
+        }
+    }
+
+    /**
+     * Process the input edges of a node. Input nodes that have not yet been created must be
+     * non-fixed nodes (because fixed nodes are processed in reverse postorder). Such non-fixed nodes
+     * are created on demand (recursively since they can themselves reference not yet created
+     * nodes).
+     */
+    protected void makeInputNodes(MethodScope methodScope, LoopScope loopScope, Node node, boolean updateUsages) {
+        Edges edges = node.getNodeClass().getEdges(Edges.Type.Inputs);
+        for (int index = 0; index < edges.getDirectCount(); index++) {
+            if (skipEdge(node, edges, index, true, true)) {
+                continue;
+            }
+            int orderId = methodScope.reader.getUVInt();
+            Node value = ensureNodeCreated(methodScope, loopScope, orderId);
+            Edges.initializeNode(node, edges.getOffsets(), index, value);
+            if (updateUsages && value != null && !value.isDeleted()) {
+                edges.update(node, null, value);
+
+            }
+        }
+        for (int index = edges.getDirectCount(); index < edges.getCount(); index++) {
+            if (skipEdge(node, edges, index, false, true)) {
+                continue;
+            }
+            int size = methodScope.reader.getSVInt();
+            if (size != -1) {
+                NodeList<Node> nodeList = new NodeInputList<>(node, size);
+                Edges.initializeList(node, edges.getOffsets(), index, nodeList);
+                for (int idx = 0; idx < size; idx++) {
+                    int orderId = methodScope.reader.getUVInt();
+                    Node value = ensureNodeCreated(methodScope, loopScope, orderId);
+                    nodeList.initialize(idx, value);
+                    if (updateUsages && value != null && !value.isDeleted()) {
+                        edges.update(node, null, value);
+                    }
+                }
+            }
+        }
+    }
+
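+    /**
+     * Looks up the node for the given orderId, creating it on demand. Only non-fixed nodes are
+     * created here; their inputs are created recursively as needed.
+     */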
+    protected Node ensureNodeCreated(MethodScope methodScope, LoopScope loopScope, int nodeOrderId) {
+        if (nodeOrderId == GraphEncoder.NULL_ORDER_ID) {
+            return null;
+        }
+        Node node = lookupNode(loopScope, nodeOrderId);
+        if (node != null) {
+            return node;
+        }
+
+        long readerByteIndex = methodScope.reader.getByteIndex();
+        node = instantiateNode(methodScope, nodeOrderId);
+        assert !(node instanceof FixedNode);
+
+        /* Read the properties of the node. */
+        readProperties(methodScope, node);
+        /* There must not be any successors to read, since it is a non-fixed node. */
+        assert node.getNodeClass().getEdges(Edges.Type.Successors).getCount() == 0;
+        /* Read the inputs of the node, possibly creating them recursively. */
+        makeInputNodes(methodScope, loopScope, node, false);
+        methodScope.reader.setByteIndex(readerByteIndex);
+
+        if (node instanceof ProxyNode || node instanceof PhiNode) {
+            /*
+             * We need these nodes as they were in the original graph, without any canonicalization
+             * or value numbering.
+             */
+            node = methodScope.graph.addWithoutUnique(node);
+
+        } else {
+            /* Allow subclasses to canonicalize and intercept nodes. */
+            node = handleFloatingNodeBeforeAdd(methodScope, loopScope, node);
+            if (!node.isAlive()) {
+                node = methodScope.graph.addOrUnique(node);
+            }
+            node = handleFloatingNodeAfterAdd(methodScope, loopScope, node);
+        }
+        registerNode(loopScope, nodeOrderId, node, false, false);
+        return node;
+    }
+
+    /**
+     * Hook for subclasses to process a non-fixed node before it is added to the graph.
+     *
+     * @param methodScope The current method.
+     * @param loopScope The current loop.
+     * @param node The node to be canonicalized.
+     * @return The replacement for the node, or the node itself.
+     */
+    protected Node handleFloatingNodeBeforeAdd(MethodScope methodScope, LoopScope loopScope, Node node) {
+        return node;
+    }
+
+    /**
+     * Hook for subclasses to process a non-fixed node after it is added to the graph.
+     *
+     * @param methodScope The current method.
+     * @param loopScope The current loop.
+     * @param node The node to be canonicalized.
+     * @return The replacement for the node, or the node itself.
+     */
+    protected Node handleFloatingNodeAfterAdd(MethodScope methodScope, LoopScope loopScope, Node node) {
+        return node;
+    }
+
+    /**
+     * Process successor edges of a node. We create the successor nodes so that we can fill the
+     * successor list, but no properties or edges are loaded yet. That is done when the successor is
+     * on top of the worklist in {@link #processNextNode}.
+     */
+    protected void makeSuccessorStubs(MethodScope methodScope, LoopScope loopScope, Node node, boolean updatePredecessors) {
+        Edges edges = node.getNodeClass().getEdges(Edges.Type.Successors);
+        for (int index = 0; index < edges.getDirectCount(); index++) {
+            if (skipEdge(node, edges, index, true, true)) {
+                continue;
+            }
+            int orderId = methodScope.reader.getUVInt();
+            Node value = makeStubNode(methodScope, loopScope, orderId);
+            Edges.initializeNode(node, edges.getOffsets(), index, value);
+            if (updatePredecessors && value != null) {
+                edges.update(node, null, value);
+            }
+        }
+        for (int index = edges.getDirectCount(); index < edges.getCount(); index++) {
+            if (skipEdge(node, edges, index, false, true)) {
+                continue;
+            }
+            int size = methodScope.reader.getSVInt();
+            if (size != -1) {
+                NodeList<Node> nodeList = new NodeSuccessorList<>(node, size);
+                Edges.initializeList(node, edges.getOffsets(), index, nodeList);
+                for (int idx = 0; idx < size; idx++) {
+                    int orderId = methodScope.reader.getUVInt();
+                    Node value = makeStubNode(methodScope, loopScope, orderId);
+                    nodeList.initialize(idx, value);
+                    if (updatePredecessors && value != null) {
+                        edges.update(node, null, value);
+                    }
+                }
+            }
+        }
+    }
+
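+    /**
+     * Creates an uninitialized fixed node for the given orderId and schedules it for processing.
+     * Properties and edges are filled in later, when the node is taken from the worklist.
+     */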
+    protected FixedNode makeStubNode(MethodScope methodScope, LoopScope loopScope, int nodeOrderId) {
+        if (nodeOrderId == GraphEncoder.NULL_ORDER_ID) {
+            return null;
+        }
+        FixedNode node = (FixedNode) lookupNode(loopScope, nodeOrderId);
+        if (node != null) {
+            return node;
+        }
+
+        long readerByteIndex = methodScope.reader.getByteIndex();
+        node = (FixedNode) methodScope.graph.add(instantiateNode(methodScope, nodeOrderId));
+        /* Properties and edges are not filled yet, the node remains uninitialized. */
+        methodScope.reader.setByteIndex(readerByteIndex);
+
+        registerNode(loopScope, nodeOrderId, node, false, false);
+        loopScope.nodesToProcess.set(nodeOrderId);
+        return node;
+    }
+
+    /**
+     * Returns true for {@link Edges} that are not necessary in the encoded graph because they are
+     * reconstructed using other sources of information.
+     */
+    protected static boolean skipEdge(Node node, Edges edges, int index, boolean direct, boolean decode) {
+        if (node instanceof PhiNode) {
+            /* The inputs of phi functions are filled manually when the end nodes are processed. */
+            assert edges.type() == Edges.Type.Inputs;
+            if (direct) {
+                assert index == edges.getDirectCount() - 1 : "PhiNode has one direct input (the MergeNode)";
+            } else {
+                assert index == edges.getCount() - 1 : "PhiNode has one variable size input (the values)";
+                if (decode) {
+                    /* The values must not be null, so initialize with an empty list. */
+                    Edges.initializeList(node, edges.getOffsets(), index, new NodeInputList<>(node));
+                }
+            }
+            return true;
+
+        } else if (node instanceof AbstractMergeNode && edges.type() == Edges.Type.Inputs && !direct) {
+            /* The ends of merge nodes are filled manually when the ends are processed. */
+            assert index == edges.getCount() - 1 : "MergeNode has one variable size input (the ends)";
+            assert Edges.getNodeList(node, edges.getOffsets(), index) != null : "Input list must have been already created";
+            return true;
+        }
+        return false;
+    }
+
+    protected Node lookupNode(LoopScope loopScope, int nodeOrderId) {
+        return loopScope.createdNodes[nodeOrderId];
+    }
+
+    protected void registerNode(LoopScope loopScope, int nodeOrderId, Node node, boolean allowOverwrite, boolean allowNull) {
+        assert node == null || node.isAlive();
+        assert allowNull || node != null;
+        assert allowOverwrite || lookupNode(loopScope, nodeOrderId) == null;
+        loopScope.createdNodes[nodeOrderId] = node;
+    }
+
+    /*
+     * The following methods are a literal copy from GraphBuilderPhase.
+     */
+
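+    /**
+     * Iterative depth-first traversal of the control flow: a successor that is still on the
+     * active path indicates a back edge, and the target merge is turned into a loop header.
+     */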
+    protected void detectLoops(StructuredGraph currentGraph, FixedNode startInstruction) {
+        NodeBitMap visited = currentGraph.createNodeBitMap();
+        NodeBitMap active = currentGraph.createNodeBitMap();
+        Deque<Node> stack = new ArrayDeque<>();
+        stack.add(startInstruction);
+        visited.mark(startInstruction);
+        while (!stack.isEmpty()) {
+            Node next = stack.peek();
+            assert next.isDeleted() || visited.isMarked(next);
+            if (next.isDeleted() || active.isMarked(next)) {
+                stack.pop();
+                if (!next.isDeleted()) {
+                    active.clear(next);
+                }
+            } else {
+                active.mark(next);
+                for (Node n : next.cfgSuccessors()) {
+                    if (active.contains(n)) {
+                        // Detected cycle.
+                        assert n instanceof MergeNode;
+                        assert next instanceof EndNode;
+                        MergeNode merge = (MergeNode) n;
+                        EndNode endNode = (EndNode) next;
+                        merge.removeEnd(endNode);
+                        FixedNode afterMerge = merge.next();
+                        if (!(afterMerge instanceof EndNode) || !(((EndNode) afterMerge).merge() instanceof LoopBeginNode)) {
+                            FrameState stateAfter = merge.stateAfter();
+                            merge.setNext(null);
+                            merge.setStateAfter(null);
+                            LoopBeginNode newLoopBegin = appendLoopBegin(currentGraph, merge);
+                            newLoopBegin.setNext(afterMerge);
+                            newLoopBegin.setStateAfter(stateAfter);
+                        }
+                        LoopBeginNode loopBegin = (LoopBeginNode) ((EndNode) merge.next()).merge();
+                        LoopEndNode loopEnd = currentGraph.add(new LoopEndNode(loopBegin));
+                        endNode.replaceAndDelete(loopEnd);
+                    } else if (visited.contains(n)) {
+                        // Normal merge into a branch we are already exploring.
+                    } else {
+                        visited.mark(n);
+                        stack.push(n);
+                    }
+                }
+            }
+        }
+
+        Debug.dump(currentGraph, "After loops detected");
+        insertLoopEnds(currentGraph, startInstruction);
+    }
+
+    private static LoopBeginNode appendLoopBegin(StructuredGraph currentGraph, FixedWithNextNode fixedWithNext) {
+        EndNode preLoopEnd = currentGraph.add(new EndNode());
+        LoopBeginNode loopBegin = currentGraph.add(new LoopBeginNode());
+        fixedWithNext.setNext(preLoopEnd);
+        // Add the single non-loop predecessor of the loop header.
+        loopBegin.addForwardEnd(preLoopEnd);
+        return loopBegin;
+    }
+
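+    /**
+     * Collects all loop headers in a forward traversal and then inserts the loop exits, processing
+     * the loop headers in reverse discovery order.
+     */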
+    private static void insertLoopEnds(StructuredGraph currentGraph, FixedNode startInstruction) {
+        NodeBitMap visited = currentGraph.createNodeBitMap();
+        Deque<Node> stack = new ArrayDeque<>();
+        stack.add(startInstruction);
+        visited.mark(startInstruction);
+        List<LoopBeginNode> loopBegins = new ArrayList<>();
+        while (!stack.isEmpty()) {
+            Node next = stack.pop();
+            assert visited.isMarked(next);
+            if (next instanceof LoopBeginNode) {
+                loopBegins.add((LoopBeginNode) next);
+            }
+            for (Node n : next.cfgSuccessors()) {
+                if (visited.contains(n)) {
+                    // Nothing to do.
+                } else {
+                    visited.mark(n);
+                    stack.push(n);
+                }
+            }
+        }
+
+        IdentityHashMap<LoopBeginNode, List<LoopBeginNode>> innerLoopsMap = new IdentityHashMap<>();
+        for (int i = loopBegins.size() - 1; i >= 0; --i) {
+            LoopBeginNode loopBegin = loopBegins.get(i);
+            insertLoopExits(currentGraph, loopBegin, innerLoopsMap);
+        }
+
+        // Remove degenerated merges with only one predecessor.
+        for (LoopBeginNode loopBegin : loopBegins) {
+            Node pred = loopBegin.forwardEnd().predecessor();
+            if (pred instanceof MergeNode) {
+                MergeNode.removeMergeIfDegenerated((MergeNode) pred);
+            }
+        }
+    }
+
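+    /**
+     * Walks backwards from the loop ends towards the loop header. Control-flow splits encountered
+     * on the way can have successors outside the loop; those successors get a LoopExitNode.
+     */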
+    private static void insertLoopExits(StructuredGraph currentGraph, LoopBeginNode loopBegin, IdentityHashMap<LoopBeginNode, List<LoopBeginNode>> innerLoopsMap) {
+        NodeBitMap visited = currentGraph.createNodeBitMap();
+        Deque<Node> stack = new ArrayDeque<>();
+        for (LoopEndNode loopEnd : loopBegin.loopEnds()) {
+            stack.push(loopEnd);
+            visited.mark(loopEnd);
+        }
+
+        List<ControlSplitNode> controlSplits = new ArrayList<>();
+        List<LoopBeginNode> innerLoopBegins = new ArrayList<>();
+
+        while (!stack.isEmpty()) {
+            Node current = stack.pop();
+            if (current == loopBegin) {
+                continue;
+            }
+            for (Node pred : current.cfgPredecessors()) {
+                if (!visited.isMarked(pred)) {
+                    visited.mark(pred);
+                    if (pred instanceof LoopExitNode) {
+                        // Inner loop
+                        LoopExitNode loopExitNode = (LoopExitNode) pred;
+                        LoopBeginNode innerLoopBegin = loopExitNode.loopBegin();
+                        if (!visited.isMarked(innerLoopBegin)) {
+                            stack.push(innerLoopBegin);
+                            visited.mark(innerLoopBegin);
+                            innerLoopBegins.add(innerLoopBegin);
+                        }
+                    } else {
+                        if (pred instanceof ControlSplitNode) {
+                            ControlSplitNode controlSplitNode = (ControlSplitNode) pred;
+                            controlSplits.add(controlSplitNode);
+                        }
+                        stack.push(pred);
+                    }
+                }
+            }
+        }
+
+        for (ControlSplitNode controlSplit : controlSplits) {
+            for (Node succ : controlSplit.cfgSuccessors()) {
+                if (!visited.isMarked(succ)) {
+                    LoopExitNode loopExit = currentGraph.add(new LoopExitNode(loopBegin));
+                    FixedNode next = ((FixedWithNextNode) succ).next();
+                    next.replaceAtPredecessor(loopExit);
+                    loopExit.setNext(next);
+                }
+            }
+        }
+
+        for (LoopBeginNode inner : innerLoopBegins) {
+            addLoopExits(currentGraph, loopBegin, inner, innerLoopsMap, visited);
+        }
+
+        innerLoopsMap.put(loopBegin, innerLoopBegins);
+    }
+
+    private static void addLoopExits(StructuredGraph currentGraph, LoopBeginNode loopBegin, LoopBeginNode inner, IdentityHashMap<LoopBeginNode, List<LoopBeginNode>> innerLoopsMap, NodeBitMap visited) {
+        for (LoopExitNode exit : inner.loopExits()) {
+            if (!visited.isMarked(exit)) {
+                LoopExitNode newLoopExit = currentGraph.add(new LoopExitNode(loopBegin));
+                FixedNode next = exit.next();
+                next.replaceAtPredecessor(newLoopExit);
+                newLoopExit.setNext(next);
+            }
+        }
+
+        for (LoopBeginNode innerInner : innerLoopsMap.get(inner)) {
+            addLoopExits(currentGraph, loopBegin, innerInner, innerLoopsMap, visited);
+        }
+    }
+
+    protected void cleanupGraph(MethodScope methodScope) {
+        assert verifyEdges(methodScope);
+
+        Debug.dump(methodScope.graph, "Before removing redundant merges");
+        for (MergeNode mergeNode : methodScope.graph.getNodes(MergeNode.TYPE)) {
+            if (mergeNode.forwardEndCount() == 1) {
+                methodScope.graph.reduceTrivialMerge(mergeNode);
+            }
+        }
+
+        Debug.dump(methodScope.graph, "Before removing redundant begins");
+        for (Node node : methodScope.graph.getNodes()) {
+            if (node instanceof BeginNode || node instanceof KillingBeginNode) {
+                if (!(node.predecessor() instanceof ControlSplitNode) && node.hasNoUsages()) {
+                    GraphUtil.unlinkFixedNode((AbstractBeginNode) node);
+                    node.safeDelete();
+                }
+            }
+        }
+
+        Debug.dump(methodScope.graph, "Before removing unused non-fixed nodes");
+        for (Node node : methodScope.graph.getNodes()) {
+            if (!(node instanceof FixedNode) && node.hasNoUsages()) {
+                GraphUtil.killCFG(node);
+            }
+        }
+    }
+
+    protected boolean verifyEdges(MethodScope methodScope) {
+        for (Node node : methodScope.graph.getNodes()) {
+            assert node.isAlive();
+            node.acceptInputs((n, i) -> {
+                assert i.isAlive();
+                assert i.usages().contains(n);
+            });
+            node.acceptSuccessors((n, s) -> {
+                assert s.isAlive();
+                assert s.predecessor() == n;
+            });
+
+            for (Node usage : node.usages()) {
+                assert usage.isAlive();
+                assert usage.inputs().contains(node);
+            }
+            if (node.predecessor() != null) {
+                assert node.predecessor().isAlive();
+                assert node.predecessor().successors().contains(node);
+            }
+        }
+        return true;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/GraphEncoder.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,468 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.nodes;
+
+import java.util.*;
+
+import com.oracle.graal.compiler.common.*;
+import com.oracle.graal.compiler.common.util.*;
+import com.oracle.graal.graph.*;
+import com.oracle.graal.graph.iterators.*;
+import com.oracle.graal.nodes.StructuredGraph.AllowAssumptions;
+
+/**
+ * Encodes a {@link StructuredGraph} to a compact byte[] array. All nodes of the graph and edges
+ * between the nodes are encoded. Primitive data fields of nodes are stored in the byte[] array.
+ * Object data fields of nodes are stored in a separate Object[] array.
+ *
+ * One encoder instance can be used to encode multiple graphs. This requires that {@link #prepare}
+ * is called for all graphs first, followed by one call to {@link #finishPrepare}. Then
+ * {@link #encode} can be called for all graphs. The {@link #getObjects() objects} and
+ * {@link #getNodeClasses() node classes} arrays do not change anymore after preparation.
+ *
+ * Multiple encoded graphs share the Object[] array, and elements of the Object[] array are
+ * de-duplicated using {@link Object#equals Object equality}. This uses the assumption and good
+ * coding practice that data objects are immutable if {@link Object#equals} is implemented.
+ * Unfortunately, this cannot be enforced.
+ *
+ * The Graal {@link NodeClass} does not have a unique id that allows class lookup from an id.
+ * Therefore, the encoded graph contains a {@link NodeClass}[] array for lookup, and type ids are
+ * encoding-local.
+ *
+ * The encoded graph has the following structure: First, all nodes and their edges are serialized.
+ * The start offset of every node is then known. The raw node data is followed by a
+ * "table of contents" that lists the start offset for every node.
+ *
+ * The beginning of that table of contents is the return value of {@link #encode} and is stored in
+ * {@link EncodedGraph#getStartOffset()}. The order of nodes in the table of contents is the
+ * {@link NodeOrder#orderIds orderId} of a node. Note that the orderId is not the regular node id
+ * that every Graal graph node gets assigned. The orderId is computed and used just for encoding and
+ * decoding. The orderId of fixed nodes is assigned in reverse postorder. The decoder processes
+ * nodes using that order, which ensures that all predecessors of a node (including all
+ * {@link EndNode predecessors} of a {@link AbstractBeginNode block}) are decoded before the node.
+ * The order id of a floating node does not matter during decoding, so floating nodes get order ids
+ * after all fixed nodes. The order id is used to encode edges between nodes.
+ *
+ * Structure of an encoded node:
+ *
+ * <pre>
+ * struct Node {
+ *   unsigned typeId
+ *   signed[] properties
+ *   unsigned[] successorOrderIds
+ *   unsigned[] inputOrderIds
+ * }
+ * </pre>
+ *
+ * All numbers (unsigned and signed) are stored using a variable-length encoding as defined in
+ * {@link TypeReader} and {@link TypeWriter}. OrderIds in particular are small, so the
+ * variable-length encoding is important to keep the encoding compact.
+ *
+ * The properties, successors, and inputs are written in the order as defined in
+ * {@link NodeClass#getData}, {@link NodeClass#getSuccessorEdges()}, and
+ * {@link NodeClass#getInputEdges()}. For variable-length successors and input lists, first the
+ * length is written and then the orderIds. There is a distinction between null lists (encoded as
+ * length -1) and empty lists (encoded as length 0). No reverse edges are written (predecessors,
+ * usages) since that information can be easily restored during decoding.
+ *
+ * Some nodes have additional information written after the properties, successors, and inputs:
+ * <ul>
+ * <li>{@link AbstractEndNode}: the orderId of the merge node and then all {@link PhiNode phi
+ * mappings} from this end to the merge node are written.</li>
+ * <li>{@link LoopExitNode}: the orderId of all {@link ProxyNode proxy nodes} of the loop exit is
+ * written.</li>
+ * </ul>
+ */
+public class GraphEncoder {
+
+    /** The orderId that always represents {@code null}. */
+    public static final int NULL_ORDER_ID = 0;
+    /** The orderId of the {@link StructuredGraph#start() start node} of the encoded graph. */
+    public static final int START_NODE_ORDER_ID = 1;
+    /** The orderId of the first actual node after the {@link StructuredGraph#start() start node}. */
+    public static final int FIRST_NODE_ORDER_ID = 2;
+
+    /**
+     * Collects all non-primitive data referenced from nodes. The encoding uses an index into an
+     * array for decoding. Because of the variable-length encoding, it is beneficial that frequently
+     * used objects have small indices.
+     */
+    protected final FrequencyEncoder<Object> objects;
+    /**
+     * Collects all node classes referenced in graphs. This is necessary because {@link NodeClass}
+     * currently does not have a unique id.
+     */
+    protected final FrequencyEncoder<NodeClass<?>> nodeClasses;
+    /** The writer for the encoded graphs. */
+    protected final UnsafeArrayTypeWriter writer;
+
+    /** The last snapshot of {@link #objects} that was retrieved. */
+    protected Object[] objectsArray;
+    /** The last snapshot of {@link #nodeClasses} that was retrieved. */
+    protected NodeClass<?>[] nodeClassesArray;
+
+    /**
+     * Utility method that does everything necessary to encode a single graph.
+     */
+    public static EncodedGraph encodeSingleGraph(StructuredGraph graph) {
+        GraphEncoder encoder = new GraphEncoder();
+        encoder.prepare(graph);
+        encoder.finishPrepare();
+        long startOffset = encoder.encode(graph);
+        return new EncodedGraph(encoder.getEncoding(), startOffset, encoder.getObjects(), encoder.getNodeClasses(), graph.getAssumptions(), graph.getInlinedMethods());
+    }
+
+    public GraphEncoder() {
+        objects = FrequencyEncoder.createEqualityEncoder();
+        nodeClasses = FrequencyEncoder.createIdentityEncoder();
+        writer = new UnsafeArrayTypeWriter();
+    }
+
+    /**
+     * Must be invoked before {@link #finishPrepare()} and {@link #encode}.
+     */
+    public void prepare(StructuredGraph graph) {
+        for (Node node : graph.getNodes()) {
+            nodeClasses.addObject(node.getNodeClass());
+
+            NodeClass<?> nodeClass = node.getNodeClass();
+            for (int i = 0; i < nodeClass.getData().getCount(); i++) {
+                if (!nodeClass.getData().getType(i).isPrimitive()) {
+                    objects.addObject(nodeClass.getData().get(node, i));
+                }
+            }
+        }
+    }
+
+    public void finishPrepare() {
+        objectsArray = objects.encodeAll(new Object[objects.getLength()]);
+        nodeClassesArray = nodeClasses.encodeAll(new NodeClass[nodeClasses.getLength()]);
+    }
+
+    public Object[] getObjects() {
+        return objectsArray;
+    }
+
+    public NodeClass<?>[] getNodeClasses() {
+        return nodeClassesArray;
+    }
+
+    /**
+     * Compresses a graph to a byte array. Multiple graphs can be compressed with the same
+     * {@link GraphEncoder}.
+     *
+     * @param graph The graph to encode
+     */
+    public long encode(StructuredGraph graph) {
+        assert objectsArray != null && nodeClassesArray != null : "finishPrepare() must be called before encode()";
+
+        NodeOrder nodeOrder = new NodeOrder(graph);
+        int nodeCount = nodeOrder.nextOrderId;
+        assert nodeOrder.orderIds.get(graph.start()) == START_NODE_ORDER_ID;
+        assert nodeOrder.orderIds.get(graph.start().next()) == FIRST_NODE_ORDER_ID;
+        assert nodeCount == graph.getNodeCount() + 1;
+
+        long[] nodeStartOffsets = new long[nodeCount];
+        for (Map.Entry<Node, Integer> entry : nodeOrder.orderIds.entries()) {
+            Node node = entry.getKey();
+            nodeStartOffsets[entry.getValue()] = writer.getBytesWritten();
+
+            /* Write out the type, properties, and edges. */
+            NodeClass<?> nodeClass = node.getNodeClass();
+            writer.putUV(nodeClasses.getIndex(nodeClass));
+            writeProperties(node, nodeClass.getData());
+            writeEdges(node, nodeClass.getEdges(Edges.Type.Successors), nodeOrder);
+            writeEdges(node, nodeClass.getEdges(Edges.Type.Inputs), nodeOrder);
+
+            /* Special handling for some nodes that require additional information for decoding. */
+            if (node instanceof AbstractEndNode) {
+                AbstractEndNode end = (AbstractEndNode) node;
+                AbstractMergeNode merge = end.merge();
+                /*
+                 * Write the orderId of the merge. The merge is not a successor in the Graal graph
+                 * (only the merge has an input edge to the EndNode).
+                 */
+                writeOrderId(merge, nodeOrder);
+
+                /*
+                 * Write all phi mappings (the orderId of the phi input for this EndNode, and the
+                 * orderId of the phi node).
+                 */
+                writer.putUV(merge.phis().count());
+                for (PhiNode phi : merge.phis()) {
+                    writeOrderId(phi.valueAt(end), nodeOrder);
+                    writeOrderId(phi, nodeOrder);
+                }
+
+            } else if (node instanceof LoopExitNode) {
+                LoopExitNode exit = (LoopExitNode) node;
+                /* Write all proxy nodes of the LoopExitNode. */
+                writer.putUV(exit.proxies().count());
+                for (ProxyNode proxy : exit.proxies()) {
+                    writeOrderId(proxy, nodeOrder);
+                }
+            }
+        }
+
+        /* Write out the table of contents with the start offset for all nodes. */
+        long nodeTableStart = writer.getBytesWritten();
+        writer.putUV(nodeCount);
+        for (int i = 0; i < nodeCount; i++) {
+            assert i == NULL_ORDER_ID || i == START_NODE_ORDER_ID || nodeStartOffsets[i] > 0;
+            writer.putUV(nodeTableStart - nodeStartOffsets[i]);
+        }
+
+        /* Check that the decoding of the encoded graph is the same as the input. */
+        assert verifyEncoding(graph, new EncodedGraph(getEncoding(), nodeTableStart, getObjects(), getNodeClasses(), graph.getAssumptions(), graph.getInlinedMethods()));
+
+        return nodeTableStart;
+    }
+
+    public byte[] getEncoding() {
+        return writer.toArray(new byte[TypeConversion.asS4(writer.getBytesWritten())]);
+    }
+
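+    /**
+     * Assigns orderIds: fixed nodes first, in reverse postorder, followed by all remaining
+     * (floating) nodes of the graph.
+     */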
+    static class NodeOrder {
+        protected final NodeMap<Integer> orderIds;
+        protected int nextOrderId;
+
+        public NodeOrder(StructuredGraph graph) {
+            this.orderIds = new NodeMap<>(graph);
+            this.nextOrderId = START_NODE_ORDER_ID;
+
+            /* Order the fixed nodes of the graph in reverse postorder. */
+            Deque<AbstractBeginNode> nodeQueue = new ArrayDeque<>();
+            FixedNode current = graph.start();
+            do {
+                add(current);
+                if (current instanceof InvokeWithExceptionNode) {
+                    /*
+                     * Special handling for invokes: the orderId of the invocation, the regular
+                     * successor, and the exception edge must be consecutive.
+                     */
+                    add(((InvokeWithExceptionNode) current).next());
+                    add(((InvokeWithExceptionNode) current).exceptionEdge());
+                }
+
+                if (current instanceof FixedWithNextNode) {
+                    current = ((FixedWithNextNode) current).next;
+                } else {
+                    if (current instanceof ControlSplitNode) {
+                        for (Node successor : current.successors()) {
+                            if (successor != null) {
+                                nodeQueue.addFirst((AbstractBeginNode) successor);
+                            }
+                        }
+                    } else if (current instanceof EndNode) {
+                        AbstractMergeNode merge = ((AbstractEndNode) current).merge();
+                        boolean allForwardEndsVisited = true;
+                        for (int i = 0; i < merge.forwardEndCount(); i++) {
+                            if (orderIds.get(merge.forwardEndAt(i)) == null) {
+                                allForwardEndsVisited = false;
+                                break;
+                            }
+                        }
+                        if (allForwardEndsVisited) {
+                            nodeQueue.add(merge);
+                        }
+                    }
+                    current = nodeQueue.pollFirst();
+                }
+            } while (current != null);
+
+            for (Node node : graph.getNodes()) {
+                assert (node instanceof FixedNode) == (orderIds.get(node) != null) : "all fixed nodes must be ordered";
+                add(node);
+            }
+        }
+
+        private void add(Node node) {
+            if (orderIds.get(node) == null) {
+                orderIds.set(node, nextOrderId);
+                nextOrderId++;
+            }
+        }
+    }
+
+    protected void writeProperties(Node node, Fields fields) {
+        for (int idx = 0; idx < fields.getCount(); idx++) {
+            if (fields.getType(idx).isPrimitive()) {
+                long primitive = fields.getRawPrimitive(node, idx);
+                writer.putSV(primitive);
+            } else {
+                Object property = fields.get(node, idx);
+                writer.putUV(objects.getIndex(property));
+            }
+        }
+    }
+
+    protected void writeEdges(Node node, Edges edges, NodeOrder nodeOrder) {
+        for (int idx = 0; idx < edges.getDirectCount(); idx++) {
+            if (GraphDecoder.skipEdge(node, edges, idx, true, false)) {
+                /* Edge is not needed for decoding, so we must not write it. */
+                continue;
+            }
+            Node edge = Edges.getNode(node, edges.getOffsets(), idx);
+            writeOrderId(edge, nodeOrder);
+        }
+        for (int idx = edges.getDirectCount(); idx < edges.getCount(); idx++) {
+            if (GraphDecoder.skipEdge(node, edges, idx, false, false)) {
+                /* Edge is not needed for decoding, so we must not write it. */
+                continue;
+            }
+            NodeList<Node> edgeList = Edges.getNodeList(node, edges.getOffsets(), idx);
+            if (edgeList == null) {
+                writer.putSV(-1);
+            } else {
+                writer.putSV(edgeList.size());
+                for (Node edge : edgeList) {
+                    writeOrderId(edge, nodeOrder);
+                }
+            }
+        }
+    }
+
+    protected void writeOrderId(Node node, NodeOrder nodeOrder) {
+        writer.putUV(node == null ? NULL_ORDER_ID : nodeOrder.orderIds.get(node));
+    }
+
+    /**
+     * Verification code that checks that the decoding of an encoded graph is the same as the
+     * original graph.
+     */
+    public static boolean verifyEncoding(StructuredGraph originalGraph, EncodedGraph encodedGraph) {
+        StructuredGraph decodedGraph = new StructuredGraph(originalGraph.method(), AllowAssumptions.YES);
+        GraphDecoder decoder = new GraphDecoder();
+        decoder.decode(decodedGraph, encodedGraph);
+
+        decodedGraph.verify();
+        GraphComparison.verifyGraphsEqual(originalGraph, decodedGraph);
+        return true;
+    }
+}
+
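+/**
+ * Structural comparison of two graphs, used to verify that decoding an encoded graph reproduces
+ * the original graph (modulo the order of merge ends and phi inputs).
+ */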
+class GraphComparison {
+    public static boolean verifyGraphsEqual(StructuredGraph expectedGraph, StructuredGraph actualGraph) {
+        NodeMap<Node> nodeMapping = new NodeMap<>(expectedGraph);
+        Deque<Pair<Node, Node>> workList = new ArrayDeque<>();
+
+        pushToWorklist(expectedGraph.start(), actualGraph.start(), nodeMapping, workList);
+        while (!workList.isEmpty()) {
+            Pair<Node, Node> pair = workList.pop();
+            Node expectedNode = pair.first;
+            Node actualNode = pair.second;
+            assert expectedNode.getClass() == actualNode.getClass();
+
+            NodeClass<?> nodeClass = expectedNode.getNodeClass();
+            assert nodeClass == actualNode.getNodeClass();
+
+            if (expectedNode instanceof MergeNode) {
+                /* The order of the ends can be different, so ignore them. */
+                verifyNodesEqual(expectedNode.inputs(), actualNode.inputs(), nodeMapping, workList, true);
+            } else if (expectedNode instanceof PhiNode) {
+                /* The order of phi inputs can be different, so they are checked manually below. */
+            } else {
+                verifyNodesEqual(expectedNode.inputs(), actualNode.inputs(), nodeMapping, workList, false);
+            }
+            verifyNodesEqual(expectedNode.successors(), actualNode.successors(), nodeMapping, workList, false);
+
+            if (expectedNode instanceof LoopEndNode) {
+                LoopEndNode actualLoopEnd = (LoopEndNode) actualNode;
+                assert actualLoopEnd.loopBegin().loopEnds().snapshot().indexOf(actualLoopEnd) == actualLoopEnd.endIndex();
+            } else {
+                for (int i = 0; i < nodeClass.getData().getCount(); i++) {
+                    Object expectedProperty = nodeClass.getData().get(expectedNode, i);
+                    Object actualProperty = nodeClass.getData().get(actualNode, i);
+                    assert Objects.equals(expectedProperty, actualProperty);
+                }
+            }
+
+            if (expectedNode instanceof EndNode) {
+                /* Visit the merge node, which is the one and only usage of the EndNode. */
+                assert expectedNode.usages().count() == 1;
+                assert actualNode.usages().count() == 1;
+                verifyNodesEqual(expectedNode.usages(), actualNode.usages(), nodeMapping, workList, false);
+            }
+
+            if (expectedNode instanceof AbstractEndNode) {
+                /* Visit the input values of the merge phi functions for this EndNode. */
+                verifyPhis((AbstractEndNode) expectedNode, (AbstractEndNode) actualNode, nodeMapping, workList);
+            }
+        }
+
+        for (Node expectedNode : expectedGraph.getNodes()) {
+            assert nodeMapping.get(expectedNode) != null || (expectedNode.hasNoUsages() && !(expectedNode instanceof FixedNode)) : "expectedNode";
+        }
+        return true;
+    }
+
+    protected static void verifyPhis(AbstractEndNode expectedEndNode, AbstractEndNode actualEndNode, NodeMap<Node> nodeMapping, Deque<Pair<Node, Node>> workList) {
+        AbstractMergeNode expectedMergeNode = expectedEndNode.merge();
+        AbstractMergeNode actualMergeNode = (AbstractMergeNode) nodeMapping.get(expectedMergeNode);
+
+        Iterator<PhiNode> actualPhis = actualMergeNode.phis().iterator();
+        for (PhiNode expectedPhi : expectedMergeNode.phis()) {
+            PhiNode actualPhi = actualPhis.next();
+            verifyNodeEqual(expectedPhi, actualPhi, nodeMapping, workList, false);
+
+            ValueNode expectedPhiInput = expectedPhi.valueAt(expectedEndNode);
+            ValueNode actualPhiInput = actualPhi.valueAt(actualEndNode);
+            verifyNodeEqual(expectedPhiInput, actualPhiInput, nodeMapping, workList, false);
+        }
+        assert !actualPhis.hasNext();
+    }
+
+    private static void verifyNodesEqual(NodeIterable<Node> expectedNodes, NodeIterable<Node> actualNodes, NodeMap<Node> nodeMapping, Deque<Pair<Node, Node>> workList, boolean ignoreEndNode) {
+        Iterator<Node> actualIter = actualNodes.iterator();
+        for (Node expectedNode : expectedNodes) {
+            verifyNodeEqual(expectedNode, actualIter.next(), nodeMapping, workList, ignoreEndNode);
+        }
+        assert !actualIter.hasNext();
+    }
+
+    protected static void verifyNodeEqual(Node expectedNode, Node actualNode, NodeMap<Node> nodeMapping, Deque<Pair<Node, Node>> workList, boolean ignoreEndNode) {
+        assert expectedNode.getClass() == actualNode.getClass();
+        if (ignoreEndNode && expectedNode instanceof EndNode) {
+            return;
+        }
+
+        Node existing = nodeMapping.get(expectedNode);
+        if (existing != null) {
+            assert existing == actualNode;
+        } else {
+            pushToWorklist(expectedNode, actualNode, nodeMapping, workList);
+        }
+    }
+
+    protected static void pushToWorklist(Node expectedNode, Node actualNode, NodeMap<Node> nodeMapping, Deque<Pair<Node, Node>> workList) {
+        nodeMapping.set(expectedNode, actualNode);
+        workList.push(new Pair<>(expectedNode, actualNode));
+    }
+}
+
+class Pair<F, S> {
+    public final F first;
+    public final S second;
+
+    public Pair(F first, S second) {
+        this.first = first;
+        this.second = second;
+    }
+}
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/IfNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/IfNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -178,7 +178,7 @@
             }
             return;
         }
-        if (trueSuccessor().hasNoUsages() && falseSuccessor().hasNoUsages()) {
+        if (tool.allUsagesAvailable() && trueSuccessor().hasNoUsages() && falseSuccessor().hasNoUsages()) {
 
             pushNodesThroughIf(tool);
 
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/InvokeWithExceptionNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/InvokeWithExceptionNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -160,7 +160,9 @@
     @Override
     public Map<Object, Object> getDebugProperties(Map<Object, Object> map) {
         Map<Object, Object> debugProperties = super.getDebugProperties(map);
-        debugProperties.put("targetMethod", callTarget.targetName());
+        if (callTarget != null) {
+            debugProperties.put("targetMethod", callTarget.targetName());
+        }
         return debugProperties;
     }
 
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/LoopExitNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/LoopExitNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -45,7 +45,7 @@
     @Override
     public void simplify(SimplifierTool tool) {
         Node prev = this.predecessor();
-        while (prev instanceof BeginNode && prev.hasNoUsages()) {
+        while (tool.allUsagesAvailable() && prev instanceof BeginNode && prev.hasNoUsages()) {
             AbstractBeginNode begin = (AbstractBeginNode) prev;
             prev = prev.predecessor();
             graph().removeFixed(begin);
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/IntegerSwitchNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/IntegerSwitchNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -98,6 +98,15 @@
         gen.emitSwitch(this);
     }
 
+    public AbstractBeginNode successorAtKey(int key) {
+        for (int i = 0; i < keyCount(); i++) {
+            if (keys[i] == key) {
+                return blockSuccessor(keySuccessorIndex(i));
+            }
+        }
+        return blockSuccessor(keySuccessorIndex(keyCount()));
+    }
+
     @Override
     public void simplify(SimplifierTool tool) {
         if (blockSuccessorCount() == 1) {
@@ -105,20 +114,15 @@
             graph().removeSplitPropagate(this, defaultSuccessor());
         } else if (value() instanceof ConstantNode) {
             int constant = value().asJavaConstant().asInt();
-
-            int survivingEdge = keySuccessorIndex(keyCount());
-            for (int i = 0; i < keyCount(); i++) {
-                if (keys[i] == constant) {
-                    survivingEdge = keySuccessorIndex(i);
+            AbstractBeginNode survivingSuccessor = successorAtKey(constant);
+            for (Node successor : successors()) {
+                if (successor != survivingSuccessor) {
+                    tool.deleteBranch(successor);
                 }
             }
-            for (int i = 0; i < blockSuccessorCount(); i++) {
-                if (i != survivingEdge) {
-                    tool.deleteBranch(blockSuccessor(i));
-                }
-            }
-            tool.addToWorkList(blockSuccessor(survivingEdge));
-            graph().removeSplit(this, blockSuccessor(survivingEdge));
+            tool.addToWorkList(survivingSuccessor);
+            graph().removeSplit(this, survivingSuccessor);
+
         } else if (value().stamp() instanceof IntegerStamp) {
             IntegerStamp integerStamp = (IntegerStamp) value().stamp();
             if (!integerStamp.isUnrestricted()) {
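/*
 * Standalone sketch, not part of the patch: the lookup performed by the new
 * successorAtKey method above. The switch keys are scanned for a match and the
 * default successor index is used when no key matches. Plain ints stand in for
 * the AbstractBeginNode successors.
 */
class SwitchLookupSketch {
    static int successorIndexForKey(int[] keys, int[] keySuccessorIndex, int defaultIndex, int key) {
        for (int i = 0; i < keys.length; i++) {
            if (keys[i] == key) {
                return keySuccessorIndex[i];
            }
        }
        return defaultIndex;
    }

    public static void main(String[] args) {
        int[] keys = {1, 3, 7};
        int[] successorIndex = {0, 1, 2};
        System.out.println(successorIndexForKey(keys, successorIndex, 3, 7));  // 2
        System.out.println(successorIndexForKey(keys, successorIndex, 3, 42)); // 3 (default)
    }
}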
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/MemoryAnchorNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/MemoryAnchorNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -45,7 +45,7 @@
 
     @Override
     public Node canonical(CanonicalizerTool tool) {
-        return hasNoUsages() ? null : this;
+        return tool.allUsagesAvailable() && hasNoUsages() ? null : this;
     }
 
     @NodeIntrinsic
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/ReadNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/ReadNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -71,7 +71,7 @@
 
     @Override
     public Node canonical(CanonicalizerTool tool) {
-        if (hasNoUsages()) {
+        if (tool.allUsagesAvailable() && hasNoUsages()) {
             if (getGuard() != null && !(getGuard() instanceof FixedNode)) {
                 // The guard is necessary even if the read goes away.
                 return new ValueAnchorNode((ValueNode) getGuard());
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/UnboxNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/UnboxNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -79,7 +79,7 @@
 
     @Override
     public ValueNode canonical(CanonicalizerTool tool, ValueNode forValue) {
-        if (hasNoUsages() && StampTool.isPointerNonNull(forValue)) {
+        if (tool.allUsagesAvailable() && hasNoUsages() && StampTool.isPointerNonNull(forValue)) {
             return null;
         }
         ValueNode synonym = findSynonym(tool.getMetaAccess(), tool.getConstantReflection(), forValue, boxingKind);
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/ValueAnchorNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/ValueAnchorNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -67,7 +67,7 @@
                 break;
             }
         }
-        if (hasNoUsages() && next() instanceof FixedAccessNode) {
+        if (tool.allUsagesAvailable() && hasNoUsages() && next() instanceof FixedAccessNode) {
             FixedAccessNode currentNext = (FixedAccessNode) next();
             if (currentNext.getGuard() == anchored) {
                 GraphUtil.removeFixedWithUnusedInputs(this);
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/java/InstanceOfNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/java/InstanceOfNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -80,7 +80,7 @@
             if (result != null) {
                 return result;
             }
-            Assumptions assumptions = graph().getAssumptions();
+            Assumptions assumptions = graph() == null ? null : graph().getAssumptions();
             if (assumptions != null) {
                 AssumptionResult<ResolvedJavaType> leafConcreteSubtype = stampType.findLeafConcreteSubtype();
                 if (leafConcreteSubtype != null) {
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/java/LoadFieldNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/java/LoadFieldNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -60,7 +60,7 @@
     }
 
     public ValueNode canonical(CanonicalizerTool tool, ValueNode forObject) {
-        if (hasNoUsages() && !isVolatile() && (isStatic() || StampTool.isPointerNonNull(forObject.stamp()))) {
+        if (tool.allUsagesAvailable() && hasNoUsages() && !isVolatile() && (isStatic() || StampTool.isPointerNonNull(forObject.stamp()))) {
             return null;
         }
         MetaAccessProvider metaAccess = tool.getMetaAccess();
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/util/GraphUtil.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/util/GraphUtil.java	Fri Apr 10 16:58:26 2015 -0700
@@ -521,6 +521,11 @@
             return canonicalizeReads;
         }
 
+        @Override
+        public boolean allUsagesAvailable() {
+            return true;
+        }
+
         public void deleteBranch(Node branch) {
             branch.predecessor().replaceFirstSuccessor(branch, null);
             GraphUtil.killCFG(branch, this);
--- a/graal/com.oracle.graal.options/src/com/oracle/graal/options/OptionUtils.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.options/src/com/oracle/graal/options/OptionUtils.java	Fri Apr 10 16:58:26 2015 -0700
@@ -91,7 +91,9 @@
             } else if (optionType == Double.class) {
                 value = Double.parseDouble(valueString);
             } else if (optionType == Integer.class) {
-                value = Integer.parseInt(valueString);
+                value = Integer.valueOf((int) parseLong(valueString));
+            } else if (optionType == Long.class) {
+                value = Long.valueOf(parseLong(valueString));
             } else if (optionType == String.class) {
                 value = valueString;
             }
@@ -118,6 +120,27 @@
         return true;
     }
 
+    private static long parseLong(String v) {
+        String valueString = v.toLowerCase();
+        long scale = 1;
+        if (valueString.endsWith("k")) {
+            scale = 1024L;
+        } else if (valueString.endsWith("m")) {
+            scale = 1024L * 1024L;
+        } else if (valueString.endsWith("g")) {
+            scale = 1024L * 1024L * 1024L;
+        } else if (valueString.endsWith("t")) {
+            scale = 1024L * 1024L * 1024L * 1024L;
+        }
+
+        if (scale != 1) {
+            /* Remove trailing scale character. */
+            valueString = valueString.substring(0, valueString.length() - 1);
+        }
+
+        return Long.parseLong(valueString) * scale;
+    }
+
     public static void printNoMatchMessage(SortedMap<String, OptionDescriptor> options, String optionName, String prefix) {
         OptionDescriptor desc = options.get(optionName);
         if (desc != null) {
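/*
 * Standalone sketch, not part of the patch: the suffix scaling applied by the new
 * parseLong helper above. A trailing k/m/g/t multiplies the numeric part by the
 * matching power of 1024. This mirrors the logic for illustration only.
 */
class OptionScaleSketch {
    static long parseScaled(String v) {
        String s = v.toLowerCase();
        long scale = 1;
        char last = s.charAt(s.length() - 1);
        switch (last) {
            case 'k': scale = 1024L; break;
            case 'm': scale = 1024L * 1024L; break;
            case 'g': scale = 1024L * 1024L * 1024L; break;
            case 't': scale = 1024L * 1024L * 1024L * 1024L; break;
            default: break;
        }
        if (scale != 1) {
            // Remove the trailing scale character before parsing the digits.
            s = s.substring(0, s.length() - 1);
        }
        return Long.parseLong(s) * scale;
    }

    public static void main(String[] args) {
        System.out.println(parseScaled("64m")); // 67108864
        System.out.println(parseScaled("2g"));  // 2147483648
        System.out.println(parseScaled("512")); // 512
    }
}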
--- a/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/CanonicalizerPhase.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/CanonicalizerPhase.java	Fri Apr 10 16:58:26 2015 -0700
@@ -416,6 +416,11 @@
             public boolean canonicalizeReads() {
                 return canonicalizeReads;
             }
+
+            @Override
+            public boolean allUsagesAvailable() {
+                return true;
+            }
         }
     }
 
--- a/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/DominatorConditionalEliminationPhase.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/DominatorConditionalEliminationPhase.java	Fri Apr 10 16:58:26 2015 -0700
@@ -395,8 +395,7 @@
                     node.replaceAndDelete(guard);
                 } else {
                     DeoptimizeNode deopt = node.graph().add(new DeoptimizeNode(node.action(), node.reason()));
-                    Block block = nodeToBlock.apply(node);
-                    AbstractBeginNode beginNode = block.getBeginNode();
+                    AbstractBeginNode beginNode = (AbstractBeginNode) node.getAnchor();
                     FixedNode next = beginNode.next();
                     beginNode.setNext(deopt);
                     GraphUtil.killCFG(next);
--- a/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/inlining/InliningUtil.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/inlining/InliningUtil.java	Fri Apr 10 16:58:26 2015 -0700
@@ -243,8 +243,6 @@
         assert inlineGraph.getGuardsStage().ordinal() >= graph.getGuardsStage().ordinal();
         assert !invokeNode.graph().isAfterFloatingReadPhase() : "inline isn't handled correctly after floating reads phase";
 
-        FrameState stateAfter = invoke.stateAfter();
-        assert stateAfter == null || stateAfter.isAlive();
         if (receiverNullCheck && !((MethodCallTargetNode) invoke.callTarget()).isStatic()) {
             nonNullReceiver(invoke);
         }
@@ -288,22 +286,69 @@
         assert invokeNode.predecessor() != null;
 
         Map<Node, Node> duplicates = graph.addDuplicates(nodes, inlineGraph, inlineGraph.getNodeCount(), localReplacement);
-        FixedNode firstCFGNodeDuplicate = (FixedNode) duplicates.get(firstCFGNode);
-        invokeNode.replaceAtPredecessor(firstCFGNodeDuplicate);
+
+        FrameState stateAfter = invoke.stateAfter();
+        assert stateAfter == null || stateAfter.isAlive();
 
         FrameState stateAtExceptionEdge = null;
         if (invoke instanceof InvokeWithExceptionNode) {
             InvokeWithExceptionNode invokeWithException = ((InvokeWithExceptionNode) invoke);
             if (unwindNode != null) {
+                ExceptionObjectNode obj = (ExceptionObjectNode) invokeWithException.exceptionEdge();
+                stateAtExceptionEdge = obj.stateAfter();
+            }
+        }
+
+        processSimpleInfopoints(invoke, inlineGraph, duplicates);
+        if (stateAfter != null) {
+            processFrameStates(invoke, inlineGraph, duplicates, stateAtExceptionEdge, returnNodes.size() > 1);
+            int callerLockDepth = stateAfter.nestedLockDepth();
+            if (callerLockDepth != 0) {
+                for (MonitorIdNode original : inlineGraph.getNodes(MonitorIdNode.TYPE)) {
+                    MonitorIdNode monitor = (MonitorIdNode) duplicates.get(original);
+                    processMonitorId(invoke, monitor);
+                }
+            }
+        } else {
+            assert checkContainsOnlyInvalidOrAfterFrameState(duplicates);
+        }
+
+        firstCFGNode = (FixedNode) duplicates.get(firstCFGNode);
+        for (int i = 0; i < returnNodes.size(); i++) {
+            returnNodes.set(i, (ReturnNode) duplicates.get(returnNodes.get(i)));
+        }
+        if (unwindNode != null) {
+            unwindNode = (UnwindNode) duplicates.get(unwindNode);
+        }
+
+        finishInlining(invoke, graph, firstCFGNode, returnNodes, unwindNode, inlineGraph.getAssumptions(), inlineGraph.getInlinedMethods(), canonicalizedNodes);
+
+        GraphUtil.killCFG(invokeNode);
+
+        return duplicates;
+    }
+
+    public static ValueNode finishInlining(Invoke invoke, StructuredGraph graph, FixedNode firstNode, List<ReturnNode> returnNodes, UnwindNode unwindNode, Assumptions inlinedAssumptions,
+                    Set<ResolvedJavaMethod> inlinedMethods, List<Node> canonicalizedNodes) {
+        FixedNode invokeNode = invoke.asNode();
+        FrameState stateAfter = invoke.stateAfter();
+        assert stateAfter == null || stateAfter.isAlive();
+
+        invokeNode.replaceAtPredecessor(firstNode);
+
+        if (invoke instanceof InvokeWithExceptionNode) {
+            InvokeWithExceptionNode invokeWithException = ((InvokeWithExceptionNode) invoke);
+            if (unwindNode != null) {
                 assert unwindNode.predecessor() != null;
                 assert invokeWithException.exceptionEdge().successors().count() == 1;
                 ExceptionObjectNode obj = (ExceptionObjectNode) invokeWithException.exceptionEdge();
-                stateAtExceptionEdge = obj.stateAfter();
-                UnwindNode unwindDuplicate = (UnwindNode) duplicates.get(unwindNode);
-                obj.replaceAtUsages(unwindDuplicate.exception());
+                obj.replaceAtUsages(unwindNode.exception());
                 Node n = obj.next();
                 obj.setNext(null);
-                unwindDuplicate.replaceAndDelete(n);
+                unwindNode.replaceAndDelete(n);
+
+                obj.replaceAtPredecessor(null);
+                obj.safeDelete();
             } else {
                 invokeWithException.killExceptionEdge();
             }
@@ -317,66 +362,50 @@
                 graph.removeFixed(begin);
             }
         } else {
-            if (unwindNode != null) {
-                UnwindNode unwindDuplicate = (UnwindNode) duplicates.get(unwindNode);
+            if (unwindNode != null && !unwindNode.isDeleted()) {
                 DeoptimizeNode deoptimizeNode = graph.add(new DeoptimizeNode(DeoptimizationAction.InvalidateRecompile, DeoptimizationReason.NotCompiledExceptionHandler));
-                unwindDuplicate.replaceAndDelete(deoptimizeNode);
+                unwindNode.replaceAndDelete(deoptimizeNode);
             }
         }
 
-        processSimpleInfopoints(invoke, inlineGraph, duplicates);
-        if (stateAfter != null) {
-            processFrameStates(invoke, inlineGraph, duplicates, stateAtExceptionEdge, returnNodes.size() > 1);
-            int callerLockDepth = stateAfter.nestedLockDepth();
-            if (callerLockDepth != 0) {
-                for (MonitorIdNode original : inlineGraph.getNodes(MonitorIdNode.TYPE)) {
-                    MonitorIdNode monitor = (MonitorIdNode) duplicates.get(original);
-                    monitor.setLockDepth(monitor.getLockDepth() + callerLockDepth);
-                }
-            }
-        } else {
-            assert checkContainsOnlyInvalidOrAfterFrameState(duplicates);
-        }
+        ValueNode returnValue;
         if (!returnNodes.isEmpty()) {
             FixedNode n = invoke.next();
             invoke.setNext(null);
             if (returnNodes.size() == 1) {
-                ReturnNode returnNode = (ReturnNode) duplicates.get(returnNodes.get(0));
-                Node returnValue = returnNode.result();
+                ReturnNode returnNode = returnNodes.get(0);
+                returnValue = returnNode.result();
                 invokeNode.replaceAtUsages(returnValue);
                 returnNode.replaceAndDelete(n);
             } else {
-                ArrayList<ReturnNode> returnDuplicates = new ArrayList<>(returnNodes.size());
-                for (ReturnNode returnNode : returnNodes) {
-                    returnDuplicates.add((ReturnNode) duplicates.get(returnNode));
-                }
                 AbstractMergeNode merge = graph.add(new MergeNode());
                 merge.setStateAfter(stateAfter);
-                ValueNode returnValue = mergeReturns(merge, returnDuplicates, canonicalizedNodes);
+                returnValue = mergeReturns(merge, returnNodes, canonicalizedNodes);
                 invokeNode.replaceAtUsages(returnValue);
                 merge.setNext(n);
             }
+        } else {
+            returnValue = null;
+            invokeNode.replaceAtUsages(null);
+            GraphUtil.killCFG(invoke.next());
         }
 
-        invokeNode.replaceAtUsages(null);
-        GraphUtil.killCFG(invokeNode);
-
         // Copy assumptions from inlinee to caller
         Assumptions assumptions = graph.getAssumptions();
         if (assumptions != null) {
-            if (inlineGraph.getAssumptions() != null) {
-                assumptions.record(inlineGraph.getAssumptions());
+            if (inlinedAssumptions != null) {
+                assumptions.record(inlinedAssumptions);
             }
         } else {
-            assert inlineGraph.getAssumptions() == null : "cannot inline graph which makes assumptions into a graph that doesn't: " + inlineGraph + " -> " + graph;
+            assert inlinedAssumptions == null : "cannot inline graph which makes assumptions into a graph that doesn't";
         }
 
         // Copy inlined methods from inlinee to caller
-        if (inlineGraph.isInlinedMethodRecordingEnabled() && graph.isInlinedMethodRecordingEnabled()) {
-            graph.getInlinedMethods().addAll(inlineGraph.getInlinedMethods());
+        if (inlinedMethods != null && graph.getInlinedMethods() != null) {
+            graph.getInlinedMethods().addAll(inlinedMethods);
         }
 
-        return duplicates;
+        return returnValue;
     }
 
     private static String formatGraph(StructuredGraph graph) {
@@ -390,10 +419,24 @@
         if (inlineGraph.getNodes(SimpleInfopointNode.TYPE).isEmpty()) {
             return;
         }
-        BytecodePosition pos = new BytecodePosition(toBytecodePosition(invoke.stateAfter()), invoke.asNode().graph().method(), invoke.bci());
+        BytecodePosition pos = null;
         for (SimpleInfopointNode original : inlineGraph.getNodes(SimpleInfopointNode.TYPE)) {
             SimpleInfopointNode duplicate = (SimpleInfopointNode) duplicates.get(original);
-            duplicate.addCaller(pos);
+            pos = processSimpleInfopoint(invoke, duplicate, pos);
+        }
+    }
+
+    public static BytecodePosition processSimpleInfopoint(Invoke invoke, SimpleInfopointNode infopointNode, BytecodePosition incomingPos) {
+        BytecodePosition pos = incomingPos != null ? incomingPos : new BytecodePosition(toBytecodePosition(invoke.stateAfter()), invoke.asNode().graph().method(), invoke.bci());
+        infopointNode.addCaller(pos);
+        return pos;
+    }
+
+    public static void processMonitorId(Invoke invoke, MonitorIdNode monitorIdNode) {
+        FrameState stateAfter = invoke.stateAfter();
+        if (stateAfter != null) {
+            int callerLockDepth = stateAfter.nestedLockDepth();
+            monitorIdNode.setLockDepth(monitorIdNode.getLockDepth() + callerLockDepth);
         }
     }
 
@@ -411,57 +454,70 @@
         for (FrameState original : inlineGraph.getNodes(FrameState.TYPE)) {
             FrameState frameState = (FrameState) duplicates.get(original);
             if (frameState != null && frameState.isAlive()) {
-                if (frameState.bci == BytecodeFrame.AFTER_BCI) {
-                    /*
-                     * pop return kind from invoke's stateAfter and replace with this frameState's
-                     * return value (top of stack)
-                     */
-                    FrameState stateAfterReturn = stateAtReturn;
-                    if (invokeReturnKind != Kind.Void && (alwaysDuplicateStateAfter || (frameState.stackSize() > 0 && stateAfterReturn.stackAt(0) != frameState.stackAt(0)))) {
-                        stateAfterReturn = stateAtReturn.duplicateModified(invokeReturnKind, frameState.stackAt(0));
-                    }
-                    frameState.replaceAndDelete(stateAfterReturn);
-                } else if (stateAtExceptionEdge != null && isStateAfterException(frameState)) {
-                    /*
-                     * pop exception object from invoke's stateAfter and replace with this
-                     * frameState's exception object (top of stack)
-                     */
-                    FrameState stateAfterException = stateAtExceptionEdge;
-                    if (frameState.stackSize() > 0 && stateAtExceptionEdge.stackAt(0) != frameState.stackAt(0)) {
-                        stateAfterException = stateAtExceptionEdge.duplicateModified(Kind.Object, frameState.stackAt(0));
-                    }
-                    frameState.replaceAndDelete(stateAfterException);
-                } else if (frameState.bci == BytecodeFrame.UNWIND_BCI || frameState.bci == BytecodeFrame.AFTER_EXCEPTION_BCI) {
-                    handleMissingAfterExceptionFrameState(frameState);
-                } else if (frameState.bci == BytecodeFrame.BEFORE_BCI) {
-                    // This is an intrinsic. Deoptimizing within an intrinsic
-                    // must re-execute the intrinsified invocation
-                    assert frameState.outerFrameState() == null;
-                    NodeInputList<ValueNode> invokeArgsList = invoke.callTarget().arguments();
-                    ValueNode[] invokeArgs = invokeArgsList.isEmpty() ? NO_ARGS : invokeArgsList.toArray(new ValueNode[invokeArgsList.size()]);
-                    FrameState stateBeforeCall = stateAtReturn.duplicateModifiedBeforeCall(invoke.bci(), invokeReturnKind, invokeArgs);
-                    frameState.replaceAndDelete(stateBeforeCall);
-                } else {
-                    // only handle the outermost frame states
-                    if (frameState.outerFrameState() == null) {
-                        assert checkInlineeFrameState(invoke, inlineGraph, frameState);
-                        if (outerFrameState == null) {
-                            outerFrameState = stateAtReturn.duplicateModifiedDuringCall(invoke.bci(), invokeReturnKind);
-                        }
-                        frameState.setOuterFrameState(outerFrameState);
-                    }
+                if (outerFrameState == null) {
+                    outerFrameState = stateAtReturn.duplicateModifiedDuringCall(invoke.bci(), invokeReturnKind);
                 }
+                processFrameState(frameState, invoke, inlineGraph.method(), stateAtExceptionEdge, outerFrameState, alwaysDuplicateStateAfter);
             }
         }
     }
 
-    static boolean checkInlineeFrameState(Invoke invoke, StructuredGraph inlineGraph, FrameState frameState) {
+    public static FrameState processFrameState(FrameState frameState, Invoke invoke, ResolvedJavaMethod inlinedMethod, FrameState stateAtExceptionEdge, FrameState outerFrameState,
+                    boolean alwaysDuplicateStateAfter) {
+
+        FrameState stateAtReturn = invoke.stateAfter();
+        Kind invokeReturnKind = invoke.asNode().getKind();
+
+        if (frameState.bci == BytecodeFrame.AFTER_BCI) {
+            /*
+             * pop return kind from invoke's stateAfter and replace with this frameState's return
+             * value (top of stack)
+             */
+            FrameState stateAfterReturn = stateAtReturn;
+            if (invokeReturnKind != Kind.Void && (alwaysDuplicateStateAfter || (frameState.stackSize() > 0 && stateAfterReturn.stackAt(0) != frameState.stackAt(0)))) {
+                stateAfterReturn = stateAtReturn.duplicateModified(invokeReturnKind, frameState.stackAt(0));
+            }
+            frameState.replaceAndDelete(stateAfterReturn);
+            return stateAfterReturn;
+        } else if (stateAtExceptionEdge != null && isStateAfterException(frameState)) {
+            /*
+             * pop exception object from invoke's stateAfter and replace with this frameState's
+             * exception object (top of stack)
+             */
+            FrameState stateAfterException = stateAtExceptionEdge;
+            if (frameState.stackSize() > 0 && stateAtExceptionEdge.stackAt(0) != frameState.stackAt(0)) {
+                stateAfterException = stateAtExceptionEdge.duplicateModified(Kind.Object, frameState.stackAt(0));
+            }
+            frameState.replaceAndDelete(stateAfterException);
+            return stateAfterException;
+        } else if (frameState.bci == BytecodeFrame.UNWIND_BCI || frameState.bci == BytecodeFrame.AFTER_EXCEPTION_BCI) {
+            return handleMissingAfterExceptionFrameState(frameState);
+        } else if (frameState.bci == BytecodeFrame.BEFORE_BCI) {
+            // This is an intrinsic. Deoptimizing within an intrinsic
+            // must re-execute the intrinsified invocation
+            assert frameState.outerFrameState() == null;
+            NodeInputList<ValueNode> invokeArgsList = invoke.callTarget().arguments();
+            ValueNode[] invokeArgs = invokeArgsList.isEmpty() ? NO_ARGS : invokeArgsList.toArray(new ValueNode[invokeArgsList.size()]);
+            FrameState stateBeforeCall = stateAtReturn.duplicateModifiedBeforeCall(invoke.bci(), invokeReturnKind, invokeArgs);
+            frameState.replaceAndDelete(stateBeforeCall);
+            return stateBeforeCall;
+        } else {
+            // only handle the outermost frame states
+            if (frameState.outerFrameState() == null) {
+                assert checkInlineeFrameState(invoke, inlinedMethod, frameState);
+                frameState.setOuterFrameState(outerFrameState);
+            }
+            return frameState;
+        }
+    }
+
+    static boolean checkInlineeFrameState(Invoke invoke, ResolvedJavaMethod inlinedMethod, FrameState frameState) {
         assert frameState.bci != BytecodeFrame.AFTER_EXCEPTION_BCI : frameState;
         assert frameState.bci != BytecodeFrame.BEFORE_BCI : frameState;
         assert frameState.bci != BytecodeFrame.UNKNOWN_BCI : frameState;
         assert frameState.bci != BytecodeFrame.UNWIND_BCI : frameState;
         if (frameState.bci != BytecodeFrame.INVALID_FRAMESTATE_BCI) {
-            if (frameState.method().equals(inlineGraph.method())) {
+            if (frameState.method().equals(inlinedMethod)) {
                 // Normal inlining expects all outermost inlinee frame states to
                 // denote the inlinee method
             } else if (frameState.method().equals(invoke.callTarget().targetMethod())) {
@@ -470,7 +526,7 @@
                 // partial intrinsic, these calls are given frame states
                 // that exclude the outer frame state denoting a position
                 // in the intrinsic code.
-                assert inlineGraph.method().getAnnotation(MethodSubstitution.class) != null : "expected an intrinsic when inlinee frame state matches method of call target but does not match the method of the inlinee graph: " +
+                assert inlinedMethod.getAnnotation(MethodSubstitution.class) != null : "expected an intrinsic when inlinee frame state matches method of call target but does not match the method of the inlinee graph: " +
                                 frameState;
             } else {
                 throw new AssertionError(frameState.toString());
@@ -485,7 +541,7 @@
         return frameState.bci == BytecodeFrame.AFTER_EXCEPTION_BCI || (frameState.bci == BytecodeFrame.UNWIND_BCI && !frameState.method().isSynchronized());
     }
 
-    protected static void handleMissingAfterExceptionFrameState(FrameState nonReplaceableFrameState) {
+    protected static FrameState handleMissingAfterExceptionFrameState(FrameState nonReplaceableFrameState) {
         Graph graph = nonReplaceableFrameState.graph();
         NodeWorkList workList = graph.createNodeWorkList();
         workList.add(nonReplaceableFrameState);
@@ -519,6 +575,7 @@
                 }
             }
         }
+        return null;
     }
 
     public static ValueNode mergeReturns(AbstractMergeNode merge, List<? extends ReturnNode> returnNodes, List<Node> canonicalizedNodes) {
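/*
 * Standalone sketch, not part of the patch: the lock-depth adjustment that the new
 * processMonitorId helper performs. Monitors of an inlined method are renumbered by
 * adding the caller's nested lock depth at the call site; when the caller holds no
 * locks (callerLockDepth == 0 above) the adjustment is skipped entirely.
 */
class MonitorDepthSketch {
    static int adjustLockDepth(int inlineeLockDepth, int callerLockDepth) {
        return inlineeLockDepth + callerLockDepth;
    }

    public static void main(String[] args) {
        // The inlinee's first monitor (depth 0) while the caller already holds two locks:
        System.out.println(adjustLockDepth(0, 2)); // 2
        // Caller holds no locks: depths are unchanged.
        System.out.println(adjustLockDepth(1, 0)); // 1
    }
}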
--- a/graal/com.oracle.graal.replacements.amd64/src/com/oracle/graal/replacements/amd64/AMD64CountLeadingZerosNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.replacements.amd64/src/com/oracle/graal/replacements/amd64/AMD64CountLeadingZerosNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -33,7 +33,7 @@
 import com.oracle.graal.nodes.spi.*;
 
 /**
- * Count the number of leading zeros.
+ * Count the number of leading zeros using the {@code lzcntq} or {@code lzcntl} instructions.
  */
 @NodeInfo
 public final class AMD64CountLeadingZerosNode extends UnaryNode implements LIRLowerable {
@@ -57,36 +57,23 @@
         return updateStamp(StampFactory.forInteger(Kind.Int, min, max));
     }
 
-    @Override
-    public ValueNode canonical(CanonicalizerTool tool, ValueNode forValue) {
-        if (forValue.isConstant()) {
-            JavaConstant c = forValue.asJavaConstant();
-            if (forValue.getKind() == Kind.Int) {
+    public static ValueNode tryFold(ValueNode value) {
+        if (value.isConstant()) {
+            JavaConstant c = value.asJavaConstant();
+            if (value.getKind() == Kind.Int) {
                 return ConstantNode.forInt(Integer.numberOfLeadingZeros(c.asInt()));
             } else {
                 return ConstantNode.forInt(Long.numberOfLeadingZeros(c.asLong()));
             }
         }
-        return this;
+        return null;
     }
 
-    /**
-     * Raw intrinsic for lzcntq instruction.
-     *
-     * @param v
-     * @return number of trailing zeros
-     */
-    @NodeIntrinsic
-    public static native int count(long v);
-
-    /**
-     * Raw intrinsic for lzcntl instruction.
-     *
-     * @param v
-     * @return number of trailing zeros
-     */
-    @NodeIntrinsic
-    public static native int count(int v);
+    @Override
+    public ValueNode canonical(CanonicalizerTool tool, ValueNode forValue) {
+        ValueNode folded = tryFold(forValue);
+        return folded != null ? folded : this;
+    }
 
     @Override
     public void generate(NodeLIRBuilderTool gen) {
--- a/graal/com.oracle.graal.replacements.amd64/src/com/oracle/graal/replacements/amd64/AMD64CountTrailingZerosNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.replacements.amd64/src/com/oracle/graal/replacements/amd64/AMD64CountTrailingZerosNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -33,7 +33,7 @@
 import com.oracle.graal.nodes.spi.*;
 
 /**
- * Count the number of trailing zeros.
+ * Count the number of trailing zeros using the {@code tzcntq} or {@code tzcntl} instructions.
  */
 @NodeInfo
 public final class AMD64CountTrailingZerosNode extends UnaryNode implements LIRLowerable {
@@ -53,36 +53,23 @@
         return updateStamp(StampFactory.forInteger(Kind.Int, min, max));
     }
 
-    @Override
-    public ValueNode canonical(CanonicalizerTool tool, ValueNode forValue) {
-        if (forValue.isConstant()) {
-            JavaConstant c = forValue.asJavaConstant();
-            if (forValue.getKind() == Kind.Int) {
+    public static ValueNode tryFold(ValueNode value) {
+        if (value.isConstant()) {
+            JavaConstant c = value.asJavaConstant();
+            if (value.getKind() == Kind.Int) {
                 return ConstantNode.forInt(Integer.numberOfTrailingZeros(c.asInt()));
             } else {
                 return ConstantNode.forInt(Long.numberOfTrailingZeros(c.asLong()));
             }
         }
-        return this;
+        return null;
     }
 
-    /**
-     * Raw intrinsic for tzcntq instruction.
-     *
-     * @param v
-     * @return number of trailing zeros
-     */
-    @NodeIntrinsic
-    public static native int count(long v);
-
-    /**
-     * Raw intrinsic for tzcntl instruction.
-     *
-     * @param v
-     * @return number of trailing zeros
-     */
-    @NodeIntrinsic
-    public static native int count(int v);
+    @Override
+    public ValueNode canonical(CanonicalizerTool tool, ValueNode forValue) {
+        ValueNode folded = tryFold(forValue);
+        return folded != null ? folded : this;
+    }
 
     @Override
     public void generate(NodeLIRBuilderTool gen) {
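/*
 * Standalone sketch, not part of the patch: the constant folding that the new tryFold
 * helpers perform. A constant input is replaced by the JDK's own leading/trailing
 * zero count; a non-constant input yields no folding (null in the node code).
 */
class CountZerosFoldSketch {
    public static void main(String[] args) {
        // What AMD64CountLeadingZerosNode.tryFold produces for constant inputs:
        System.out.println(Integer.numberOfLeadingZeros(1));   // 31
        System.out.println(Long.numberOfLeadingZeros(1L));     // 63
        // What AMD64CountTrailingZerosNode.tryFold produces for constant inputs:
        System.out.println(Integer.numberOfTrailingZeros(0x80000000)); // 31
        System.out.println(Long.numberOfTrailingZeros(0x10L));         // 4
    }
}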
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.replacements.amd64/src/com/oracle/graal/replacements/amd64/AMD64GraphBuilderPlugins.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.replacements.amd64;
+
+import static com.oracle.graal.compiler.target.Backend.*;
+import static com.oracle.graal.replacements.amd64.AMD64MathIntrinsicNode.Operation.*;
+import sun.misc.*;
+
+import com.oracle.graal.amd64.*;
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.graphbuilderconf.GraphBuilderConfiguration.Plugins;
+import com.oracle.graal.graphbuilderconf.*;
+import com.oracle.graal.graphbuilderconf.InvocationPlugins.Receiver;
+import com.oracle.graal.graphbuilderconf.InvocationPlugins.Registration;
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.java.*;
+import com.oracle.graal.replacements.*;
+
+public class AMD64GraphBuilderPlugins {
+
+    public static void register(Plugins plugins, ForeignCallsProvider foreignCalls, AMD64 arch) {
+        InvocationPlugins invocationPlugins = plugins.getInvocationPlugins();
+        registerIntegerLongPlugins(invocationPlugins, IntegerSubstitutions.class, Kind.Int, arch);
+        registerIntegerLongPlugins(invocationPlugins, LongSubstitutions.class, Kind.Long, arch);
+        registerUnsafePlugins(invocationPlugins);
+        registerMathPlugins(invocationPlugins, foreignCalls);
+    }
+
+    private static void registerIntegerLongPlugins(InvocationPlugins plugins, Class<?> substituteDeclaringClass, Kind kind, AMD64 arch) {
+        Class<?> declaringClass = kind.toBoxedJavaClass();
+        Class<?> type = kind.toJavaClass();
+        Registration r = new Registration(plugins, declaringClass);
+        if (arch.getFlags().contains(AMD64.Flag.UseCountLeadingZerosInstruction)) {
+            r.register1("numberOfLeadingZeros", type, new InvocationPlugin() {
+                public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode value) {
+                    ValueNode folded = AMD64CountLeadingZerosNode.tryFold(value);
+                    if (folded != null) {
+                        b.addPush(folded);
+                    } else {
+                        b.addPush(new AMD64CountLeadingZerosNode(value));
+                    }
+                    return true;
+                }
+            });
+        } else {
+            r.registerMethodSubstitution(substituteDeclaringClass, "numberOfLeadingZeros", type);
+        }
+        if (arch.getFlags().contains(AMD64.Flag.UseCountTrailingZerosInstruction)) {
+            r.register1("numberOfTrailingZeros", type, new InvocationPlugin() {
+                public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode value) {
+                    ValueNode folded = AMD64CountTrailingZerosNode.tryFold(value);
+                    if (folded != null) {
+                        b.addPush(folded);
+                    } else {
+                        b.addPush(new AMD64CountTrailingZerosNode(value));
+                    }
+                    return true;
+                }
+            });
+        } else {
+            r.registerMethodSubstitution(substituteDeclaringClass, "numberOfTrailingZeros", type);
+        }
+    }
+
+    private static void registerMathPlugins(InvocationPlugins plugins, ForeignCallsProvider foreignCalls) {
+        Registration r = new Registration(plugins, Math.class);
+        r.register1("log", Double.TYPE, new InvocationPlugin() {
+            public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode value) {
+                b.push(Kind.Double, b.recursiveAppend(AMD64MathIntrinsicNode.create(value, LOG)));
+                return true;
+            }
+        });
+        r.register1("log10", Double.TYPE, new InvocationPlugin() {
+            public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode value) {
+                b.push(Kind.Double, b.recursiveAppend(AMD64MathIntrinsicNode.create(value, LOG10)));
+                return true;
+            }
+        });
+        r.registerMethodSubstitution(AMD64MathSubstitutions.class, "sin", double.class);
+        r.registerMethodSubstitution(AMD64MathSubstitutions.class, "cos", double.class);
+        r.registerMethodSubstitution(AMD64MathSubstitutions.class, "tan", double.class);
+        r.registerMethodSubstitution(AMD64MathSubstitutions.class, "pow", double.class, double.class);
+        r.register1("exp", Double.TYPE, new ForeignCallPlugin(foreignCalls, ARITHMETIC_EXP));
+    }
+
+    private static void registerUnsafePlugins(InvocationPlugins plugins) {
+        Registration r = new Registration(plugins, Unsafe.class);
+
+        for (Kind kind : new Kind[]{Kind.Int, Kind.Long, Kind.Object}) {
+            Class<?> javaClass = kind == Kind.Object ? Object.class : kind.toJavaClass();
+
+            r.register4("getAndSet" + kind.name(), Receiver.class, Object.class, long.class, javaClass, new InvocationPlugin() {
+                public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver unsafe, ValueNode object, ValueNode offset, ValueNode value) {
+                    // Emits a null-check for the otherwise unused receiver
+                    unsafe.get();
+                    b.addPush(kind.getStackKind(), new AtomicReadAndWriteNode(object, offset, value, kind, LocationIdentity.any()));
+                    return true;
+                }
+            });
+            if (kind != Kind.Object) {
+                r.register4("getAndAdd" + kind.name(), Receiver.class, Object.class, long.class, javaClass, new InvocationPlugin() {
+                    public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver unsafe, ValueNode object, ValueNode offset, ValueNode delta) {
+                        // Emits a null-check for the otherwise unused receiver
+                        unsafe.get();
+                        b.addPush(kind.getStackKind(), new AtomicReadAndAddNode(object, offset, delta, LocationIdentity.any()));
+                        return true;
+                    }
+                });
+            }
+        }
+    }
+}
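/*
 * Standalone sketch, not part of the patch: the per-CPU-flag dispatch used by
 * registerIntegerLongPlugins above. When the matching AMD64 flag is present, the
 * intrinsic-node plugin is registered; otherwise the pre-existing bytecode method
 * substitution is used. The enum and strings are stand-ins for AMD64.Flag and the
 * actual plugin registrations.
 */
import java.util.EnumSet;
import java.util.Set;

class PluginDispatchSketch {
    enum CpuFlag {
        USE_COUNT_LEADING_ZEROS_INSTRUCTION,
        USE_COUNT_TRAILING_ZEROS_INSTRUCTION
    }

    static String registrationFor(Set<CpuFlag> flags, CpuFlag required) {
        return flags.contains(required) ? "intrinsic node plugin" : "method substitution";
    }

    public static void main(String[] args) {
        Set<CpuFlag> flags = EnumSet.of(CpuFlag.USE_COUNT_LEADING_ZEROS_INSTRUCTION);
        System.out.println(registrationFor(flags, CpuFlag.USE_COUNT_LEADING_ZEROS_INSTRUCTION)); // intrinsic node plugin
        System.out.println(registrationFor(flags, CpuFlag.USE_COUNT_TRAILING_ZEROS_INSTRUCTION)); // method substitution
    }
}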
--- a/graal/com.oracle.graal.replacements.amd64/src/com/oracle/graal/replacements/amd64/AMD64Guards.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package com.oracle.graal.replacements.amd64;
-
-import com.oracle.graal.api.code.*;
-import com.oracle.graal.api.replacements.*;
-import com.oracle.graal.amd64.*;
-
-public class AMD64Guards {
-    public static class CountLeadingZerosSupported implements SubstitutionGuard {
-        private AMD64 arch;
-
-        public CountLeadingZerosSupported(Architecture arch) {
-            this.arch = (AMD64) arch;
-        }
-
-        public boolean execute() {
-            return arch.getFlags().contains(AMD64.Flag.UseCountLeadingZerosInstruction);
-        }
-    }
-
-    public static class CountTrailingZerosSupported implements SubstitutionGuard {
-        private AMD64 arch;
-
-        public CountTrailingZerosSupported(Architecture arch) {
-            this.arch = (AMD64) arch;
-        }
-
-        public boolean execute() {
-            return arch.getFlags().contains(AMD64.Flag.UseCountTrailingZerosInstruction);
-        }
-    }
-}
--- a/graal/com.oracle.graal.replacements.amd64/src/com/oracle/graal/replacements/amd64/AMD64IntegerSubstitutions.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package com.oracle.graal.replacements.amd64;
-
-import com.oracle.graal.api.replacements.*;
-
-@ClassSubstitution(Integer.class)
-public class AMD64IntegerSubstitutions {
-
-    @MethodSubstitution(guard = AMD64Guards.CountLeadingZerosSupported.class)
-    public static int numberOfLeadingZeros(int i) {
-        return AMD64CountLeadingZerosNode.count(i);
-    }
-
-    @MethodSubstitution(guard = AMD64Guards.CountTrailingZerosSupported.class)
-    public static int numberOfTrailingZeros(int i) {
-        return AMD64CountTrailingZerosNode.count(i);
-    }
-}
--- a/graal/com.oracle.graal.replacements.amd64/src/com/oracle/graal/replacements/amd64/AMD64LongSubstitutions.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package com.oracle.graal.replacements.amd64;
-
-import com.oracle.graal.api.replacements.*;
-
-@ClassSubstitution(Long.class)
-public class AMD64LongSubstitutions {
-
-    @MethodSubstitution(guard = AMD64Guards.CountLeadingZerosSupported.class)
-    public static int numberOfLeadingZeros(long i) {
-        return AMD64CountLeadingZerosNode.count(i);
-    }
-
-    @MethodSubstitution(guard = AMD64Guards.CountTrailingZerosSupported.class)
-    public static int numberOfTrailingZeros(long i) {
-        return AMD64CountTrailingZerosNode.count(i);
-    }
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.replacements.amd64/src/com/oracle/graal/replacements/amd64/AMD64MathIntrinsicNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.replacements.amd64;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.compiler.common.*;
+import com.oracle.graal.compiler.common.type.*;
+import com.oracle.graal.graph.*;
+import com.oracle.graal.graph.spi.*;
+import com.oracle.graal.lir.amd64.*;
+import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.nodeinfo.*;
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.calc.*;
+import com.oracle.graal.nodes.spi.*;
+
+@NodeInfo
+public final class AMD64MathIntrinsicNode extends UnaryNode implements ArithmeticLIRLowerable {
+
+    public static final NodeClass<AMD64MathIntrinsicNode> TYPE = NodeClass.create(AMD64MathIntrinsicNode.class);
+    protected final Operation operation;
+
+    public enum Operation {
+        LOG,
+        LOG10,
+        SIN,
+        COS,
+        TAN
+    }
+
+    public Operation operation() {
+        return operation;
+    }
+
+    public static ValueNode create(ValueNode value, Operation op) {
+        ValueNode c = tryConstantFold(value, op);
+        if (c != null) {
+            return c;
+        }
+        return new AMD64MathIntrinsicNode(value, op);
+    }
+
+    protected static ValueNode tryConstantFold(ValueNode value, Operation op) {
+        if (value.isConstant()) {
+            double ret = doCompute(value.asJavaConstant().asDouble(), op);
+            return ConstantNode.forDouble(ret);
+        }
+        return null;
+    }
+
+    protected AMD64MathIntrinsicNode(ValueNode value, Operation op) {
+        super(TYPE, StampFactory.forKind(Kind.Double), value);
+        assert value.stamp() instanceof FloatStamp && PrimitiveStamp.getBits(value.stamp()) == 64;
+        this.operation = op;
+    }
+
+    @Override
+    public void generate(NodeMappableLIRBuilder builder, ArithmeticLIRGenerator lirGen) {
+        AMD64ArithmeticLIRGenerator gen = (AMD64ArithmeticLIRGenerator) lirGen;
+        Value input = builder.operand(getValue());
+        Value result;
+        switch (operation()) {
+            case LOG:
+                result = gen.emitMathLog(input, false);
+                break;
+            case LOG10:
+                result = gen.emitMathLog(input, true);
+                break;
+            case SIN:
+                result = gen.emitMathSin(input);
+                break;
+            case COS:
+                result = gen.emitMathCos(input);
+                break;
+            case TAN:
+                result = gen.emitMathTan(input);
+                break;
+            default:
+                throw GraalInternalError.shouldNotReachHere();
+        }
+        builder.setResult(this, result);
+    }
+
+    @Override
+    public ValueNode canonical(CanonicalizerTool tool, ValueNode forValue) {
+        ValueNode c = tryConstantFold(forValue, operation());
+        if (c != null) {
+            return c;
+        }
+        return this;
+    }
+
+    @NodeIntrinsic
+    public static native double compute(double value, @ConstantNodeParameter Operation op);
+
+    private static double doCompute(double value, Operation op) {
+        switch (op) {
+            case LOG:
+                return Math.log(value);
+            case LOG10:
+                return Math.log10(value);
+            case SIN:
+                return Math.sin(value);
+            case COS:
+                return Math.cos(value);
+            case TAN:
+                return Math.tan(value);
+            default:
+                throw new GraalInternalError("unknown op %s", op);
+        }
+    }
+}
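/*
 * Standalone sketch, not part of the patch: the constant folding performed by
 * AMD64MathIntrinsicNode.create/tryConstantFold above. A constant double input is
 * evaluated eagerly with the matching java.lang.Math routine instead of emitting a
 * node. The enum below mirrors the node's Operation values for illustration only.
 */
class MathIntrinsicFoldSketch {
    enum Operation { LOG, LOG10, SIN, COS, TAN }

    static double fold(double value, Operation op) {
        switch (op) {
            case LOG:   return Math.log(value);
            case LOG10: return Math.log10(value);
            case SIN:   return Math.sin(value);
            case COS:   return Math.cos(value);
            case TAN:   return Math.tan(value);
            default: throw new IllegalArgumentException("unknown op " + op);
        }
    }

    public static void main(String[] args) {
        System.out.println(fold(1.0, Operation.LOG));    // 0.0
        System.out.println(fold(100.0, Operation.LOG10)); // 2.0
        System.out.println(fold(0.0, Operation.SIN));     // 0.0
    }
}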
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.replacements.amd64/src/com/oracle/graal/replacements/amd64/AMD64MathSubstitutions.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.replacements.amd64;
+
+import static com.oracle.graal.compiler.target.Backend.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.graph.Node.ConstantNodeParameter;
+import com.oracle.graal.graph.Node.NodeIntrinsic;
+import com.oracle.graal.nodes.extended.*;
+import com.oracle.graal.replacements.amd64.AMD64MathIntrinsicNode.Operation;
+
+/**
+ * Substitutions for some {@link java.lang.Math} methods that leverage AMD64 instructions for
+ * selected input values.
+ */
+public class AMD64MathSubstitutions {
+
+    private static final double PI_4 = Math.PI / 4;
+
+    /**
+     * Special cases from {@link Math#pow} and __ieee754_pow (in sharedRuntimeTrans.cpp).
+     */
+    public static double pow(double x, double y) {
+        // If the second argument is positive or negative zero, then the result is 1.0.
+        if (y == 0.0D) {
+            return 1;
+        }
+
+        // If the second argument is 1.0, then the result is the same as the first argument.
+        if (y == 1.0D) {
+            return x;
+        }
+
+        // If the second argument is NaN, then the result is NaN.
+        if (Double.isNaN(y)) {
+            return Double.NaN;
+        }
+
+        // If the first argument is NaN and the second argument is nonzero, then the result is NaN.
+        if (Double.isNaN(x) && y != 0.0D) {
+            return Double.NaN;
+        }
+
+        // x**-1 = 1/x
+        if (y == -1.0D) {
+            return 1 / x;
+        }
+
+        // x**2 = x*x
+        if (y == 2.0D) {
+            return x * x;
+        }
+
+        // x**0.5 = sqrt(x)
+        if (y == 0.5D && x >= 0.0D) {
+            return Math.sqrt(x);
+        }
+        return callDouble2(ARITHMETIC_POW, x, y);
+    }
+
+    // NOTE on snippets below:
+    // Math.sin(), .cos() and .tan() guarantee a value within 1 ULP of the
+    // exact result, but x87 trigonometric FPU instructions are only that
+    // accurate within [-pi/4, pi/4]. Examine the passed value and provide
+    // a slow path for inputs outside of that interval.
+
+    public static double sin(double x) {
+        if (Math.abs(x) < PI_4) {
+            return AMD64MathIntrinsicNode.compute(x, Operation.SIN);
+        } else {
+            return callDouble1(ARITHMETIC_SIN, x);
+        }
+    }
+
+    public static double cos(double x) {
+        if (Math.abs(x) < PI_4) {
+            return AMD64MathIntrinsicNode.compute(x, Operation.COS);
+        } else {
+            return callDouble1(ARITHMETIC_COS, x);
+        }
+    }
+
+    public static double tan(double x) {
+        if (Math.abs(x) < PI_4) {
+            return AMD64MathIntrinsicNode.compute(x, Operation.TAN);
+        } else {
+            return callDouble1(ARITHMETIC_TAN, x);
+        }
+    }
+
+    @NodeIntrinsic(value = ForeignCallNode.class, setStampFromReturnType = true)
+    private static native double callDouble1(@ConstantNodeParameter ForeignCallDescriptor descriptor, double value);
+
+    @NodeIntrinsic(value = ForeignCallNode.class, setStampFromReturnType = true)
+    private static native double callDouble2(@ConstantNodeParameter ForeignCallDescriptor descriptor, double a, double b);
+}
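/*
 * Standalone sketch, not part of the patch: the special cases handled by the pow
 * substitution above, plus the pi/4 gate used by the sin/cos/tan substitutions.
 * Math.pow stands in here for the ARITHMETIC_POW runtime stub that the real
 * substitution falls back to.
 */
class MathSubstitutionSketch {
    private static final double PI_4 = Math.PI / 4;

    static double powSpecialCases(double x, double y) {
        if (y == 0.0D) return 1;                       // x**0 == 1
        if (y == 1.0D) return x;                       // x**1 == x
        if (Double.isNaN(y)) return Double.NaN;
        if (Double.isNaN(x) && y != 0.0D) return Double.NaN;
        if (y == -1.0D) return 1 / x;                  // x**-1 == 1/x
        if (y == 2.0D) return x * x;                   // x**2 == x*x
        if (y == 0.5D && x >= 0.0D) return Math.sqrt(x); // x**0.5 == sqrt(x)
        return Math.pow(x, y);                         // stand-in for the runtime call
    }

    static boolean usesFastTrigPath(double x) {
        // x87 trig instructions are only accurate within [-pi/4, pi/4].
        return Math.abs(x) < PI_4;
    }

    public static void main(String[] args) {
        System.out.println(powSpecialCases(3.0, 2.0));  // 9.0
        System.out.println(powSpecialCases(9.0, 0.5));  // 3.0
        System.out.println(powSpecialCases(5.0, -1.0)); // 0.2
        System.out.println(usesFastTrigPath(0.5));      // true: intrinsic path
        System.out.println(usesFastTrigPath(2.0));      // false: slow runtime call
    }
}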
--- a/graal/com.oracle.graal.replacements.amd64/src/com/oracle/graal/replacements/amd64/AMD64Substitutions.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package com.oracle.graal.replacements.amd64;
-
-import static com.oracle.graal.compiler.common.GraalOptions.*;
-
-import com.oracle.graal.amd64.*;
-import com.oracle.graal.api.code.*;
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.api.replacements.*;
-import com.oracle.graal.api.runtime.*;
-import com.oracle.graal.nodes.spi.*;
-
-/**
- * Method substitutions that are VM-independent but AMD64-dependent.
- */
-@ServiceProvider(ReplacementsProvider.class)
-public class AMD64Substitutions implements ReplacementsProvider {
-
-    public void registerReplacements(MetaAccessProvider metaAccess, LoweringProvider lowerer, SnippetReflectionProvider snippetReflection, Replacements replacements, TargetDescription target) {
-        if (Intrinsify.getValue() && target.arch instanceof AMD64) {
-            replacements.registerSubstitutions(Integer.class, AMD64IntegerSubstitutions.class);
-            replacements.registerSubstitutions(Long.class, AMD64LongSubstitutions.class);
-        }
-    }
-
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.replacements.sparc/src/com/oracle/graal/replacements/sparc/SPARCGraphBuilderPlugins.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.replacements.sparc;
+
+import static com.oracle.graal.compiler.target.Backend.*;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.graphbuilderconf.GraphBuilderConfiguration.Plugins;
+import com.oracle.graal.graphbuilderconf.*;
+import com.oracle.graal.graphbuilderconf.InvocationPlugins.Registration;
+import com.oracle.graal.replacements.*;
+
+public class SPARCGraphBuilderPlugins {
+
+    public static void register(Plugins plugins, ForeignCallsProvider foreignCalls) {
+        InvocationPlugins invocationPlugins = plugins.getInvocationPlugins();
+        registerIntegerLongPlugins(invocationPlugins, IntegerSubstitutions.class, Kind.Int);
+        registerIntegerLongPlugins(invocationPlugins, LongSubstitutions.class, Kind.Long);
+        registerMathPlugins(invocationPlugins, foreignCalls);
+    }
+
+    private static void registerIntegerLongPlugins(InvocationPlugins plugins, Class<?> substituteDeclaringClass, Kind kind) {
+        Class<?> declaringClass = kind.toBoxedJavaClass();
+        Class<?> type = kind.toJavaClass();
+        Registration r = new Registration(plugins, declaringClass);
+        r.registerMethodSubstitution(substituteDeclaringClass, "numberOfLeadingZeros", type);
+        r.registerMethodSubstitution(substituteDeclaringClass, "numberOfTrailingZeros", type);
+    }
+
+    private static void registerMathPlugins(InvocationPlugins plugins, ForeignCallsProvider foreignCalls) {
+        Registration r = new Registration(plugins, Math.class);
+        r.register1("sin", Double.TYPE, new ForeignCallPlugin(foreignCalls, ARITHMETIC_SIN));
+        r.register1("cos", Double.TYPE, new ForeignCallPlugin(foreignCalls, ARITHMETIC_COS));
+        r.register1("tan", Double.TYPE, new ForeignCallPlugin(foreignCalls, ARITHMETIC_TAN));
+        r.register1("exp", Double.TYPE, new ForeignCallPlugin(foreignCalls, ARITHMETIC_EXP));
+        r.register1("log", Double.TYPE, new ForeignCallPlugin(foreignCalls, ARITHMETIC_LOG));
+        r.register1("log10", Double.TYPE, new ForeignCallPlugin(foreignCalls, ARITHMETIC_LOG10));
+        r.register2("pow", Double.TYPE, Double.TYPE, new ForeignCallPlugin(foreignCalls, ARITHMETIC_POW));
+    }
+}
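
SPARCGraphBuilderPlugins wires each Math trig/exp/log/pow call to a ForeignCallPlugin (a runtime stub call) and registers method substitutions for the leading/trailing-zero count methods. A loose, JDK-only analogy of such a per-method plugin registry follows; it is illustrative only, the real InvocationPlugins keys on resolved methods and argument kinds rather than name strings, and TinyPluginRegistry is a made-up name:

import java.util.HashMap;
import java.util.Map;
import java.util.function.DoubleUnaryOperator;

public class TinyPluginRegistry {

    // maps a method name to the handler that replaces calls to it
    private final Map<String, DoubleUnaryOperator> plugins = new HashMap<>();

    void register1(String name, DoubleUnaryOperator handler) {
        plugins.put(name, handler);
    }

    double invoke(String name, double arg) {
        DoubleUnaryOperator plugin = plugins.get(name);
        // no plugin registered: fall back to ordinary dispatch (here, just NaN)
        return plugin != null ? plugin.applyAsDouble(arg) : Double.NaN;
    }

    public static void main(String[] args) {
        TinyPluginRegistry r = new TinyPluginRegistry();
        // on SPARC each of these calls is routed to a runtime stub;
        // StrictMath stands in for the ARITHMETIC_* foreign calls
        r.register1("sin", StrictMath::sin);
        r.register1("log", StrictMath::log);
        System.out.println(r.invoke("sin", 0.5));
        System.out.println(r.invoke("log", Math.E));
    }
}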
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.replacements.test/src/com/oracle/graal/replacements/test/UnsignedIntegerTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.replacements.test;
+
+import org.junit.*;
+
+import com.oracle.graal.compiler.test.*;
+
+/**
+ * Tests the unsigned operations on {@link Integer} and {@link Long}.
+ */
+public class UnsignedIntegerTest extends GraalCompilerTest {
+
+    public static int compareInteger(int a, int b) {
+        return Integer.compareUnsigned(a, b);
+    }
+
+    public static int divideInteger(int a, int b) {
+        return Integer.divideUnsigned(a, b);
+    }
+
+    public static int remainderInteger(int a, int b) {
+        return Integer.remainderUnsigned(a, b);
+    }
+
+    public static long compareLong(long a, long b) {
+        return Long.compareUnsigned(a, b);
+    }
+
+    public static long divideLong(long a, long b) {
+        return Long.divideUnsigned(a, b);
+    }
+
+    public static long remainderLong(long a, long b) {
+        return Long.remainderUnsigned(a, b);
+    }
+
+    private void testInteger(int a, int b) {
+        test("compareInteger", a, b);
+        test("divideInteger", a, b);
+        test("remainderInteger", a, b);
+    }
+
+    private void testLong(long a, long b) {
+        test("compareLong", a, b);
+        test("divideLong", a, b);
+        test("remainderLong", a, b);
+    }
+
+    @Test
+    public void testInteger() {
+        testInteger(5, 7);
+        testInteger(-3, -7);
+        testInteger(-3, 7);
+        testInteger(42, -5);
+    }
+
+    @Test
+    public void testLong() {
+        testLong(5, 7);
+        testLong(-3, -7);
+        testLong(-3, 7);
+        testLong(42, -5);
+    }
+}
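
The negative inputs make these tests interesting: reinterpreted as unsigned values they are very large, so the unsigned operations diverge sharply from their signed counterparts. A quick JDK-only illustration of the values the test exercises (not part of the changeset):

public class UnsignedValuesDemo {

    public static void main(String[] args) {
        // -3 as an unsigned int is 4294967293, so it is *greater* than 7 unsigned
        System.out.println(Integer.compareUnsigned(-3, 7) > 0);   // true
        System.out.println(Integer.divideUnsigned(-3, 7));        // 613566756
        System.out.println(Integer.remainderUnsigned(-3, 7));     // 1
        // 42 is tiny compared to -5 interpreted as an unsigned long (2^64 - 5)
        System.out.println(Long.compareUnsigned(42, -5) < 0);     // true
    }
}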
--- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/ArraySubstitutions.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/ArraySubstitutions.java	Fri Apr 10 16:58:26 2015 -0700
@@ -25,17 +25,14 @@
 import static com.oracle.graal.nodes.extended.BranchProbabilityNode.*;
 
 import com.oracle.graal.api.meta.*;
-import com.oracle.graal.api.replacements.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.nodes.java.*;
 
 /**
  * Substitutions for {@link java.lang.reflect.Array} methods.
  */
-@ClassSubstitution(java.lang.reflect.Array.class)
 public class ArraySubstitutions {
 
-    @MethodSubstitution
     public static Object newInstance(Class<?> componentType, int length) throws NegativeArraySizeException {
         // The error cases must be handled here since DynamicNewArrayNode can only deoptimize the
         // caller in response to exceptions.
@@ -48,7 +45,6 @@
         return DynamicNewArrayNode.newArray(GuardingPiNode.asNonNullClass(componentType), length);
     }
 
-    @MethodSubstitution
     public static int getLength(Object array) {
         if (!array.getClass().isArray()) {
             DeoptimizeNode.deopt(DeoptimizationAction.None, DeoptimizationReason.RuntimeConstraint);
--- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/ArraysSubstitutions.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/ArraysSubstitutions.java	Fri Apr 10 16:58:26 2015 -0700
@@ -22,16 +22,13 @@
  */
 package com.oracle.graal.replacements;
 
-import com.oracle.graal.api.replacements.*;
 import com.oracle.graal.replacements.nodes.*;
 
 /**
  * Substitutions for {@link java.util.Arrays} methods.
  */
-@ClassSubstitution(value = java.util.Arrays.class)
 public class ArraysSubstitutions {
 
-    @MethodSubstitution
     public static boolean equals(boolean[] a, boolean[] a2) {
         if (a == a2) {
             return true;
@@ -42,7 +39,6 @@
         return ArrayEqualsNode.equals(a, a2, a.length);
     }
 
-    @MethodSubstitution
     public static boolean equals(byte[] a, byte[] a2) {
         if (a == a2) {
             return true;
@@ -53,7 +49,6 @@
         return ArrayEqualsNode.equals(a, a2, a.length);
     }
 
-    @MethodSubstitution
     public static boolean equals(char[] a, char[] a2) {
         if (a == a2) {
             return true;
@@ -64,7 +59,6 @@
         return ArrayEqualsNode.equals(a, a2, a.length);
     }
 
-    @MethodSubstitution
     public static boolean equals(short[] a, short[] a2) {
         if (a == a2) {
             return true;
@@ -75,7 +69,6 @@
         return ArrayEqualsNode.equals(a, a2, a.length);
     }
 
-    @MethodSubstitution
     public static boolean equals(int[] a, int[] a2) {
         if (a == a2) {
             return true;
@@ -86,7 +79,6 @@
         return ArrayEqualsNode.equals(a, a2, a.length);
     }
 
-    @MethodSubstitution
     public static boolean equals(long[] a, long[] a2) {
         if (a == a2) {
             return true;
@@ -97,7 +89,6 @@
         return ArrayEqualsNode.equals(a, a2, a.length);
     }
 
-    @MethodSubstitution
     public static boolean equals(float[] a, float[] a2) {
         if (a == a2) {
             return true;
@@ -108,7 +99,6 @@
         return ArrayEqualsNode.equals(a, a2, a.length);
     }
 
-    @MethodSubstitution
     public static boolean equals(double[] a, double[] a2) {
         if (a == a2) {
             return true;
--- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/DefaultJavaLoweringProvider.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/DefaultJavaLoweringProvider.java	Fri Apr 10 16:58:26 2015 -0700
@@ -327,11 +327,15 @@
         Stamp loadStamp = loadStamp(read.stamp(), valueKind, read.isCompressible());
 
         ReadNode memoryRead = graph.add(new ReadNode(read.object(), read.location(), loadStamp, read.getBarrierType()));
-        // An unsafe read must not float otherwise it may float above
-        // a test guaranteeing the read is safe.
-        memoryRead.setForceFixed(true);
+        GuardingNode guard = read.getGuard();
         ValueNode readValue = implicitLoadConvert(graph, valueKind, memoryRead, read.isCompressible());
-        memoryRead.setGuard(read.getGuard());
+        if (guard == null) {
+            // An unsafe read must not float, otherwise it may float above
+            // a test guaranteeing that the read is safe.
+            memoryRead.setForceFixed(true);
+        } else {
+            memoryRead.setGuard(guard);
+        }
         read.replaceAtUsages(readValue);
         graph.replaceFixed(read, memoryRead);
     }
--- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/GraalMethodSubstitutions.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.replacements;
-
-import static com.oracle.graal.compiler.common.GraalOptions.*;
-
-import java.lang.reflect.*;
-import java.util.*;
-
-import com.oracle.graal.api.code.*;
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.api.replacements.*;
-import com.oracle.graal.api.runtime.*;
-import com.oracle.graal.nodes.spi.*;
-
-/**
- * Method substitutions that are VM-independent.
- */
-@ServiceProvider(ReplacementsProvider.class)
-public class GraalMethodSubstitutions implements ReplacementsProvider {
-
-    public void registerReplacements(MetaAccessProvider metaAccess, LoweringProvider loweringProvider, SnippetReflectionProvider snippetReflection, Replacements replacements, TargetDescription target) {
-        if (Intrinsify.getValue()) {
-            replacements.registerSubstitutions(Arrays.class, ArraysSubstitutions.class);
-            replacements.registerSubstitutions(Array.class, ArraySubstitutions.class);
-            replacements.registerSubstitutions(String.class, StringSubstitutions.class);
-            replacements.registerSubstitutions(Math.class, MathSubstitutionsX86.class);
-            replacements.registerSubstitutions(Long.class, LongSubstitutions.class);
-            replacements.registerSubstitutions(Integer.class, IntegerSubstitutions.class);
-        }
-    }
-}
--- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/IntrinsicGraphBuilder.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/IntrinsicGraphBuilder.java	Fri Apr 10 16:58:26 2015 -0700
@@ -24,7 +24,6 @@
 
 import com.oracle.graal.api.code.*;
 import com.oracle.graal.api.meta.*;
-import com.oracle.graal.api.replacements.*;
 import com.oracle.graal.compiler.common.*;
 import com.oracle.graal.compiler.common.type.*;
 import com.oracle.graal.graphbuilderconf.*;
@@ -34,7 +33,6 @@
 import com.oracle.graal.nodes.StructuredGraph.AllowAssumptions;
 import com.oracle.graal.nodes.calc.*;
 import com.oracle.graal.nodes.spi.*;
-import com.oracle.graal.phases.util.*;
 
 /**
  * Implementation of {@link GraphBuilderContext} used to produce a graph for a method based on an
@@ -42,17 +40,19 @@
  */
 public class IntrinsicGraphBuilder implements GraphBuilderContext, Receiver {
 
-    private final Providers providers;
-    private final SnippetReflectionProvider snippetReflection;
+    private final MetaAccessProvider metaAccess;
+    private final ConstantReflectionProvider constantReflection;
+    private final StampProvider stampProvider;
     private final StructuredGraph graph;
     private final ResolvedJavaMethod method;
     private FixedWithNextNode lastInstr;
     private ValueNode[] arguments;
     private ValueNode returnValue;
 
-    public IntrinsicGraphBuilder(Providers providers, SnippetReflectionProvider snippetReflection, ResolvedJavaMethod method) {
-        this.providers = providers;
-        this.snippetReflection = snippetReflection;
+    public IntrinsicGraphBuilder(MetaAccessProvider metaAccess, ConstantReflectionProvider constantReflection, StampProvider stampProvider, ResolvedJavaMethod method) {
+        this.metaAccess = metaAccess;
+        this.constantReflection = constantReflection;
+        this.stampProvider = stampProvider;
         this.graph = new StructuredGraph(method, AllowAssumptions.YES);
         this.method = method;
         this.lastInstr = graph.start();
@@ -135,19 +135,15 @@
     }
 
     public StampProvider getStampProvider() {
-        return providers.getStampProvider();
+        return stampProvider;
     }
 
     public MetaAccessProvider getMetaAccess() {
-        return providers.getMetaAccess();
+        return metaAccess;
     }
 
     public ConstantReflectionProvider getConstantReflection() {
-        return providers.getConstantReflection();
-    }
-
-    public SnippetReflectionProvider getSnippetReflection() {
-        return snippetReflection;
+        return constantReflection;
     }
 
     public StructuredGraph getGraph() {
@@ -200,11 +196,20 @@
 
     public StructuredGraph buildGraph(InvocationPlugin plugin) {
         Receiver receiver = method.isStatic() ? null : this;
-        if (InvocationPlugin.execute(this, method, plugin, receiver, arguments)) {
+        if (plugin.execute(this, method, receiver, arguments)) {
             assert (returnValue != null) == (method.getSignature().getReturnKind() != Kind.Void) : method;
             append(new ReturnNode(returnValue));
             return graph;
         }
         return null;
     }
+
+    public void intrinsify(ResolvedJavaMethod targetMethod, ResolvedJavaMethod substitute, ValueNode[] args) {
+        throw GraalInternalError.shouldNotReachHere();
+    }
+
+    @Override
+    public String toString() {
+        return String.format("%s:intrinsic", method.format("%H.%n(%p)"));
+    }
 }
--- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/LongSubstitutions.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/LongSubstitutions.java	Fri Apr 10 16:58:26 2015 -0700
@@ -22,13 +22,10 @@
  */
 package com.oracle.graal.replacements;
 
-import com.oracle.graal.api.replacements.*;
 import com.oracle.graal.replacements.nodes.*;
 
-@ClassSubstitution(Long.class)
 public class LongSubstitutions {
 
-    @MethodSubstitution
     public static int numberOfLeadingZeros(long i) {
         if (i == 0) {
             return 64;
@@ -36,7 +33,6 @@
         return 63 - BitScanReverseNode.unsafeScan(i);
     }
 
-    @MethodSubstitution
     public static int numberOfTrailingZeros(long i) {
         if (i == 0) {
             return 64;
--- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/MathSubstitutionsX86.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,134 +0,0 @@
-/*
- * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.replacements;
-
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.api.replacements.*;
-import com.oracle.graal.graph.Node.ConstantNodeParameter;
-import com.oracle.graal.graph.Node.NodeIntrinsic;
-import com.oracle.graal.nodes.extended.*;
-import com.oracle.graal.replacements.nodes.*;
-import com.oracle.graal.replacements.nodes.MathIntrinsicNode.Operation;
-
-/**
- * Substitutions for {@link java.lang.Math} methods.
- */
-@ClassSubstitution(value = java.lang.Math.class)
-public class MathSubstitutionsX86 {
-
-    private static final double PI_4 = Math.PI / 4;
-
-    /**
-     * Special cases from {@link Math#pow} and __ieee754_pow (in sharedRuntimeTrans.cpp).
-     */
-    @MethodSubstitution(guard = MathGuard.class)
-    public static double pow(double x, double y) {
-        // If the second argument is positive or negative zero, then the result is 1.0.
-        if (y == 0.0D) {
-            return 1;
-        }
-
-        // If the second argument is 1.0, then the result is the same as the first argument.
-        if (y == 1.0D) {
-            return x;
-        }
-
-        // If the second argument is NaN, then the result is NaN.
-        if (Double.isNaN(y)) {
-            return Double.NaN;
-        }
-
-        // If the first argument is NaN and the second argument is nonzero, then the result is NaN.
-        if (Double.isNaN(x) && y != 0.0D) {
-            return Double.NaN;
-        }
-
-        // x**-1 = 1/x
-        if (y == -1.0D) {
-            return 1 / x;
-        }
-
-        // x**2 = x*x
-        if (y == 2.0D) {
-            return x * x;
-        }
-
-        // x**0.5 = sqrt(x)
-        if (y == 0.5D && x >= 0.0D) {
-            return Math.sqrt(x);
-        }
-        return callDouble2(ARITHMETIC_POW, x, y);
-    }
-
-    // NOTE on snippets below:
-    // Math.sin(), .cos() and .tan() guarantee a value within 1 ULP of the
-    // exact result, but x87 trigonometric FPU instructions are only that
-    // accurate within [-pi/4, pi/4]. Examine the passed value and provide
-    // a slow path for inputs outside of that interval.
-
-    @MethodSubstitution(guard = MathGuard.class)
-    public static double sin(double x) {
-        if (Math.abs(x) < PI_4) {
-            return MathIntrinsicNode.compute(x, Operation.SIN);
-        } else {
-            return callDouble1(ARITHMETIC_SIN, x);
-        }
-    }
-
-    @MethodSubstitution(guard = MathGuard.class)
-    public static double cos(double x) {
-        if (Math.abs(x) < PI_4) {
-            return MathIntrinsicNode.compute(x, Operation.COS);
-        } else {
-            return callDouble1(ARITHMETIC_COS, x);
-        }
-    }
-
-    @MethodSubstitution(guard = MathGuard.class)
-    public static double tan(double x) {
-        if (Math.abs(x) < PI_4) {
-            return MathIntrinsicNode.compute(x, Operation.TAN);
-        } else {
-            return callDouble1(ARITHMETIC_TAN, x);
-        }
-    }
-
-    public static class MathGuard implements SubstitutionGuard {
-        public boolean execute() {
-            // FIXME should return whether the current compilation target supports these
-            String arch = System.getProperty("os.arch");
-            return arch.equals("amd64") || arch.equals("x86_64");
-        }
-    }
-
-    public static final ForeignCallDescriptor ARITHMETIC_SIN = new ForeignCallDescriptor("arithmeticSin", double.class, double.class);
-    public static final ForeignCallDescriptor ARITHMETIC_COS = new ForeignCallDescriptor("arithmeticCos", double.class, double.class);
-    public static final ForeignCallDescriptor ARITHMETIC_TAN = new ForeignCallDescriptor("arithmeticTan", double.class, double.class);
-    public static final ForeignCallDescriptor ARITHMETIC_POW = new ForeignCallDescriptor("arithmeticPow", double.class, double.class, double.class);
-
-    @NodeIntrinsic(value = ForeignCallNode.class, setStampFromReturnType = true)
-    private static native double callDouble1(@ConstantNodeParameter ForeignCallDescriptor descriptor, double value);
-
-    @NodeIntrinsic(value = ForeignCallNode.class, setStampFromReturnType = true)
-    private static native double callDouble2(@ConstantNodeParameter ForeignCallDescriptor descriptor, double a, double b);
-}
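
The pow special cases in the file deleted above track the Math.pow specification (and the __ieee754_pow special cases mentioned in its javadoc), which is why folding them before the foreign call is semantics-preserving. The spec-mandated ones can be checked with nothing but the JDK (illustrative, not part of the changeset):

public class PowSpecialCases {

    public static void main(String[] args) {
        // "If the second argument is positive or negative zero, then the result is 1.0."
        System.out.println(Math.pow(3.7, 0.0));          // 1.0
        System.out.println(Math.pow(Double.NaN, 0.0));   // 1.0, even for a NaN base
        // "If the second argument is 1.0, then the result is the same as the first argument."
        System.out.println(Math.pow(3.7, 1.0));          // 3.7
        // "If the second argument is NaN, then the result is NaN."
        System.out.println(Math.pow(2.0, Double.NaN));   // NaN
        // "If the first argument is NaN and the second argument is nonzero, the result is NaN."
        System.out.println(Math.pow(Double.NaN, 2.0));   // NaN
    }
}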
--- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/ReplacementsImpl.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/ReplacementsImpl.java	Fri Apr 10 16:58:26 2015 -0700
@@ -337,17 +337,23 @@
 
     @Override
     public StructuredGraph getSubstitution(ResolvedJavaMethod original, boolean fromBytecodeOnly) {
+        ResolvedJavaMethod substitute = null;
         if (!fromBytecodeOnly) {
             InvocationPlugin plugin = graphBuilderPlugins.getInvocationPlugins().lookupInvocation(original);
-            if (plugin != null) {
-                StructuredGraph graph = new IntrinsicGraphBuilder(providers, snippetReflection, original).buildGraph(plugin);
+            if (plugin instanceof MethodSubstitutionPlugin) {
+                MethodSubstitutionPlugin msplugin = (MethodSubstitutionPlugin) plugin;
+                substitute = msplugin.getSubstitute(providers.getMetaAccess());
+            } else if (plugin != null) {
+                StructuredGraph graph = new IntrinsicGraphBuilder(providers.getMetaAccess(), providers.getConstantReflection(), providers.getStampProvider(), original).buildGraph(plugin);
                 if (graph != null) {
                     return graph;
                 }
             }
         }
-        ClassReplacements cr = getClassReplacements(original.getDeclaringClass().getName());
-        ResolvedJavaMethod substitute = cr == null ? null : cr.methodSubstitutions.get(original);
+        if (substitute == null) {
+            ClassReplacements cr = getClassReplacements(original.getDeclaringClass().getName());
+            substitute = cr == null ? null : cr.methodSubstitutions.get(original);
+        }
         if (substitute == null) {
             return null;
         }
@@ -661,12 +667,11 @@
         protected Instance createGraphBuilder(MetaAccessProvider metaAccess, StampProvider stampProvider, ConstantReflectionProvider constantReflection, GraphBuilderConfiguration graphBuilderConfig,
                         OptimisticOptimizations optimisticOpts) {
             ReplacementContext initialReplacementContext = null;
-            if (method.getAnnotation(MethodSubstitution.class) != null) {
+            if (method.getAnnotation(Snippet.class) == null) {
                 // Late inlined intrinsic
                 initialReplacementContext = new IntrinsicContext(substitutedMethod, method, null, -1);
             } else {
                 // Snippet
-                assert method.getAnnotation(Snippet.class) != null;
                 ResolvedJavaMethod original = substitutedMethod != null ? substitutedMethod : method;
                 initialReplacementContext = new ReplacementContext(original, method);
             }
--- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/ReplacementsUtil.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/ReplacementsUtil.java	Fri Apr 10 16:58:26 2015 -0700
@@ -22,6 +22,7 @@
  */
 package com.oracle.graal.replacements;
 
+import com.oracle.graal.compiler.common.*;
 import com.oracle.graal.replacements.nodes.*;
 
 // JaCoCo Exclude
@@ -35,7 +36,7 @@
     static {
         boolean assertionsEnabled = false;
         assert (assertionsEnabled = true) != false;
-        REPLACEMENTS_ASSERTIONS_ENABLED = assertionsEnabled;
+        REPLACEMENTS_ASSERTIONS_ENABLED = assertionsEnabled && GraalOptions.ImmutableCode.getValue() == null;
     }
 
     /**
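
The static initializer in the hunk above relies on the standard idiom for detecting whether assertions are enabled: the side-effecting assignment inside the assert statement only executes under -ea. A stand-alone sketch of the idiom (illustrative; AssertionProbe is a made-up class):

public class AssertionProbe {

    static final boolean ASSERTIONS_ENABLED;

    static {
        boolean enabled = false;
        // the assignment runs only when assertions are enabled,
        // so 'enabled' stays false in a normal (no -ea) run
        assert (enabled = true) != false;
        ASSERTIONS_ENABLED = enabled;
    }

    public static void main(String[] args) {
        // prints true when launched with: java -ea AssertionProbe
        System.out.println("assertions enabled: " + ASSERTIONS_ENABLED);
    }
}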
--- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/SnippetTemplate.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/SnippetTemplate.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,7 @@
 import java.util.*;
 import java.util.concurrent.*;
 import java.util.concurrent.atomic.*;
+import java.util.function.*;
 import java.util.stream.*;
 
 import com.oracle.graal.api.code.*;
@@ -87,6 +88,7 @@
     public abstract static class SnippetInfo {
 
         protected final ResolvedJavaMethod method;
+        protected final LocationIdentity[] privateLocations;
 
         /**
          * Lazily constructed parts of {@link SnippetInfo}.
@@ -147,8 +149,9 @@
 
         protected abstract Lazy lazy();
 
-        protected SnippetInfo(ResolvedJavaMethod method) {
+        protected SnippetInfo(ResolvedJavaMethod method, LocationIdentity[] privateLocations) {
             this.method = method;
+            this.privateLocations = privateLocations;
             instantiationCounter = Debug.metric("SnippetInstantiationCount[%s]", method);
             instantiationTimer = Debug.timer("SnippetInstantiationTime[%s]", method);
             assert method.isStatic() : "snippet method must be static: " + method.format("%H.%n");
@@ -193,8 +196,8 @@
     protected static class LazySnippetInfo extends SnippetInfo {
         protected final AtomicReference<Lazy> lazy = new AtomicReference<>(null);
 
-        protected LazySnippetInfo(ResolvedJavaMethod method) {
-            super(method);
+        protected LazySnippetInfo(ResolvedJavaMethod method, LocationIdentity[] privateLocations) {
+            super(method, privateLocations);
         }
 
         @Override
@@ -209,8 +212,8 @@
     protected static class EagerSnippetInfo extends SnippetInfo {
         protected final Lazy lazy;
 
-        protected EagerSnippetInfo(ResolvedJavaMethod method) {
-            super(method);
+        protected EagerSnippetInfo(ResolvedJavaMethod method, LocationIdentity[] privateLocations) {
+            super(method, privateLocations);
             lazy = new Lazy(method);
         }
 
@@ -497,7 +500,7 @@
          * {@link Snippet} and returns a {@link SnippetInfo} value describing it. There must be
          * exactly one snippet method in {@code declaringClass}.
          */
-        protected SnippetInfo snippet(Class<? extends Snippets> declaringClass, String methodName) {
+        protected SnippetInfo snippet(Class<? extends Snippets> declaringClass, String methodName, LocationIdentity... privateLocations) {
             assert methodName != null;
             Method method = findMethod(declaringClass, methodName, null);
             assert method != null : "did not find @" + Snippet.class.getSimpleName() + " method in " + declaringClass + " named " + methodName;
@@ -506,9 +509,9 @@
             ResolvedJavaMethod javaMethod = providers.getMetaAccess().lookupJavaMethod(method);
             providers.getReplacements().registerSnippet(javaMethod);
             if (LAZY_SNIPPETS) {
-                return new LazySnippetInfo(javaMethod);
+                return new LazySnippetInfo(javaMethod, privateLocations);
             } else {
-                return new EagerSnippetInfo(javaMethod);
+                return new EagerSnippetInfo(javaMethod, privateLocations);
             }
         }
 
@@ -555,6 +558,8 @@
      */
     protected SnippetTemplate(final Providers providers, SnippetReflectionProvider snippetReflection, Arguments args) {
         this.snippetReflection = snippetReflection;
+        this.info = args.info;
+
         Object[] constantArgs = getConstantArgs(args);
         StructuredGraph snippetGraph = providers.getReplacements().getSnippet(args.info.method, constantArgs);
         instantiationTimer = Debug.timer("SnippetTemplateInstantiationTime[%#s]", args);
@@ -720,7 +725,7 @@
 
         assert checkAllVarargPlaceholdersAreDeleted(parameterCount, placeholders);
 
-        new FloatingReadPhase(false, true).apply(snippetCopy);
+        new FloatingReadPhase(true, true).apply(snippetCopy);
 
         MemoryAnchorNode anchor = snippetCopy.add(new MemoryAnchorNode());
         snippetCopy.start().replaceAtUsages(InputType.Memory, anchor);
@@ -820,6 +825,8 @@
      */
     private final StructuredGraph snippet;
 
+    private final SnippetInfo info;
+
     /**
      * The named parameters of this template that must be bound to values during instantiation. For
      * a parameter that is still live after specialization, the value in this map is either a
@@ -996,14 +1003,14 @@
             LocationIdentity locationIdentity = ((MemoryCheckpoint.Single) replacee).getLocationIdentity();
             if (locationIdentity.isAny()) {
                 assert !(memoryMap.getLastLocationAccess(any()) instanceof MemoryAnchorNode) : replacee + " kills ANY_LOCATION, but snippet does not";
+                // if the replacee kills ANY_LOCATION, the snippet can kill arbitrary locations
+                return true;
             }
             assert kills.contains(locationIdentity) : replacee + " kills " + locationIdentity + ", but snippet doesn't contain a kill to this location";
-            return true;
+            kills.remove(locationIdentity);
         }
         assert !(replacee instanceof MemoryCheckpoint.Multi) : replacee + " multi not supported (yet)";
 
-        Debug.log("WARNING: %s is not a MemoryCheckpoint, but the snippet graph contains kills (%s). You might want %s to be a MemoryCheckpoint", replacee, kills, replacee);
-
         // remove ANY_LOCATION if it's just a kill by the start node
         if (memoryMap.getLastLocationAccess(any()) instanceof MemoryAnchorNode) {
             kills.remove(any());
@@ -1012,24 +1019,26 @@
         // node can only lower to a ANY_LOCATION kill if the replacee also kills ANY_LOCATION
         assert !kills.contains(any()) : "snippet graph contains a kill to ANY_LOCATION, but replacee (" + replacee + ") doesn't kill ANY_LOCATION.  kills: " + kills;
 
+        if (SnippetCounters.getValue()) {
+            /*
+             * accesses to snippet counters are artificially introduced and violate the memory
+             * semantics.
+             */
+            kills.remove(SnippetCounter.SNIPPET_COUNTER_LOCATION);
+        }
+
         /*
-         * kills to other locations than ANY_LOCATION can be still inserted if there aren't any
-         * floating reads accessing this locations. Example: In HotSpot, InstanceOfNode is lowered
-         * to a snippet containing a write to SECONDARY_SUPER_CACHE_LOCATION. This is runtime
-         * specific, so the runtime independent InstanceOfNode can not kill this location. However,
-         * if no FloatingReadNode is reading from this location, the kill to this location is fine.
+         * Kills to private locations are safe, since there can be no floating read to these
+         * locations except reads that are introduced by the snippet itself or related snippets in
+         * the same lowering round. These reads are anchored to a MemoryAnchor at the beginning of
+         * their snippet, so they cannot float above a kill in another instance of the same
+         * snippet.
          */
-        for (FloatingReadNode frn : replacee.graph().getNodes().filter(FloatingReadNode.class)) {
-            LocationIdentity locationIdentity = frn.location().getLocationIdentity();
-            if (SnippetCounters.getValue()) {
-                // accesses to snippet counters are artificially introduced and violate the memory
-                // semantics.
-                if (locationIdentity.equals(SnippetCounter.SNIPPET_COUNTER_LOCATION)) {
-                    continue;
-                }
-            }
-            assert !kills.contains(locationIdentity) : frn + " reads from location \"" + locationIdentity + "\" but " + replacee + " does not kill this location";
+        for (LocationIdentity p : this.info.privateLocations) {
+            kills.remove(p);
         }
+
+        assert kills.isEmpty() : "snippet graph kills non-private locations " + Arrays.toString(kills.toArray()) + " that replacee (" + replacee + ") doesn't kill";
         return true;
     }
 
@@ -1097,21 +1106,23 @@
     }
 
     private void rewireMemoryGraph(ValueNode replacee, Map<Node, Node> duplicates) {
-        // rewire outgoing memory edges
-        replaceMemoryUsages(replacee, new MemoryOutputMap(replacee, duplicates));
+        if (replacee.graph().isAfterFloatingReadPhase()) {
+            // rewire outgoing memory edges
+            replaceMemoryUsages(replacee, new MemoryOutputMap(replacee, duplicates));
 
-        ReturnNode ret = (ReturnNode) duplicates.get(returnNode);
-        MemoryMapNode memoryMap = ret.getMemoryMap();
-        ret.setMemoryMap(null);
-        memoryMap.safeDelete();
+            ReturnNode ret = (ReturnNode) duplicates.get(returnNode);
+            MemoryMapNode memoryMap = ret.getMemoryMap();
+            ret.setMemoryMap(null);
+            memoryMap.safeDelete();
 
-        if (memoryAnchor != null) {
-            // rewire incoming memory edges
-            MemoryAnchorNode memoryDuplicate = (MemoryAnchorNode) duplicates.get(memoryAnchor);
-            replaceMemoryUsages(memoryDuplicate, new MemoryInputMap(replacee));
+            if (memoryAnchor != null) {
+                // rewire incoming memory edges
+                MemoryAnchorNode memoryDuplicate = (MemoryAnchorNode) duplicates.get(memoryAnchor);
+                replaceMemoryUsages(memoryDuplicate, new MemoryInputMap(replacee));
 
-            if (memoryDuplicate.hasNoUsages()) {
-                memoryDuplicate.graph().removeFixed(memoryDuplicate);
+                if (memoryDuplicate.hasNoUsages()) {
+                    memoryDuplicate.graph().removeFixed(memoryDuplicate);
+                }
             }
         }
     }
@@ -1128,7 +1139,7 @@
         }
     }
 
-    private static void replaceMemoryUsages(ValueNode node, MemoryMap map) {
+    private void replaceMemoryUsages(ValueNode node, MemoryMap map) {
         for (Node usage : node.usages().snapshot()) {
             if (usage instanceof MemoryMapNode) {
                 continue;
@@ -1141,7 +1152,10 @@
                     Position pos = iter.nextPosition();
                     if (pos.getInputType() == InputType.Memory && pos.get(usage) == node) {
                         MemoryNode replacement = map.getLastLocationAccess(location);
-                        if (replacement != null) {
+                        if (replacement == null) {
+                            assert LocationIdentity.any().equals(location) || Arrays.stream(info.privateLocations).anyMatch(Predicate.isEqual(location)) : "Snippet " + info.method.format("%h.%n") +
+                                            " contains access to the non-private location " + location + ", but replacee doesn't access this location.";
+                        } else {
                             pos.set(usage, replacement.asNode());
                         }
                     }
--- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/StandardGraphBuilderPlugins.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/StandardGraphBuilderPlugins.java	Fri Apr 10 16:58:26 2015 -0700
@@ -23,12 +23,17 @@
 package com.oracle.graal.replacements;
 
 import static com.oracle.graal.api.code.MemoryBarriers.*;
-import static com.oracle.graal.replacements.nodes.MathIntrinsicNode.Operation.*;
+import static com.oracle.graal.compiler.common.GraalOptions.*;
+
+import java.lang.reflect.*;
+import java.util.*;
+
 import sun.misc.*;
 
 import com.oracle.graal.api.code.*;
 import com.oracle.graal.api.directives.*;
 import com.oracle.graal.api.meta.*;
+import com.oracle.graal.compiler.common.*;
 import com.oracle.graal.compiler.common.calc.*;
 import com.oracle.graal.compiler.common.type.*;
 import com.oracle.graal.graph.*;
@@ -43,6 +48,7 @@
 import com.oracle.graal.nodes.util.*;
 import com.oracle.graal.options.*;
 import com.oracle.graal.replacements.nodes.*;
+import com.oracle.graal.replacements.nodes.arithmetic.*;
 
 /**
  * Provides non-runtime specific {@link InvocationPlugin}s.
@@ -56,10 +62,10 @@
     }
     // @formatter:on
 
-    public static void registerInvocationPlugins(MetaAccessProvider metaAccess, Architecture arch, InvocationPlugins plugins, boolean useBoxingPlugins) {
+    public static void registerInvocationPlugins(MetaAccessProvider metaAccess, InvocationPlugins plugins, boolean useBoxingPlugins) {
         registerObjectPlugins(plugins);
         registerClassPlugins(plugins);
-        registerMathPlugins(arch, plugins);
+        registerMathPlugins(plugins);
         registerUnsignedMathPlugins(plugins);
         registerCharacterPlugins(plugins);
         registerShortPlugins(plugins);
@@ -67,7 +73,10 @@
         registerIntegerLongPlugins(plugins, Kind.Long);
         registerFloatPlugins(plugins);
         registerDoublePlugins(plugins);
-        registerUnsafePlugins(arch, plugins);
+        registerStringPlugins(plugins);
+        registerArraysPlugins(plugins);
+        registerArrayPlugins(plugins);
+        registerUnsafePlugins(plugins);
         registerEdgesPlugins(metaAccess, plugins);
         registerGraalDirectivesPlugins(plugins);
         if (useBoxingPlugins) {
@@ -78,7 +87,54 @@
         }
     }
 
-    private static void registerUnsafePlugins(Architecture arch, InvocationPlugins plugins) {
+    private static final Field STRING_VALUE_FIELD;
+    static {
+        try {
+            STRING_VALUE_FIELD = String.class.getDeclaredField("value");
+        } catch (NoSuchFieldException e) {
+            throw new GraalInternalError(e);
+        }
+    }
+
+    private static void registerStringPlugins(InvocationPlugins plugins) {
+        /*
+         * AMD64's String.equals substitution needs about 8 registers so we disable it if there is
+         * some artificial register pressure.
+         */
+        if (RegisterPressure.getValue() == null) {
+            Registration r = new Registration(plugins, String.class);
+            r.registerMethodSubstitution(StringSubstitutions.class, "equals", Receiver.class, Object.class);
+
+            r = new Registration(plugins, StringSubstitutions.class);
+            r.register1("getValue", String.class, new InvocationPlugin() {
+                public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode value) {
+                    ResolvedJavaField field = b.getMetaAccess().lookupJavaField(STRING_VALUE_FIELD);
+                    b.addPush(new LoadFieldNode(value, field));
+                    return true;
+                }
+            });
+        }
+    }
+
+    private static void registerArraysPlugins(InvocationPlugins plugins) {
+        Registration r = new Registration(plugins, Arrays.class);
+        r.registerMethodSubstitution(ArraysSubstitutions.class, "equals", boolean[].class, boolean[].class);
+        r.registerMethodSubstitution(ArraysSubstitutions.class, "equals", byte[].class, byte[].class);
+        r.registerMethodSubstitution(ArraysSubstitutions.class, "equals", short[].class, short[].class);
+        r.registerMethodSubstitution(ArraysSubstitutions.class, "equals", char[].class, char[].class);
+        r.registerMethodSubstitution(ArraysSubstitutions.class, "equals", int[].class, int[].class);
+        r.registerMethodSubstitution(ArraysSubstitutions.class, "equals", float[].class, float[].class);
+        r.registerMethodSubstitution(ArraysSubstitutions.class, "equals", long[].class, long[].class);
+        r.registerMethodSubstitution(ArraysSubstitutions.class, "equals", double[].class, double[].class);
+    }
+
+    private static void registerArrayPlugins(InvocationPlugins plugins) {
+        Registration r = new Registration(plugins, Array.class);
+        r.registerMethodSubstitution(ArraySubstitutions.class, "newInstance", Class.class, int.class);
+        r.registerMethodSubstitution(ArraySubstitutions.class, "getLength", Object.class);
+    }
+
+    private static void registerUnsafePlugins(InvocationPlugins plugins) {
         Registration r = new Registration(plugins, Unsafe.class);
         for (Kind kind : Kind.values()) {
             if ((kind.isPrimitive() && kind != Kind.Void) || kind == Kind.Object) {
@@ -118,39 +174,9 @@
                     return true;
                 }
             });
-
-            if (getAndSetEnabled(arch)) {
-                r.register4("getAndSet" + kind.name(), Receiver.class, Object.class, long.class, javaClass, new InvocationPlugin() {
-                    public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver unsafe, ValueNode object, ValueNode offset, ValueNode value) {
-                        // Emits a null-check for the otherwise unused receiver
-                        unsafe.get();
-                        b.addPush(kind.getStackKind(), new AtomicReadAndWriteNode(object, offset, value, kind, LocationIdentity.any()));
-                        return true;
-                    }
-                });
-                if (kind != Kind.Object) {
-                    r.register4("getAndAdd" + kind.name(), Receiver.class, Object.class, long.class, javaClass, new InvocationPlugin() {
-                        public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver unsafe, ValueNode object, ValueNode offset, ValueNode delta) {
-                            // Emits a null-check for the otherwise unused receiver
-                            unsafe.get();
-                            b.addPush(kind.getStackKind(), new AtomicReadAndAddNode(object, offset, delta, LocationIdentity.any()));
-                            return true;
-                        }
-                    });
-                }
-            }
         }
     }
 
-    /**
-     * Determines if the platform includes such for intrinsifying the {@link Unsafe#getAndSetInt}
-     * method family.
-     */
-    public static boolean getAndSetEnabled(Architecture arch) {
-        // FIXME should return whether the current compilation target supports these
-        return arch.getName().equals("AMD64");
-    }
-
     private static void registerIntegerLongPlugins(InvocationPlugins plugins, Kind kind) {
         Class<?> declaringClass = kind.toBoxedJavaClass();
         Class<?> type = kind.toJavaClass();
@@ -175,7 +201,7 @@
         });
         r.register2("remainderUnsigned", type, type, new InvocationPlugin() {
             public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode dividend, ValueNode divisor) {
-                b.push(kind, b.recursiveAppend(new UnsignedDivNode(dividend, divisor).canonical(null, dividend, divisor)));
+                b.push(kind, b.recursiveAppend(new UnsignedRemNode(dividend, divisor).canonical(null, dividend, divisor)));
                 return true;
             }
         });
@@ -241,8 +267,29 @@
         });
     }
 
-    private static void registerMathPlugins(Architecture arch, InvocationPlugins plugins) {
+    private static void registerMathPlugins(InvocationPlugins plugins) {
         Registration r = new Registration(plugins, Math.class);
+        for (Kind kind : new Kind[]{Kind.Int, Kind.Long}) {
+            Class<?> type = kind.toJavaClass();
+            r.register2("addExact", type, type, new InvocationPlugin() {
+                public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode x, ValueNode y) {
+                    b.addPush(kind.getStackKind(), new IntegerAddExactNode(x, y));
+                    return true;
+                }
+            });
+            r.register2("subtractExact", type, type, new InvocationPlugin() {
+                public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode x, ValueNode y) {
+                    b.addPush(kind.getStackKind(), new IntegerSubExactNode(x, y));
+                    return true;
+                }
+            });
+            r.register2("multiplyExact", type, type, new InvocationPlugin() {
+                public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode x, ValueNode y) {
+                    b.addPush(kind.getStackKind(), new IntegerMulExactNode(x, y));
+                    return true;
+                }
+            });
+        }
         r.register1("abs", Float.TYPE, new InvocationPlugin() {
             public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode value) {
                 b.push(Kind.Float, b.recursiveAppend(new AbsNode(value).canonical(null, value)));
@@ -261,31 +308,6 @@
                 return true;
             }
         });
-        r.register2("pow", Double.TYPE, Double.TYPE, new InvocationPlugin() {
-            public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode x, ValueNode y) {
-                ValueNode folded = MathPowNode.tryFold(x, y);
-                if (folded != null) {
-                    b.addPush(Kind.Double, folded);
-                } else {
-                    b.addPush(Kind.Double, new MathPowNode(b.getInvokeKind(), targetMethod, b.bci(), b.getInvokeReturnType(), x, y));
-                }
-                return true;
-            }
-        });
-        if (getAndSetEnabled(arch)) {
-            r.register1("log", Double.TYPE, new InvocationPlugin() {
-                public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode value) {
-                    b.push(Kind.Double, b.recursiveAppend(MathIntrinsicNode.create(value, LOG)));
-                    return true;
-                }
-            });
-            r.register1("log10", Double.TYPE, new InvocationPlugin() {
-                public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode value) {
-                    b.push(Kind.Double, b.recursiveAppend(MathIntrinsicNode.create(value, LOG10)));
-                    return true;
-                }
-            });
-        }
     }
 
     public static class UnsignedMathPlugin implements InvocationPlugin {
@@ -623,7 +645,7 @@
         String[] names = {"org.openjdk.jmh.infra.Blackhole", "org.openjdk.jmh.logic.BlackHole"};
         for (String name : names) {
             Class<?> blackholeClass;
-            blackholeClass = ReplacementsImpl.resolveClass(name, true);
+            blackholeClass = MethodSubstitutionPlugin.resolveClass(name, true);
             if (blackholeClass != null) {
                 Registration r = new Registration(plugins, blackholeClass);
                 for (Kind kind : Kind.values()) {
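
Two behavioral points in the StandardGraphBuilderPlugins changes above are easy to check against the JDK: the newly registered addExact/subtractExact/multiplyExact plugins model methods that throw on overflow, and the remainderUnsigned plugin now builds an unsigned remainder node instead of (incorrectly) an unsigned divide node. A small JDK-only check (illustrative, not part of the changeset):

public class ExactAndUnsignedDemo {

    public static void main(String[] args) {
        // Math.addExact throws ArithmeticException on overflow instead of wrapping
        try {
            Math.addExact(Integer.MAX_VALUE, 1);
        } catch (ArithmeticException e) {
            System.out.println("overflow detected: " + e.getMessage());
        }
        System.out.println(Math.multiplyExact(46_340, 46_340)); // 2147395600, still fits in int

        // Unsigned divide and remainder are different operations; the identity below
        // (in wrapping int arithmetic) is what ties them together.
        int a = -3, b = 7;
        int q = Integer.divideUnsigned(a, b);     // 613566756
        int r = Integer.remainderUnsigned(a, b);  // 1
        System.out.println(q * b + r == a);       // true
    }
}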
--- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/StringSubstitutions.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/StringSubstitutions.java	Fri Apr 10 16:58:26 2015 -0700
@@ -22,12 +22,8 @@
  */
 package com.oracle.graal.replacements;
 
-import static com.oracle.graal.compiler.common.UnsafeAccess.*;
-
-import java.lang.reflect.*;
-
-import com.oracle.graal.api.replacements.*;
-import com.oracle.graal.compiler.common.*;
+import com.oracle.graal.graphbuilderconf.*;
+import com.oracle.graal.nodes.java.*;
 import com.oracle.graal.replacements.nodes.*;
 
 import edu.umd.cs.findbugs.annotations.*;
@@ -35,24 +31,8 @@
 /**
  * Substitutions for {@link java.lang.String} methods.
  */
-@ClassSubstitution(value = java.lang.String.class)
 public class StringSubstitutions {
 
-    /**
-     * Offset of the {@link String#value} field.
-     */
-    @java.lang.SuppressWarnings("javadoc") private static final long valueOffset;
-
-    static {
-        try {
-            Field valueField = String.class.getDeclaredField("value");
-            valueOffset = unsafe.objectFieldOffset(valueField);
-        } catch (NoSuchFieldException | SecurityException | IllegalArgumentException e) {
-            throw new GraalInternalError(e);
-        }
-    }
-
-    @MethodSubstitution(isStatic = false)
     @SuppressFBWarnings(value = "ES_COMPARING_PARAMETER_STRING_WITH_EQ", justification = "reference equality on the receiver is what we want")
     public static boolean equals(final String thisString, Object obj) {
         if (thisString == obj) {
@@ -69,9 +49,14 @@
             return true;
         }
 
-        final char[] array1 = (char[]) unsafe.getObject(thisString, valueOffset);
-        final char[] array2 = (char[]) unsafe.getObject(thatString, valueOffset);
+        final char[] array1 = getValue(thisString);
+        final char[] array2 = getValue(thatString);
 
         return ArrayEqualsNode.equals(array1, array2, array1.length);
     }
+
+    /**
+     * Will be intrinsified with an {@link InvocationPlugin} to a {@link LoadFieldNode}.
+     */
+    private static native char[] getValue(String s);
 }
--- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/ArrayEqualsNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/ArrayEqualsNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -63,7 +63,7 @@
 
     @Override
     public Node canonical(CanonicalizerTool tool) {
-        if (hasNoUsages()) {
+        if (tool.allUsagesAvailable() && hasNoUsages()) {
             return null;
         }
         if (GraphUtil.unproxify(array1) == GraphUtil.unproxify(array2)) {
--- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/BasicObjectCloneNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/BasicObjectCloneNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -77,15 +77,14 @@
             return null;
         } else if (objectStamp.isExactType()) {
             return isCloneableType(objectStamp.type(), metaAccess) ? objectStamp.type() : null;
-        } else {
+        } else if (assumptions != null) {
             AssumptionResult<ResolvedJavaType> leafConcreteSubtype = objectStamp.type().findLeafConcreteSubtype();
             if (leafConcreteSubtype != null && isCloneableType(leafConcreteSubtype.getResult(), metaAccess)) {
                 assumptions.record(leafConcreteSubtype);
                 return leafConcreteSubtype.getResult();
-            } else {
-                return null;
             }
         }
+        return null;
     }
 
     @Override
--- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/MacroNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/MacroNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -170,6 +170,10 @@
             InliningUtil.inline(invoke, replacementGraph, false, null);
             Debug.dump(graph(), "After inlining replacement %s", replacementGraph);
         } else {
+            if (invoke.bci() < 0) {
+                throw new GraalInternalError("%s: cannot lower to invoke with invalid BCI: %s", graph(), this);
+            }
+
             if (invoke.stateAfter() == null) {
                 ResolvedJavaMethod method = graph().method();
                 if (method.getAnnotation(MethodSubstitution.class) != null || method.getAnnotation(Snippet.class) != null) {
--- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/MathIntrinsicNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,130 +0,0 @@
-/*
- * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.replacements.nodes;
-
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.compiler.common.*;
-import com.oracle.graal.compiler.common.type.*;
-import com.oracle.graal.graph.*;
-import com.oracle.graal.graph.spi.*;
-import com.oracle.graal.lir.gen.*;
-import com.oracle.graal.nodeinfo.*;
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.nodes.calc.*;
-import com.oracle.graal.nodes.spi.*;
-
-@NodeInfo
-public final class MathIntrinsicNode extends UnaryNode implements ArithmeticLIRLowerable {
-
-    public static final NodeClass<MathIntrinsicNode> TYPE = NodeClass.create(MathIntrinsicNode.class);
-    protected final Operation operation;
-
-    public enum Operation {
-        LOG,
-        LOG10,
-        SIN,
-        COS,
-        TAN
-    }
-
-    public Operation operation() {
-        return operation;
-    }
-
-    public static ValueNode create(ValueNode value, Operation op) {
-        ValueNode c = tryConstantFold(value, op);
-        if (c != null) {
-            return c;
-        }
-        return new MathIntrinsicNode(value, op);
-    }
-
-    protected static ValueNode tryConstantFold(ValueNode value, Operation op) {
-        if (value.isConstant()) {
-            double ret = doCompute(value.asJavaConstant().asDouble(), op);
-            return ConstantNode.forDouble(ret);
-        }
-        return null;
-    }
-
-    protected MathIntrinsicNode(ValueNode value, Operation op) {
-        super(TYPE, StampFactory.forKind(Kind.Double), value);
-        assert value.stamp() instanceof FloatStamp && PrimitiveStamp.getBits(value.stamp()) == 64;
-        this.operation = op;
-    }
-
-    @Override
-    public void generate(NodeMappableLIRBuilder builder, ArithmeticLIRGenerator gen) {
-        Value input = builder.operand(getValue());
-        Value result;
-        switch (operation()) {
-            case LOG:
-                result = gen.emitMathLog(input, false);
-                break;
-            case LOG10:
-                result = gen.emitMathLog(input, true);
-                break;
-            case SIN:
-                result = gen.emitMathSin(input);
-                break;
-            case COS:
-                result = gen.emitMathCos(input);
-                break;
-            case TAN:
-                result = gen.emitMathTan(input);
-                break;
-            default:
-                throw GraalInternalError.shouldNotReachHere();
-        }
-        builder.setResult(this, result);
-    }
-
-    @Override
-    public ValueNode canonical(CanonicalizerTool tool, ValueNode forValue) {
-        ValueNode c = tryConstantFold(forValue, operation());
-        if (c != null) {
-            return c;
-        }
-        return this;
-    }
-
-    @NodeIntrinsic
-    public static native double compute(double value, @ConstantNodeParameter Operation op);
-
-    private static double doCompute(double value, Operation op) {
-        switch (op) {
-            case LOG:
-                return Math.log(value);
-            case LOG10:
-                return Math.log10(value);
-            case SIN:
-                return Math.sin(value);
-            case COS:
-                return Math.cos(value);
-            case TAN:
-                return Math.tan(value);
-            default:
-                throw new GraalInternalError("unknown op %s", op);
-        }
-    }
-}
--- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/MathPowNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.replacements.nodes;
-
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.graph.*;
-import com.oracle.graal.graph.spi.*;
-import com.oracle.graal.nodeinfo.*;
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.nodes.CallTargetNode.InvokeKind;
-
-@NodeInfo
-public final class MathPowNode extends MacroStateSplitNode implements Canonicalizable.Binary<ValueNode> {
-
-    public static final NodeClass<MathPowNode> TYPE = NodeClass.create(MathPowNode.class);
-
-    public MathPowNode(InvokeKind invokeKind, ResolvedJavaMethod targetMethod, int bci, JavaType returnType, ValueNode x, ValueNode y) {
-        super(TYPE, invokeKind, targetMethod, bci, returnType, x, y);
-    }
-
-    public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) {
-        ValueNode folded = tryFold(forX, forY);
-        return folded != null ? folded : this;
-    }
-
-    public static ValueNode tryFold(ValueNode x, ValueNode y) {
-        if (x.isConstant() && y.isConstant()) {
-            double xPrim = x.asJavaConstant().asDouble();
-            double yPrim = y.asJavaConstant().asDouble();
-            return ConstantNode.forDouble(Math.pow(xPrim, yPrim));
-        }
-        return null;
-    }
-
-    public ValueNode getX() {
-        return arguments.get(0);
-    }
-
-    public ValueNode getY() {
-        return arguments.get(1);
-    }
-}
--- a/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/PureFunctionMacroNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/PureFunctionMacroNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -50,7 +50,7 @@
 
     @Override
     public Node canonical(CanonicalizerTool tool) {
-        if (hasNoUsages()) {
+        if (tool.allUsagesAvailable() && hasNoUsages()) {
             return null;
         } else {
             ValueNode param = arguments.get(0);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/arithmetic/IntegerAddExactNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.replacements.nodes.arithmetic;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.compiler.common.type.*;
+import com.oracle.graal.graph.*;
+import com.oracle.graal.graph.spi.*;
+import com.oracle.graal.nodeinfo.*;
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.calc.*;
+import com.oracle.graal.nodes.spi.*;
+
+/**
+ * Node representing an exact integer addition that will throw an {@link ArithmeticException} in
+ * case the addition would overflow the 32 bit range.
+ */
+@NodeInfo
+public final class IntegerAddExactNode extends AddNode implements IntegerExactArithmeticNode {
+    public static final NodeClass<IntegerAddExactNode> TYPE = NodeClass.create(IntegerAddExactNode.class);
+
+    public IntegerAddExactNode(ValueNode x, ValueNode y) {
+        super(TYPE, x, y);
+        setStamp(x.stamp().unrestricted());
+        assert x.stamp().isCompatible(y.stamp()) && x.stamp() instanceof IntegerStamp;
+    }
+
+    @Override
+    public boolean inferStamp() {
+        // TODO Should probably use a specialized version which understands that it can't overflow
+        return false;
+    }
+
+    @Override
+    public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) {
+        ValueNode result = findSynonym(forX, forY);
+        if (result == null) {
+            return this;
+        } else {
+            return result;
+        }
+    }
+
+    private static ValueNode findSynonym(ValueNode forX, ValueNode forY) {
+        if (forX.isConstant() && !forY.isConstant()) {
+            return new IntegerAddExactNode(forY, forX);
+        }
+        if (forX.isConstant()) {
+            ConstantNode constantNode = canonicalXconstant(forX, forY);
+            if (constantNode != null) {
+                return constantNode;
+            }
+        } else if (forY.isConstant()) {
+            long c = forY.asJavaConstant().asLong();
+            if (c == 0) {
+                return forX;
+            }
+        }
+        return null;
+    }
+
+    private static ConstantNode canonicalXconstant(ValueNode forX, ValueNode forY) {
+        JavaConstant xConst = forX.asJavaConstant();
+        JavaConstant yConst = forY.asJavaConstant();
+        if (xConst != null && yConst != null) {
+            assert xConst.getKind() == yConst.getKind();
+            try {
+                if (xConst.getKind() == Kind.Int) {
+                    return ConstantNode.forInt(Math.addExact(xConst.asInt(), yConst.asInt()));
+                } else {
+                    assert xConst.getKind() == Kind.Long;
+                    return ConstantNode.forLong(Math.addExact(xConst.asLong(), yConst.asLong()));
+                }
+            } catch (ArithmeticException ex) {
+                // The operation will result in an overflow exception, so do not canonicalize.
+            }
+        }
+        return null;
+    }
+
+    @Override
+    public IntegerExactArithmeticSplitNode createSplit(AbstractBeginNode next, AbstractBeginNode deopt) {
+        return graph().add(new IntegerAddExactSplitNode(stamp(), getX(), getY(), next, deopt));
+    }
+
+    @Override
+    public void lower(LoweringTool tool) {
+        IntegerExactArithmeticSplitNode.lower(tool, this);
+    }
+}
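
The constant folding in canonicalXconstant mirrors the JDK's Math.addExact: a constant pair is folded only when the addition does not overflow; on overflow the node is kept so the run-time overflow path can deoptimize. A minimal stand-alone sketch of that rule using only JDK API (the helper name tryFoldAddExact is illustrative, not Graal code):

    // Returns the folded value, or null to signal "do not canonicalize"
    // when the addition would overflow (the node then stays in the graph).
    static Integer tryFoldAddExact(int x, int y) {
        try {
            return Math.addExact(x, y);
        } catch (ArithmeticException overflow) {
            return null;
        }
    }
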
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/arithmetic/IntegerAddExactSplitNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.replacements.nodes.arithmetic;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.compiler.common.type.*;
+import com.oracle.graal.graph.*;
+import com.oracle.graal.nodeinfo.*;
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.spi.*;
+
+@NodeInfo
+public final class IntegerAddExactSplitNode extends IntegerExactArithmeticSplitNode {
+    public static final NodeClass<IntegerAddExactSplitNode> TYPE = NodeClass.create(IntegerAddExactSplitNode.class);
+
+    public IntegerAddExactSplitNode(Stamp stamp, ValueNode x, ValueNode y, AbstractBeginNode next, AbstractBeginNode overflowSuccessor) {
+        super(TYPE, stamp, x, y, next, overflowSuccessor);
+    }
+
+    @Override
+    protected Value generateArithmetic(NodeLIRBuilderTool gen) {
+        return gen.getLIRGeneratorTool().emitAdd(gen.operand(getX()), gen.operand(getY()), true);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/arithmetic/IntegerExactArithmeticNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.replacements.nodes.arithmetic;
+
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.spi.*;
+
+interface IntegerExactArithmeticNode extends Lowerable {
+
+    IntegerExactArithmeticSplitNode createSplit(AbstractBeginNode next, AbstractBeginNode deopt);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/arithmetic/IntegerExactArithmeticSplitNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.replacements.nodes.arithmetic;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.compiler.common.type.*;
+import com.oracle.graal.graph.*;
+import com.oracle.graal.nodeinfo.*;
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.calc.*;
+import com.oracle.graal.nodes.spi.*;
+
+@NodeInfo
+public abstract class IntegerExactArithmeticSplitNode extends ControlSplitNode implements LIRLowerable {
+    public static final NodeClass<IntegerExactArithmeticSplitNode> TYPE = NodeClass.create(IntegerExactArithmeticSplitNode.class);
+
+    @Successor AbstractBeginNode next;
+    @Successor AbstractBeginNode overflowSuccessor;
+    @Input ValueNode x;
+    @Input ValueNode y;
+
+    protected IntegerExactArithmeticSplitNode(NodeClass<? extends IntegerExactArithmeticSplitNode> c, Stamp stamp, ValueNode x, ValueNode y, AbstractBeginNode next, AbstractBeginNode overflowSuccessor) {
+        super(c, stamp);
+        this.x = x;
+        this.y = y;
+        this.overflowSuccessor = overflowSuccessor;
+        this.next = next;
+    }
+
+    @Override
+    public AbstractBeginNode getPrimarySuccessor() {
+        return next;
+    }
+
+    @Override
+    public double probability(AbstractBeginNode successor) {
+        return successor == next ? 1 : 0;
+    }
+
+    public AbstractBeginNode getNext() {
+        return next;
+    }
+
+    public AbstractBeginNode getOverflowSuccessor() {
+        return overflowSuccessor;
+    }
+
+    public ValueNode getX() {
+        return x;
+    }
+
+    public ValueNode getY() {
+        return y;
+    }
+
+    @Override
+    public void generate(NodeLIRBuilderTool generator) {
+        generator.setResult(this, generateArithmetic(generator));
+        generator.emitOverflowCheckBranch(getOverflowSuccessor(), getNext(), stamp, probability(getOverflowSuccessor()));
+    }
+
+    protected abstract Value generateArithmetic(NodeLIRBuilderTool generator);
+
+    static void lower(LoweringTool tool, IntegerExactArithmeticNode node) {
+        if (node.asNode().graph().getGuardsStage() == StructuredGraph.GuardsStage.FIXED_DEOPTS) {
+            FloatingNode floatingNode = (FloatingNode) node;
+            FixedWithNextNode previous = tool.lastFixedNode();
+            FixedNode next = previous.next();
+            previous.setNext(null);
+            DeoptimizeNode deopt = floatingNode.graph().add(new DeoptimizeNode(DeoptimizationAction.InvalidateReprofile, DeoptimizationReason.ArithmeticException));
+            AbstractBeginNode normalBegin = floatingNode.graph().add(new BeginNode());
+            normalBegin.setNext(next);
+            IntegerExactArithmeticSplitNode split = node.createSplit(normalBegin, BeginNode.begin(deopt));
+            previous.setNext(split);
+            floatingNode.replaceAndDelete(split);
+        }
+    }
+}
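
For orientation, lower() turns a floating exact-arithmetic node into explicit control flow: the split computes the result, the overflow successor feeds a DeoptimizeNode, and the normal successor continues with the original next node. Below is a behavioral sketch in plain Java of the lowered shape for an exact int addition; overflows and deoptimize are hypothetical stand-ins for emitOverflowCheckBranch and the DeoptimizeNode, not Graal API:

    static int addExactLowered(int x, int y) {
        int result = x + y;                    // generateArithmetic(): emitAdd(x, y, true)
        if (overflows(x, y, result)) {         // branch to the overflow successor
            deoptimize();                      // DeoptimizeNode(InvalidateReprofile, ArithmeticException)
        }
        return result;                         // normal successor: execution continues
    }

    static boolean overflows(int x, int y, int result) {
        return ((x ^ result) & (y ^ result)) < 0;   // two's-complement overflow test for addition
    }

    static void deoptimize() {
        throw new ArithmeticException("integer overflow");   // models deopt and re-execution in the interpreter
    }
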
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/arithmetic/IntegerMulExactNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.replacements.nodes.arithmetic;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.compiler.common.type.*;
+import com.oracle.graal.graph.*;
+import com.oracle.graal.graph.spi.*;
+import com.oracle.graal.nodeinfo.*;
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.calc.*;
+import com.oracle.graal.nodes.spi.*;
+
+/**
+ * Node representing an exact integer multiplication that will throw an {@link ArithmeticException}
+ * in case the multiplication would overflow the 32 bit range.
+ */
+@NodeInfo
+public final class IntegerMulExactNode extends MulNode implements IntegerExactArithmeticNode {
+    public static final NodeClass<IntegerMulExactNode> TYPE = NodeClass.create(IntegerMulExactNode.class);
+
+    public IntegerMulExactNode(ValueNode x, ValueNode y) {
+        super(TYPE, x, y);
+        setStamp(x.stamp().unrestricted());
+        assert x.stamp().isCompatible(y.stamp()) && x.stamp() instanceof IntegerStamp;
+    }
+
+    @Override
+    public boolean inferStamp() {
+        return false;
+    }
+
+    @Override
+    public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) {
+        if (forX.isConstant() && !forY.isConstant()) {
+            return new IntegerMulExactNode(forY, forX);
+        }
+        if (forX.isConstant()) {
+            return canonicalXconstant(forX, forY);
+        } else if (forY.isConstant()) {
+            long c = forY.asJavaConstant().asLong();
+            if (c == 1) {
+                return forX;
+            }
+            if (c == 0) {
+                return ConstantNode.forIntegerStamp(stamp(), 0);
+            }
+        }
+        return this;
+    }
+
+    private ValueNode canonicalXconstant(ValueNode forX, ValueNode forY) {
+        JavaConstant xConst = forX.asJavaConstant();
+        JavaConstant yConst = forY.asJavaConstant();
+        assert xConst.getKind() == yConst.getKind();
+        try {
+            if (xConst.getKind() == Kind.Int) {
+                return ConstantNode.forInt(Math.multiplyExact(xConst.asInt(), yConst.asInt()));
+            } else {
+                assert xConst.getKind() == Kind.Long;
+                return ConstantNode.forLong(Math.multiplyExact(xConst.asLong(), yConst.asLong()));
+            }
+        } catch (ArithmeticException ex) {
+            // The operation will result in an overflow exception, so do not canonicalize.
+        }
+        return this;
+    }
+
+    @Override
+    public IntegerExactArithmeticSplitNode createSplit(AbstractBeginNode next, AbstractBeginNode deopt) {
+        return graph().add(new IntegerMulExactSplitNode(stamp(), getX(), getY(), next, deopt));
+    }
+
+    @Override
+    public void lower(LoweringTool tool) {
+        IntegerExactArithmeticSplitNode.lower(tool, this);
+    }
+}
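
The multiplication identities applied in canonical() above (move a constant to the right, x * 1 -> x, x * 0 -> 0, and fold constant pairs via Math.multiplyExact unless they overflow) can be sanity-checked with plain JDK calls; the helper name is illustrative:

    // Constant pairs fold normally; an overflowing pair is deliberately left unfolded.
    static void checkMulExactFolding() {
        assert Math.multiplyExact(6, 7) == 42;
        try {
            Math.multiplyExact(Integer.MAX_VALUE, 2);   // would overflow: the node does not canonicalize
            assert false : "expected overflow";
        } catch (ArithmeticException expected) {
            // canonicalXconstant catches this and returns the node unchanged
        }
    }
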
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/arithmetic/IntegerMulExactSplitNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.replacements.nodes.arithmetic;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.compiler.common.type.*;
+import com.oracle.graal.graph.*;
+import com.oracle.graal.nodeinfo.*;
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.spi.*;
+
+@NodeInfo
+public final class IntegerMulExactSplitNode extends IntegerExactArithmeticSplitNode {
+    public static final NodeClass<IntegerMulExactSplitNode> TYPE = NodeClass.create(IntegerMulExactSplitNode.class);
+
+    public IntegerMulExactSplitNode(Stamp stamp, ValueNode x, ValueNode y, AbstractBeginNode next, AbstractBeginNode overflowSuccessor) {
+        super(TYPE, stamp, x, y, next, overflowSuccessor);
+    }
+
+    @Override
+    protected Value generateArithmetic(NodeLIRBuilderTool gen) {
+        return gen.getLIRGeneratorTool().emitMul(gen.operand(getX()), gen.operand(getY()), true);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/arithmetic/IntegerMulHighNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.replacements.nodes.arithmetic;
+
+import java.util.function.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.compiler.common.type.*;
+import com.oracle.graal.graph.*;
+import com.oracle.graal.graph.spi.*;
+import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.nodeinfo.*;
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.calc.*;
+import com.oracle.graal.nodes.spi.*;
+
+@NodeInfo(shortName = "*H")
+public final class IntegerMulHighNode extends BinaryNode implements ArithmeticLIRLowerable {
+    public static final NodeClass<IntegerMulHighNode> TYPE = NodeClass.create(IntegerMulHighNode.class);
+
+    public IntegerMulHighNode(ValueNode x, ValueNode y) {
+        this((IntegerStamp) x.stamp().unrestricted(), x, y);
+    }
+
+    public IntegerMulHighNode(IntegerStamp stamp, ValueNode x, ValueNode y) {
+        super(TYPE, stamp, x, y);
+    }
+
+    /**
+     * Determines the minimum and maximum result of this node for the given inputs and returns the
+     * result of the given BiFunction on the minimum and maximum values.
+     */
+    private <T> T processExtremes(ValueNode forX, ValueNode forY, BiFunction<Long, Long, T> op) {
+        IntegerStamp xStamp = (IntegerStamp) forX.stamp();
+        IntegerStamp yStamp = (IntegerStamp) forY.stamp();
+
+        Kind kind = getKind();
+        assert kind == Kind.Int || kind == Kind.Long;
+        long[] xExtremes = {xStamp.lowerBound(), xStamp.upperBound()};
+        long[] yExtremes = {yStamp.lowerBound(), yStamp.upperBound()};
+        long min = Long.MAX_VALUE;
+        long max = Long.MIN_VALUE;
+        for (long a : xExtremes) {
+            for (long b : yExtremes) {
+                long result = kind == Kind.Int ? multiplyHigh((int) a, (int) b) : multiplyHigh(a, b);
+                min = Math.min(min, result);
+                max = Math.max(max, result);
+            }
+        }
+        return op.apply(min, max);
+    }
+
+    @Override
+    public boolean inferStamp() {
+        return updateStamp(processExtremes(getX(), getY(), (min, max) -> StampFactory.forInteger(getKind(), min, max)));
+    }
+
+    @SuppressWarnings("cast")
+    @Override
+    public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) {
+        return processExtremes(forX, forY, (min, max) -> min == (long) max ? ConstantNode.forIntegerKind(getKind(), min) : this);
+    }
+
+    @Override
+    public void generate(NodeMappableLIRBuilder builder, ArithmeticLIRGenerator gen) {
+        Value a = builder.operand(getX());
+        Value b = builder.operand(getY());
+        builder.setResult(this, gen.emitMulHigh(a, b));
+    }
+
+    public static int multiplyHigh(int x, int y) {
+        long r = (long) x * (long) y;
+        return (int) (r >> 32);
+    }
+
+    public static long multiplyHigh(long x, long y) {
+        // Checkstyle: stop
+        long x0, y0, z0;
+        long x1, y1, z1, z2, t;
+        // Checkstyle: resume
+
+        x0 = x & 0xFFFFFFFFL;
+        x1 = x >> 32;
+
+        y0 = y & 0xFFFFFFFFL;
+        y1 = y >> 32;
+
+        z0 = x0 * y0;
+        t = x1 * y0 + (z0 >>> 32);
+        z1 = t & 0xFFFFFFFFL;
+        z2 = t >> 32;
+        z1 += x0 * y1;
+
+        return x1 * y1 + z2 + (z1 >> 32);
+    }
+}
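
multiplyHigh(long, long) returns the upper 64 bits of the full 128 bit signed product, computed from 32 bit halves. A quick cross-check against BigInteger (JDK API only; the helper name is illustrative) shows what the method is expected to produce:

    import java.math.BigInteger;

    // Compares the high 64 bits of the signed 128 bit product with BigInteger arithmetic.
    static void checkMultiplyHigh(long x, long y) {
        long expected = BigInteger.valueOf(x).multiply(BigInteger.valueOf(y))
                        .shiftRight(64).longValueExact();
        assert IntegerMulHighNode.multiplyHigh(x, y) == expected;
    }
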
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/arithmetic/IntegerSubExactNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.replacements.nodes.arithmetic;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.compiler.common.type.*;
+import com.oracle.graal.graph.*;
+import com.oracle.graal.graph.spi.*;
+import com.oracle.graal.nodeinfo.*;
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.calc.*;
+import com.oracle.graal.nodes.spi.*;
+import com.oracle.graal.nodes.util.*;
+
+/**
+ * Node representing an exact integer subtraction that will throw an {@link ArithmeticException} in
+ * case the subtraction would overflow the 32 bit range.
+ */
+@NodeInfo
+public final class IntegerSubExactNode extends SubNode implements IntegerExactArithmeticNode {
+    public static final NodeClass<IntegerSubExactNode> TYPE = NodeClass.create(IntegerSubExactNode.class);
+
+    public IntegerSubExactNode(ValueNode x, ValueNode y) {
+        super(TYPE, x, y);
+        setStamp(x.stamp().unrestricted());
+        assert x.stamp().isCompatible(y.stamp()) && x.stamp() instanceof IntegerStamp;
+    }
+
+    @Override
+    public boolean inferStamp() {
+        // TODO Should probably use a specialized version which understands that it can't overflow
+        return false;
+    }
+
+    @Override
+    public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) {
+        if (GraphUtil.unproxify(forX) == GraphUtil.unproxify(forY)) {
+            return ConstantNode.forIntegerStamp(stamp(), 0);
+        }
+        if (forX.isConstant() && forY.isConstant()) {
+            return canonicalXYconstant(forX, forY);
+        } else if (forY.isConstant()) {
+            long c = forY.asJavaConstant().asLong();
+            if (c == 0) {
+                return forX;
+            }
+        }
+        return this;
+    }
+
+    private ValueNode canonicalXYconstant(ValueNode forX, ValueNode forY) {
+        JavaConstant xConst = forX.asJavaConstant();
+        JavaConstant yConst = forY.asJavaConstant();
+        assert xConst.getKind() == yConst.getKind();
+        try {
+            if (xConst.getKind() == Kind.Int) {
+                return ConstantNode.forInt(Math.subtractExact(xConst.asInt(), yConst.asInt()));
+            } else {
+                assert xConst.getKind() == Kind.Long;
+                return ConstantNode.forLong(Math.subtractExact(xConst.asLong(), yConst.asLong()));
+            }
+        } catch (ArithmeticException ex) {
+            // The operation will result in an overflow exception, so do not canonicalize.
+        }
+        return this;
+    }
+
+    @Override
+    public IntegerExactArithmeticSplitNode createSplit(AbstractBeginNode next, AbstractBeginNode deopt) {
+        return graph().add(new IntegerSubExactSplitNode(stamp(), getX(), getY(), next, deopt));
+    }
+
+    @Override
+    public void lower(LoweringTool tool) {
+        IntegerExactArithmeticSplitNode.lower(tool, this);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/arithmetic/IntegerSubExactSplitNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.replacements.nodes.arithmetic;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.compiler.common.type.*;
+import com.oracle.graal.graph.*;
+import com.oracle.graal.nodeinfo.*;
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.spi.*;
+
+@NodeInfo
+public final class IntegerSubExactSplitNode extends IntegerExactArithmeticSplitNode {
+    public static final NodeClass<IntegerSubExactSplitNode> TYPE = NodeClass.create(IntegerSubExactSplitNode.class);
+
+    public IntegerSubExactSplitNode(Stamp stamp, ValueNode x, ValueNode y, AbstractBeginNode next, AbstractBeginNode overflowSuccessor) {
+        super(TYPE, stamp, x, y, next, overflowSuccessor);
+    }
+
+    @Override
+    protected Value generateArithmetic(NodeLIRBuilderTool gen) {
+        return gen.getLIRGeneratorTool().emitSub(gen.operand(getX()), gen.operand(getY()), true);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/arithmetic/UnsignedMulHighNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.replacements.nodes.arithmetic;
+
+import java.util.function.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.compiler.common.type.*;
+import com.oracle.graal.graph.*;
+import com.oracle.graal.graph.spi.*;
+import com.oracle.graal.lir.gen.*;
+import com.oracle.graal.nodeinfo.*;
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.calc.*;
+import com.oracle.graal.nodes.spi.*;
+
+@NodeInfo(shortName = "|*H|")
+public final class UnsignedMulHighNode extends BinaryNode implements ArithmeticLIRLowerable {
+
+    public static final NodeClass<UnsignedMulHighNode> TYPE = NodeClass.create(UnsignedMulHighNode.class);
+
+    public UnsignedMulHighNode(ValueNode x, ValueNode y) {
+        this((IntegerStamp) x.stamp().unrestricted(), x, y);
+    }
+
+    public UnsignedMulHighNode(IntegerStamp stamp, ValueNode x, ValueNode y) {
+        super(TYPE, stamp, x, y);
+    }
+
+    /**
+     * Determines the minimum and maximum result of this node for the given inputs and returns the
+     * result of the given BiFunction on the minimum and maximum values. Note that the minima and
+     * maxima are calculated using signed min/max functions, while the values themselves are
+     * unsigned.
+     */
+    private <T> T processExtremes(ValueNode forX, ValueNode forY, BiFunction<Long, Long, T> op) {
+        IntegerStamp xStamp = (IntegerStamp) forX.stamp();
+        IntegerStamp yStamp = (IntegerStamp) forY.stamp();
+
+        Kind kind = getKind();
+        assert kind == Kind.Int || kind == Kind.Long;
+        long[] xExtremes = {xStamp.lowerBound(), xStamp.upperBound()};
+        long[] yExtremes = {yStamp.lowerBound(), yStamp.upperBound()};
+        long min = Long.MAX_VALUE;
+        long max = Long.MIN_VALUE;
+        for (long a : xExtremes) {
+            for (long b : yExtremes) {
+                long result = kind == Kind.Int ? multiplyHighUnsigned((int) a, (int) b) : multiplyHighUnsigned(a, b);
+                min = Math.min(min, result);
+                max = Math.max(max, result);
+            }
+        }
+        return op.apply(min, max);
+    }
+
+    @SuppressWarnings("cast")
+    @Override
+    public boolean inferStamp() {
+        // if min is negative, then the value can reach into the unsigned range
+        return updateStamp(processExtremes(getX(), getY(), (min, max) -> (min == (long) max || min >= 0) ? StampFactory.forInteger(getKind(), min, max) : StampFactory.forKind(getKind())));
+    }
+
+    @SuppressWarnings("cast")
+    @Override
+    public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) {
+        return processExtremes(forX, forY, (min, max) -> min == (long) max ? ConstantNode.forIntegerKind(getKind(), min) : this);
+    }
+
+    @Override
+    public void generate(NodeMappableLIRBuilder builder, ArithmeticLIRGenerator gen) {
+        Value a = builder.operand(getX());
+        Value b = builder.operand(getY());
+        builder.setResult(this, gen.emitUMulHigh(a, b));
+    }
+
+    public static int multiplyHighUnsigned(int x, int y) {
+        long xl = x & 0xFFFFFFFFL;
+        long yl = y & 0xFFFFFFFFL;
+        long r = xl * yl;
+        return (int) (r >> 32);
+    }
+
+    public static long multiplyHighUnsigned(long x, long y) {
+        // Checkstyle: stop
+        long x0, y0, z0;
+        long x1, y1, z1, z2, t;
+        // Checkstyle: resume
+
+        x0 = x & 0xFFFFFFFFL;
+        x1 = x >>> 32;
+
+        y0 = y & 0xFFFFFFFFL;
+        y1 = y >>> 32;
+
+        z0 = x0 * y0;
+        t = x1 * y0 + (z0 >>> 32);
+        z1 = t & 0xFFFFFFFFL;
+        z2 = t >>> 32;
+        z1 += x0 * y1;
+
+        return x1 * y1 + z2 + (z1 >>> 32);
+    }
+}
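
The unsigned variant differs from IntegerMulHighNode only in using logical shifts and treating both operands as unsigned. The same kind of BigInteger cross-check applies, widening through unsigned decimal strings (helper name illustrative):

    import java.math.BigInteger;

    // Compares the high 64 bits of the unsigned 128 bit product with BigInteger arithmetic.
    static void checkMultiplyHighUnsigned(long x, long y) {
        BigInteger ux = new BigInteger(Long.toUnsignedString(x));
        BigInteger uy = new BigInteger(Long.toUnsignedString(y));
        long expected = ux.multiply(uy).shiftRight(64).longValue();   // keeps the low 64 bits of the shifted product
        assert UnsignedMulHighNode.multiplyHighUnsigned(x, y) == expected;
    }
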
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.truffle.hotspot.amd64/src/com/oracle/graal/truffle/hotspot/amd64/AMD64RawNativeCallNodeFactory.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.truffle.hotspot.amd64;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.api.runtime.*;
+import com.oracle.graal.hotspot.amd64.*;
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.truffle.hotspot.nfi.*;
+
+@ServiceProvider(RawNativeCallNodeFactory.class)
+public class AMD64RawNativeCallNodeFactory implements RawNativeCallNodeFactory {
+    public FixedWithNextNode createRawCallNode(Kind returnType, JavaConstant functionPointer, ValueNode... args) {
+        return new AMD64RawNativeCallNode(returnType, functionPointer, args);
+    }
+
+    public String getArchitecture() {
+        return "AMD64";
+    }
+}
--- a/graal/com.oracle.graal.truffle.hotspot/src/com/oracle/graal/truffle/hotspot/HotSpotTruffleRuntime.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.truffle.hotspot/src/com/oracle/graal/truffle/hotspot/HotSpotTruffleRuntime.java	Fri Apr 10 16:58:26 2015 -0700
@@ -57,6 +57,8 @@
 import com.oracle.graal.printer.*;
 import com.oracle.graal.runtime.*;
 import com.oracle.graal.truffle.*;
+import com.oracle.graal.truffle.hotspot.nfi.*;
+import com.oracle.nfi.api.*;
 import com.oracle.truffle.api.*;
 import com.oracle.truffle.api.nodes.*;
 
@@ -337,4 +339,27 @@
             throw new GraalInternalError(e);
         }
     }
+
+    private static RawNativeCallNodeFactory getRawNativeCallNodeFactory(String arch) {
+        for (RawNativeCallNodeFactory factory : Services.load(RawNativeCallNodeFactory.class)) {
+            if (factory.getArchitecture().equals(arch)) {
+                return factory;
+            }
+        }
+        // No RawNativeCallNodeFactory on this platform.
+        return null;
+    }
+
+    /**
+     * Called from the VM.
+     */
+    public static NativeFunctionInterface createNativeFunctionInterface() {
+        HotSpotVMConfig config = HotSpotGraalRuntime.runtime().getConfig();
+        Backend backend = HotSpotGraalRuntime.runtime().getHostBackend();
+        RawNativeCallNodeFactory factory = getRawNativeCallNodeFactory(backend.getTarget().arch.getName());
+        if (factory == null) {
+            return null;
+        }
+        return new HotSpotNativeFunctionInterface(HotSpotGraalRuntime.runtime().getHostProviders(), factory, backend, config.dllLoad, config.dllLookup, config.rtldDefault);
+    }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.truffle.hotspot/src/com/oracle/graal/truffle/hotspot/nfi/HotSpotNativeFunctionHandle.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.truffle.hotspot.nfi;
+
+import java.util.*;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.compiler.common.*;
+import com.oracle.graal.debug.*;
+import com.oracle.graal.debug.Debug.Scope;
+import com.oracle.nfi.api.*;
+
+public class HotSpotNativeFunctionHandle implements NativeFunctionHandle {
+
+    private final InstalledCode code;
+    private final String name;
+    private final Class<?>[] argumentTypes;
+
+    public HotSpotNativeFunctionHandle(InstalledCode code, String name, Class<?>... argumentTypes) {
+        this.argumentTypes = argumentTypes;
+        this.name = name;
+        this.code = code;
+    }
+
+    private void traceCall(Object... args) {
+        try (Scope s = Debug.scope("GNFI")) {
+            if (Debug.isLogEnabled()) {
+                Debug.log("[GNFI] %s%s", name, Arrays.toString(args));
+            }
+        }
+    }
+
+    private void traceResult(Object result) {
+        try (Scope s = Debug.scope("GNFI")) {
+            if (Debug.isLogEnabled()) {
+                Debug.log("[GNFI] %s --> %s", name, result);
+            }
+        }
+    }
+
+    @Override
+    public Object call(Object... args) {
+        assert checkArgs(args);
+        try {
+            traceCall(args);
+            Object res = code.executeVarargs(args, null, null);
+            traceResult(res);
+            return res;
+        } catch (InvalidInstalledCodeException e) {
+            throw GraalInternalError.shouldNotReachHere("Execution of GNFI Callstub failed: " + name);
+        }
+    }
+
+    private boolean checkArgs(Object... args) {
+        assert args.length == argumentTypes.length : this + " expected " + argumentTypes.length + " args, got " + args.length;
+        for (int i = 0; i < argumentTypes.length; i++) {
+            Object arg = args[i];
+            assert arg != null;
+            Class<?> expectedType = argumentTypes[i];
+            if (expectedType.isPrimitive()) {
+                Kind kind = Kind.fromJavaClass(expectedType);
+                expectedType = kind.toBoxedJavaClass();
+            }
+            assert expectedType == arg.getClass() : this + " expected arg " + i + " to be " + expectedType.getName() + ", not " + arg.getClass().getName();
+
+        }
+        return true;
+    }
+
+    @Override
+    public String toString() {
+        return name + Arrays.toString(argumentTypes);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.truffle.hotspot/src/com/oracle/graal/truffle/hotspot/nfi/HotSpotNativeFunctionInterface.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.truffle.hotspot.nfi;
+
+import static com.oracle.graal.api.code.CodeUtil.*;
+import static com.oracle.graal.compiler.common.UnsafeAccess.*;
+import static com.oracle.graal.truffle.hotspot.nfi.NativeCallStubGraphBuilder.*;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.code.CallingConvention.Type;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.compiler.*;
+import com.oracle.graal.compiler.target.*;
+import com.oracle.graal.debug.*;
+import com.oracle.graal.debug.Debug.Scope;
+import com.oracle.graal.hotspot.*;
+import com.oracle.graal.hotspot.meta.*;
+import com.oracle.graal.lir.asm.*;
+import com.oracle.graal.lir.phases.*;
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.phases.*;
+import com.oracle.graal.phases.tiers.*;
+import com.oracle.nfi.api.*;
+
+public class HotSpotNativeFunctionInterface implements NativeFunctionInterface {
+
+    private final HotSpotProviders providers;
+    private final Backend backend;
+    private final HotSpotNativeLibraryHandle rtldDefault;
+    private final HotSpotNativeFunctionPointer libraryLoadFunctionPointer;
+    private final HotSpotNativeFunctionPointer functionLookupFunctionPointer;
+    private final RawNativeCallNodeFactory factory;
+
+    private HotSpotNativeFunctionHandle libraryLookupFunctionHandle;
+    private HotSpotNativeFunctionHandle dllLookupFunctionHandle;
+
+    public HotSpotNativeFunctionInterface(HotSpotProviders providers, RawNativeCallNodeFactory factory, Backend backend, long dlopen, long dlsym, long rtldDefault) {
+        this.rtldDefault = rtldDefault == HotSpotVMConfig.INVALID_RTLD_DEFAULT_HANDLE ? null : new HotSpotNativeLibraryHandle("RTLD_DEFAULT", rtldDefault);
+        this.providers = providers;
+        assert backend != null;
+        this.backend = backend;
+        this.factory = factory;
+        this.libraryLoadFunctionPointer = new HotSpotNativeFunctionPointer(dlopen, "os::dll_load");
+        this.functionLookupFunctionPointer = new HotSpotNativeFunctionPointer(dlsym, "os::dll_lookup");
+    }
+
+    @Override
+    public HotSpotNativeLibraryHandle getLibraryHandle(String libPath) {
+        if (libraryLookupFunctionHandle == null) {
+            libraryLookupFunctionHandle = createHandle(libraryLoadFunctionPointer, long.class, long.class, long.class, int.class);
+        }
+
+        int ebufLen = 1024;
+        // Allocating a single chunk for both the error message buffer and the
+        // file name simplifies deallocation below.
+        long buffer = unsafe.allocateMemory(ebufLen + libPath.length() + 1);
+        long ebuf = buffer;
+        long libPathCString = writeCString(libPath, buffer + ebufLen);
+        try {
+            long handle = (long) libraryLookupFunctionHandle.call(libPathCString, ebuf, ebufLen);
+            if (handle == 0) {
+                throw new UnsatisfiedLinkError(libPath);
+            }
+            return new HotSpotNativeLibraryHandle(libPath, handle);
+        } finally {
+            unsafe.freeMemory(buffer);
+        }
+    }
+
+    @Override
+    public HotSpotNativeFunctionHandle getFunctionHandle(NativeLibraryHandle library, String name, Class<?> returnType, Class<?>... argumentTypes) {
+        HotSpotNativeFunctionPointer functionPointer = lookupFunctionPointer(name, library, false);
+        return createHandle(functionPointer, returnType, argumentTypes);
+    }
+
+    @Override
+    public HotSpotNativeFunctionHandle getFunctionHandle(NativeLibraryHandle[] libraries, String name, Class<?> returnType, Class<?>... argumentTypes) {
+        HotSpotNativeFunctionPointer functionPointer = null;
+        for (NativeLibraryHandle libraryHandle : libraries) {
+            functionPointer = lookupFunctionPointer(name, libraryHandle, false);
+            if (functionPointer != null) {
+                return createHandle(functionPointer, returnType, argumentTypes);
+            }
+        }
+        // Fall back to default library path
+        return getFunctionHandle(name, returnType, argumentTypes);
+    }
+
+    @Override
+    public HotSpotNativeFunctionHandle getFunctionHandle(String name, Class<?> returnType, Class<?>... argumentTypes) {
+        if (rtldDefault == null) {
+            throw new UnsatisfiedLinkError(name);
+        }
+        return getFunctionHandle(rtldDefault, name, returnType, argumentTypes);
+    }
+
+    private HotSpotNativeFunctionPointer lookupFunctionPointer(String name, NativeLibraryHandle library, boolean linkageErrorIfMissing) {
+        if (name == null || library == null) {
+            throw new NullPointerException();
+        }
+
+        if (dllLookupFunctionHandle == null) {
+            dllLookupFunctionHandle = createHandle(functionLookupFunctionPointer, long.class, long.class, long.class);
+        }
+        long nameCString = createCString(name);
+        try {
+            long functionPointer = (long) dllLookupFunctionHandle.call(((HotSpotNativeLibraryHandle) library).value, nameCString);
+            if (functionPointer == 0L) {
+                if (!linkageErrorIfMissing) {
+                    return null;
+                }
+                throw new UnsatisfiedLinkError(name);
+            }
+            return new HotSpotNativeFunctionPointer(functionPointer, name);
+        } finally {
+            unsafe.freeMemory(nameCString);
+        }
+    }
+
+    @Override
+    public HotSpotNativeFunctionHandle getFunctionHandle(NativeFunctionPointer functionPointer, Class<?> returnType, Class<?>... argumentTypes) {
+        if (!(functionPointer instanceof HotSpotNativeFunctionPointer)) {
+            throw new UnsatisfiedLinkError(functionPointer.getName());
+        }
+        return createHandle(functionPointer, returnType, argumentTypes);
+    }
+
+    private HotSpotNativeFunctionHandle createHandle(NativeFunctionPointer functionPointer, Class<?> returnType, Class<?>... argumentTypes) {
+        HotSpotNativeFunctionPointer hs = (HotSpotNativeFunctionPointer) functionPointer;
+        if (hs != null) {
+            InstalledCode code = installNativeFunctionStub(hs.value, returnType, argumentTypes);
+            return new HotSpotNativeFunctionHandle(code, hs.name, argumentTypes);
+        } else {
+            return null;
+        }
+    }
+
+    /**
+     * Creates and installs a stub for calling a native function.
+     */
+    private InstalledCode installNativeFunctionStub(long functionPointer, Class<?> returnType, Class<?>... argumentTypes) {
+        StructuredGraph g = getGraph(providers, factory, functionPointer, returnType, argumentTypes);
+        Suites suites = providers.getSuites().createSuites();
+        LIRSuites lirSuites = providers.getSuites().createLIRSuites();
+        PhaseSuite<HighTierContext> phaseSuite = backend.getSuites().getDefaultGraphBuilderSuite().copy();
+        CallingConvention cc = getCallingConvention(providers.getCodeCache(), Type.JavaCallee, g.method(), false);
+        CompilationResult compResult = GraalCompiler.compileGraph(g, cc, g.method(), providers, backend, backend.getTarget(), phaseSuite, OptimisticOptimizations.ALL,
+                        DefaultProfilingInfo.get(TriState.UNKNOWN), null, suites, lirSuites, new CompilationResult(), CompilationResultBuilderFactory.Default);
+        InstalledCode installedCode;
+        try (Scope s = Debug.scope("CodeInstall", providers.getCodeCache(), g.method())) {
+            installedCode = providers.getCodeCache().addMethod(g.method(), compResult, null, null);
+        } catch (Throwable e) {
+            throw Debug.handle(e);
+        }
+        return installedCode;
+    }
+
+    @Override
+    public HotSpotNativeFunctionPointer getFunctionPointer(NativeLibraryHandle[] libraries, String name) {
+        for (NativeLibraryHandle libraryHandle : libraries) {
+            HotSpotNativeFunctionPointer functionPointer = lookupFunctionPointer(name, libraryHandle, false);
+            if (functionPointer != null) {
+                return functionPointer;
+            }
+        }
+        // Fall back to default library path
+        if (rtldDefault == null) {
+            throw new UnsatisfiedLinkError(name);
+        }
+        return lookupFunctionPointer(name, rtldDefault, false);
+    }
+
+    public boolean isDefaultLibrarySearchSupported() {
+        return rtldDefault != null;
+    }
+
+    @Override
+    public NativeFunctionPointer getNativeFunctionPointerFromRawValue(long rawValue) {
+        return new HotSpotNativeFunctionPointer(rawValue, null);
+    }
+}
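For orientation, a handle produced by the interface above would typically be obtained and invoked roughly as in the sketch below. The NativeFunctionInterfaceRuntime.getNativeFunctionInterface() accessor and the availability of a "pow" symbol on the default search path are assumptions for illustration, not part of this changeset:

    // Assumed entry point; the concrete way to obtain the interface is outside this diff.
    NativeFunctionInterface nfi = NativeFunctionInterfaceRuntime.getNativeFunctionInterface();
    if (nfi != null && nfi.isDefaultLibrarySearchSupported()) {
        // Resolves the symbol via the default search path (RTLD_DEFAULT) and compiles a call stub for it.
        NativeFunctionHandle pow = nfi.getFunctionHandle("pow", double.class, double.class, double.class);
        double result = (double) pow.call(2.0, 10.0); // 1024.0
    }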
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.truffle.hotspot/src/com/oracle/graal/truffle/hotspot/nfi/HotSpotNativeFunctionPointer.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.truffle.hotspot.nfi;
+
+import com.oracle.nfi.api.*;
+
+public class HotSpotNativeFunctionPointer implements NativeFunctionPointer {
+
+    final long value;
+    final String name;
+
+    public HotSpotNativeFunctionPointer(long value, String name) {
+        if (value == 0) {
+            throw new UnsatisfiedLinkError(name);
+        }
+        this.value = value;
+        this.name = name;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public long getRawValue() {
+        return value;
+    }
+
+    @Override
+    public String toString() {
+        return name + "@0x" + Long.toHexString(value);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.truffle.hotspot/src/com/oracle/graal/truffle/hotspot/nfi/HotSpotNativeLibraryHandle.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.truffle.hotspot.nfi;
+
+import com.oracle.nfi.api.*;
+
+public class HotSpotNativeLibraryHandle implements NativeLibraryHandle {
+
+    final long value;
+    final String name;
+
+    public HotSpotNativeLibraryHandle(String name, long handle) {
+        this.name = name;
+        this.value = handle;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj instanceof HotSpotNativeLibraryHandle) {
+            HotSpotNativeLibraryHandle that = (HotSpotNativeLibraryHandle) obj;
+            return that.value == value;
+        }
+        return false;
+    }
+
+    @Override
+    public int hashCode() {
+        return (int) value;
+    }
+
+    @Override
+    public String toString() {
+        return name + "@" + value;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.truffle.hotspot/src/com/oracle/graal/truffle/hotspot/nfi/NativeCallStubGraphBuilder.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2014, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.truffle.hotspot.nfi;
+
+import static com.oracle.graal.hotspot.HotSpotGraalRuntime.*;
+
+import java.util.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.compiler.common.*;
+import com.oracle.graal.compiler.common.type.*;
+import com.oracle.graal.hotspot.meta.*;
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.StructuredGraph.AllowAssumptions;
+import com.oracle.graal.nodes.extended.*;
+import com.oracle.graal.nodes.java.*;
+import com.oracle.graal.nodes.virtual.*;
+
+/**
+ * Utility for creating a graph for a stub used to call a native function.
+ */
+public class NativeCallStubGraphBuilder {
+
+    /**
+     * Creates a graph for a stub used to call a native function.
+     *
+     * @param functionPointer a native function pointer
+     * @param returnType the type of the return value
+     * @param argumentTypes the types of the arguments
+     * @return the graph that represents the call stub
+     */
+    public static StructuredGraph getGraph(HotSpotProviders providers, RawNativeCallNodeFactory factory, long functionPointer, Class<?> returnType, Class<?>... argumentTypes) {
+        try {
+            ResolvedJavaMethod method = providers.getMetaAccess().lookupJavaMethod(NativeCallStubGraphBuilder.class.getMethod("libCall", Object.class, Object.class, Object.class));
+            StructuredGraph g = new StructuredGraph(method, AllowAssumptions.NO);
+            ParameterNode arg0 = g.unique(new ParameterNode(0, StampFactory.forKind(Kind.Object)));
+            ParameterNode arg1 = g.unique(new ParameterNode(1, StampFactory.forKind(Kind.Object)));
+            ParameterNode arg2 = g.unique(new ParameterNode(2, StampFactory.forKind(Kind.Object)));
+            FrameState frameState = g.add(new FrameState(null, method, 0, Arrays.asList(new ValueNode[]{arg0, arg1, arg2}), 3, 0, false, false, null, new ArrayList<EscapeObjectState>()));
+            g.start().setStateAfter(frameState);
+            List<ValueNode> parameters = new ArrayList<>();
+            FixedWithNextNode fixedWithNext = getParameters(g, arg0, argumentTypes.length, argumentTypes, parameters, providers);
+            JavaConstant functionPointerNode = JavaConstant.forLong(functionPointer);
+
+            ValueNode[] arguments = new ValueNode[parameters.size()];
+
+            for (int i = 0; i < arguments.length; i++) {
+                arguments[i] = parameters.get(i);
+            }
+
+            FixedWithNextNode callNode = g.add(factory.createRawCallNode(getKind(returnType), functionPointerNode, arguments));
+
+            if (fixedWithNext == null) {
+                g.start().setNext(callNode);
+            } else {
+                fixedWithNext.setNext(callNode);
+            }
+
+            // box result
+            BoxNode boxedResult;
+            if (callNode.getKind() != Kind.Void) {
+                if (callNode.getKind() == Kind.Object) {
+                    throw new IllegalArgumentException("Return type not supported: " + returnType.getName());
+                }
+                ResolvedJavaType type = providers.getMetaAccess().lookupJavaType(callNode.getKind().toBoxedJavaClass());
+                boxedResult = g.unique(new BoxNode(callNode, type, callNode.getKind()));
+            } else {
+                boxedResult = g.unique(new BoxNode(ConstantNode.forLong(0, g), providers.getMetaAccess().lookupJavaType(Long.class), Kind.Long));
+            }
+
+            ReturnNode returnNode = g.add(new ReturnNode(boxedResult));
+            callNode.setNext(returnNode);
+            return g;
+        } catch (NoSuchMethodException e) {
+            throw GraalInternalError.shouldNotReachHere("Call Stub method not found");
+        }
+    }
+
+    private static FixedWithNextNode getParameters(StructuredGraph g, ParameterNode argumentsArray, int numArgs, Class<?>[] argumentTypes, List<ValueNode> args, HotSpotProviders providers) {
+        assert numArgs == argumentTypes.length;
+        FixedWithNextNode last = null;
+        for (int i = 0; i < numArgs; i++) {
+            // load boxed array element:
+            LoadIndexedNode boxedElement = g.add(new LoadIndexedNode(argumentsArray, ConstantNode.forInt(i, g), Kind.Object));
+            if (i == 0) {
+                g.start().setNext(boxedElement);
+                last = boxedElement;
+            } else {
+                last.setNext(boxedElement);
+                last = boxedElement;
+            }
+            Class<?> type = argumentTypes[i];
+            Kind kind = getKind(type);
+            if (kind == Kind.Object) {
+                // array value
+                Kind arrayElementKind = getElementKind(type);
+                LocationIdentity locationIdentity = NamedLocationIdentity.getArrayLocation(arrayElementKind);
+                int displacement = runtime().getArrayBaseOffset(arrayElementKind);
+                ConstantNode index = ConstantNode.forInt(0, g);
+                int indexScaling = runtime().getArrayIndexScale(arrayElementKind);
+                IndexedLocationNode locationNode = g.unique(new IndexedLocationNode(locationIdentity, displacement, index, indexScaling));
+                Stamp wordStamp = StampFactory.forKind(providers.getWordTypes().getWordKind());
+                ComputeAddressNode arrayAddress = g.unique(new ComputeAddressNode(boxedElement, locationNode, wordStamp));
+                args.add(arrayAddress);
+            } else {
+                // boxed primitive value
+                try {
+                    ResolvedJavaField field = providers.getMetaAccess().lookupJavaField(kind.toBoxedJavaClass().getDeclaredField("value"));
+                    LoadFieldNode loadFieldNode = g.add(new LoadFieldNode(boxedElement, field));
+                    last.setNext(loadFieldNode);
+                    last = loadFieldNode;
+                    args.add(loadFieldNode);
+                } catch (NoSuchFieldException e) {
+                    throw new GraalInternalError(e);
+                }
+            }
+        }
+        return last;
+    }
+
+    public static Kind getElementKind(Class<?> clazz) {
+        Class<?> componentType = clazz.getComponentType();
+        if (componentType == null) {
+            throw new IllegalArgumentException("Parameter type not supported: " + clazz);
+        }
+        if (componentType.isPrimitive()) {
+            return Kind.fromJavaClass(componentType);
+        }
+        throw new IllegalArgumentException("Parameter type not supported: " + clazz);
+    }
+
+    private static Kind getKind(Class<?> clazz) {
+        if (clazz == int.class || clazz == Integer.class) {
+            return Kind.Int;
+        } else if (clazz == long.class || clazz == Long.class) {
+            return Kind.Long;
+        } else if (clazz == char.class || clazz == Character.class) {
+            return Kind.Char;
+        } else if (clazz == byte.class || clazz == Byte.class) {
+            return Kind.Byte;
+        } else if (clazz == float.class || clazz == Float.class) {
+            return Kind.Float;
+        } else if (clazz == double.class || clazz == Double.class) {
+            return Kind.Double;
+        } else if (clazz == int[].class || clazz == long[].class || clazz == char[].class || clazz == byte[].class || clazz == float[].class || clazz == double[].class) {
+            return Kind.Object;
+        } else if (clazz == void.class) {
+            return Kind.Void;
+        } else {
+            throw new IllegalArgumentException("Type not supported: " + clazz);
+        }
+    }
+
+    @SuppressWarnings("unused")
+    public static Object libCall(Object argLoc, Object unused1, Object unused2) {
+        throw GraalInternalError.shouldNotReachHere("GNFI libCall method must not be called");
+    }
+}
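In plain Java terms, the stub graph that getGraph assembles for, say, a native double f(long, double) behaves roughly like the sketch below. StubShape and nativeCall are illustrative placeholders for the generated stub and for the raw call node produced by the RawNativeCallNodeFactory; they are not real APIs:

    final class StubShape {
        // Stand-in for the machine-level call through the constant function pointer.
        static double nativeCall(long a0, double a1) {
            return a0 + a1;
        }

        // Mirrors libCall: the first parameter carries the array of boxed arguments.
        static Object libCall(Object argLoc, Object unused1, Object unused2) {
            Object[] args = (Object[]) argLoc;
            long a0 = (Long) args[0];        // LoadIndexedNode + LoadFieldNode("value"): unbox argument 0
            double a1 = (Double) args[1];    // ... and argument 1
            double raw = nativeCall(a0, a1); // the FixedWithNextNode created by the factory
            return Double.valueOf(raw);      // BoxNode: box the primitive result for the caller
        }
    }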
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.truffle.hotspot/src/com/oracle/graal/truffle/hotspot/nfi/RawNativeCallNodeFactory.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2014, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.truffle.hotspot.nfi;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.api.runtime.*;
+import com.oracle.graal.nodes.*;
+
+/**
+ * Factory for creating a node that makes a direct call to a native function pointer.
+ */
+public interface RawNativeCallNodeFactory extends Service {
+    FixedWithNextNode createRawCallNode(Kind returnType, JavaConstant functionPointer, ValueNode... args);
+
+    String getArchitecture();
+}
--- a/graal/com.oracle.graal.truffle.test/src/com/oracle/graal/truffle/test/BytecodeInterpreterPartialEvaluationTest.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.truffle.test/src/com/oracle/graal/truffle/test/BytecodeInterpreterPartialEvaluationTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -22,6 +22,8 @@
  */
 package com.oracle.graal.truffle.test;
 
+import java.util.*;
+
 import org.junit.*;
 
 import com.oracle.truffle.api.*;
@@ -155,11 +157,11 @@
         return 42;
     }
 
-    private static void assertReturns42(Program program) {
+    private static void assertReturns42(RootNode program) {
         Assert.assertEquals(Integer.valueOf(42), Truffle.getRuntime().createCallTarget(program).call());
     }
 
-    private void assertPartialEvalEqualsAndRunsCorrect(Program program) {
+    private void assertPartialEvalEqualsAndRunsCorrect(RootNode program) {
         assertReturns42(program);
         assertPartialEvalEquals("constant42", program);
     }
@@ -317,4 +319,213 @@
         /* 44: */Bytecode.RETURN};
         assertPartialEvalEqualsAndRunsCorrect(new Program("manyIfsProgram", bytecodes, 0, 3));
     }
+
+    public abstract static class Inst {
+        public abstract boolean execute(VirtualFrame frame);
+
+        public abstract int getTrueSucc();
+
+        public abstract int getFalseSucc();
+
+        public static class Const extends Inst {
+            private final FrameSlot slot;
+            private final int value;
+            private final int next;
+
+            public Const(FrameSlot slot, int value, int next) {
+                this.slot = slot;
+                this.value = value;
+                this.next = next;
+            }
+
+            @Override
+            public boolean execute(VirtualFrame frame) {
+                frame.setInt(slot, value);
+                return true;
+            }
+
+            @Override
+            public int getTrueSucc() {
+                return next;
+            }
+
+            @Override
+            public int getFalseSucc() {
+                return next;
+            }
+        }
+
+        public static class Return extends Inst {
+            public Return() {
+            }
+
+            @Override
+            public boolean execute(VirtualFrame frame) {
+                return true;
+            }
+
+            @Override
+            public int getTrueSucc() {
+                return -1;
+            }
+
+            @Override
+            public int getFalseSucc() {
+                return -1;
+            }
+        }
+
+        public static class IfZero extends Inst {
+            private final FrameSlot slot;
+            private final int thenInst;
+            private final int elseInst;
+
+            public IfZero(FrameSlot slot, int thenInst, int elseInst) {
+                this.slot = slot;
+                this.thenInst = thenInst;
+                this.elseInst = elseInst;
+            }
+
+            @Override
+            public boolean execute(VirtualFrame frame) {
+                return (FrameUtil.getIntSafe(frame, slot) == 0);
+            }
+
+            @Override
+            public int getTrueSucc() {
+                return thenInst;
+            }
+
+            @Override
+            public int getFalseSucc() {
+                return elseInst;
+            }
+        }
+
+        public static class IfLt extends Inst {
+            private final FrameSlot slot1;
+            private final FrameSlot slot2;
+            private final int thenInst;
+            private final int elseInst;
+
+            public IfLt(FrameSlot slot1, FrameSlot slot2, int thenInst, int elseInst) {
+                this.slot1 = slot1;
+                this.slot2 = slot2;
+                this.thenInst = thenInst;
+                this.elseInst = elseInst;
+            }
+
+            @Override
+            public boolean execute(VirtualFrame frame) {
+                return (FrameUtil.getIntSafe(frame, slot1) < FrameUtil.getIntSafe(frame, slot2));
+            }
+
+            @Override
+            public int getTrueSucc() {
+                return thenInst;
+            }
+
+            @Override
+            public int getFalseSucc() {
+                return elseInst;
+            }
+        }
+    }
+
+    public static class InstArrayProgram extends RootNode {
+        private final String name;
+        @CompilationFinal protected final Inst[] inst;
+        protected final FrameSlot returnSlot;
+
+        public InstArrayProgram(String name, Inst[] inst, FrameSlot returnSlot, FrameDescriptor fd) {
+            super(null, fd);
+            this.name = name;
+            this.inst = inst;
+            this.returnSlot = returnSlot;
+        }
+
+        @Override
+        public String toString() {
+            return name;
+        }
+
+        @Override
+        @ExplodeLoop(merge = true)
+        public Object execute(VirtualFrame frame) {
+            int ip = 0;
+            while (ip != -1) {
+                CompilerAsserts.partialEvaluationConstant(ip);
+                if (inst[ip].execute(frame)) {
+                    ip = inst[ip].getTrueSucc();
+                } else {
+                    ip = inst[ip].getFalseSucc();
+                }
+            }
+            return FrameUtil.getIntSafe(frame, returnSlot);
+        }
+    }
+
+    @Test
+    public void instArraySimpleIfProgram() {
+        FrameDescriptor fd = new FrameDescriptor();
+        FrameSlot valueSlot = fd.addFrameSlot("value", FrameSlotKind.Int);
+        FrameSlot returnSlot = fd.addFrameSlot("return", FrameSlotKind.Int);
+        Inst[] inst = new Inst[]{
+        /* 0: */new Inst.Const(valueSlot, 1, 1),
+        /* 1: */new Inst.IfZero(valueSlot, 2, 4),
+        /* 2: */new Inst.Const(returnSlot, 41, 3),
+        /* 3: */new Inst.Return(),
+        /* 4: */new Inst.Const(returnSlot, 42, 5),
+        /* 5: */new Inst.Return()};
+        assertPartialEvalEqualsAndRunsCorrect(new InstArrayProgram("instArraySimpleIfProgram", inst, returnSlot, fd));
+    }
+
+    /**
+     * Slightly modified version to expose a partial evaluation bug with ExplodeLoop(merge=true).
+     */
+    public static class InstArrayProgram2 extends InstArrayProgram {
+        public InstArrayProgram2(String name, Inst[] inst, FrameSlot returnSlot, FrameDescriptor fd) {
+            super(name, inst, returnSlot, fd);
+        }
+
+        @Override
+        @ExplodeLoop(merge = true)
+        public Object execute(VirtualFrame frame) {
+            int ip = 0;
+            while (ip != -1) {
+                CompilerAsserts.partialEvaluationConstant(ip);
+                if (inst[ip].execute(frame)) {
+                    ip = inst[ip].getTrueSucc();
+                } else {
+                    ip = inst[ip].getFalseSucc();
+                }
+            }
+            if (frame.getArguments().length > 0) {
+                return new Random();
+            } else {
+                return FrameUtil.getIntSafe(frame, returnSlot);
+            }
+        }
+    }
+
+    @Ignore("produces a bad graph")
+    @Test
+    public void instArraySimpleIfProgram2() {
+        FrameDescriptor fd = new FrameDescriptor();
+        FrameSlot value1Slot = fd.addFrameSlot("value1", FrameSlotKind.Int);
+        FrameSlot value2Slot = fd.addFrameSlot("value2", FrameSlotKind.Int);
+        FrameSlot returnSlot = fd.addFrameSlot("return", FrameSlotKind.Int);
+        Inst[] inst = new Inst[]{
+        /* 0: */new Inst.Const(value1Slot, 100, 1),
+        /* 1: */new Inst.Const(value2Slot, 100, 2),
+        /* 2: */new Inst.IfLt(value1Slot, value2Slot, 3, 5),
+        /* 3: */new Inst.Const(returnSlot, 41, 4),
+        /* 4: */new Inst.Return(),
+        /* 5: */new Inst.Const(returnSlot, 42, 6),
+        /* 6: */new Inst.Return()};
+        InstArrayProgram program = new InstArrayProgram2("instArraySimpleIfProgram2", inst, returnSlot, fd);
+        program.execute(Truffle.getRuntime().createVirtualFrame(new Object[0], fd));
+        program.execute(Truffle.getRuntime().createVirtualFrame(new Object[1], fd));
+        assertPartialEvalEqualsAndRunsCorrect(program);
+    }
 }
--- a/graal/com.oracle.graal.truffle.test/src/com/oracle/graal/truffle/test/ConditionAnchoringTest.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.truffle.test/src/com/oracle/graal/truffle/test/ConditionAnchoringTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -137,7 +137,7 @@
     @Override
     protected GraphBuilderConfiguration editGraphBuilderConfiguration(GraphBuilderConfiguration conf) {
         // get UnsafeAccessImpl.unsafeGetInt intrinsified
-        TruffleGraphBuilderPlugins.registerUnsafeAccessImplPlugins(conf.getPlugins().getInvocationPlugins());
+        TruffleGraphBuilderPlugins.registerUnsafeAccessImplPlugins(conf.getPlugins().getInvocationPlugins(), false);
         // get UnsafeAccess.getInt inlined
         conf.getPlugins().setInlineInvokePlugin(new InlineEverythingPlugin());
         conf.getPlugins().setLoadFieldPlugin(new FoldLoadsPlugins(getMetaAccess(), getConstantReflection()));
--- a/graal/com.oracle.graal.truffle.test/src/com/oracle/graal/truffle/test/SimplePartialEvaluationTest.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.truffle.test/src/com/oracle/graal/truffle/test/SimplePartialEvaluationTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -61,9 +61,9 @@
         } catch (SourceStackTrace t) {
             // Expected verification error occurred.
             StackTraceElement[] trace = t.getStackTrace();
-            Assert.assertEquals("com.oracle.truffle.api.nodes.RootNode.getFrameDescriptor(RootNode.java)", trace[0].toString());
-            String secondString = trace[1].toString();
-            Assert.assertEquals("com.oracle.graal.truffle.OptimizedCallTarget.callRoot(OptimizedCallTarget.java:" /* "259)" */, secondString.substring(0, secondString.length() - 4));
+            Assert.assertTrue(trace[0].toString().startsWith("com.oracle.truffle.api.CompilerAsserts.neverPartOfCompilation(CompilerAsserts.java:"));
+            Assert.assertTrue(trace[1].toString().startsWith("com.oracle.graal.truffle.test.nodes.NeverPartOfCompilationTestNode.execute(NeverPartOfCompilationTestNode.java:"));
+            Assert.assertTrue(trace[2].toString().startsWith("com.oracle.graal.truffle.test.nodes.RootTestNode.execute(RootTestNode.java:"));
         }
     }
 
@@ -75,6 +75,13 @@
     }
 
     @Test
+    public void twoMergesLoopExplosion() {
+        FrameDescriptor fd = new FrameDescriptor();
+        AbstractTestNode result = new AddTestNode(new TwoMergesExplodedLoopTestNode(5), new ConstantTestNode(37));
+        assertPartialEvalEquals("constant42", new RootTestNode(fd, "twoMergesLoopExplosion", result));
+    }
+
+    @Test
     public void sequenceConstants() {
         FrameDescriptor fd = new FrameDescriptor();
         AbstractTestNode result = new BlockTestNode(new AbstractTestNode[]{new ConstantTestNode(40), new ConstantTestNode(42)});
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.truffle.test/src/com/oracle/graal/truffle/test/nodes/TwoMergesExplodedLoopTestNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.truffle.test.nodes;
+
+import com.oracle.truffle.api.*;
+import com.oracle.truffle.api.frame.*;
+import com.oracle.truffle.api.nodes.*;
+
+@NodeInfo
+public class TwoMergesExplodedLoopTestNode extends AbstractTestNode {
+
+    static class Flag {
+        boolean flag = true;
+    }
+
+    private final int count;
+
+    public TwoMergesExplodedLoopTestNode(int count) {
+        this.count = count;
+    }
+
+    @ExplodeLoop(merge = false)
+    @Override
+    public int execute(VirtualFrame frame) {
+        Flag flag = new Flag();
+        int result = 0;
+        int i = 0;
+        while (i < count) {
+            i++;
+
+            CompilerAsserts.partialEvaluationConstant(result);
+
+            if (flag.flag) {
+                result++;
+                continue;
+            } else {
+                result--;
+                continue;
+            }
+        }
+        return result;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/CachingPEGraphDecoder.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.truffle;
+
+import java.util.*;
+
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.debug.*;
+import com.oracle.graal.graphbuilderconf.*;
+import com.oracle.graal.java.*;
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.StructuredGraph.AllowAssumptions;
+import com.oracle.graal.phases.common.*;
+import com.oracle.graal.phases.tiers.*;
+import com.oracle.graal.phases.util.*;
+
+/**
+ * A graph decoder that provides all required encoded graphs on the fly, by parsing the methods and
+ * encoding the graphs when they are first requested.
+ */
+public class CachingPEGraphDecoder extends PEGraphDecoder {
+
+    private final Providers providers;
+    private final GraphBuilderConfiguration graphBuilderConfig;
+    private final AllowAssumptions allowAssumptions;
+    private final Map<ResolvedJavaMethod, EncodedGraph> graphCache;
+
+    public CachingPEGraphDecoder(Providers providers, GraphBuilderConfiguration graphBuilderConfig, AllowAssumptions allowAssumptions) {
+        super(providers.getMetaAccess(), providers.getConstantReflection(), providers.getStampProvider());
+
+        this.providers = providers;
+        this.graphBuilderConfig = graphBuilderConfig;
+        this.allowAssumptions = allowAssumptions;
+        this.graphCache = new HashMap<>();
+    }
+
+    private EncodedGraph createGraph(ResolvedJavaMethod method) {
+        StructuredGraph graph = new StructuredGraph(method, allowAssumptions);
+        try (Debug.Scope scope = Debug.scope("createGraph", graph)) {
+
+            new GraphBuilderPhase.Instance(providers.getMetaAccess(), providers.getStampProvider(), providers.getConstantReflection(), graphBuilderConfig, TruffleCompilerImpl.Optimizations, null).apply(graph);
+
+            PhaseContext context = new PhaseContext(providers);
+            new CanonicalizerPhase().apply(graph, context);
+
+            EncodedGraph encodedGraph = GraphEncoder.encodeSingleGraph(graph);
+            graphCache.put(method, encodedGraph);
+            return encodedGraph;
+
+        } catch (Throwable ex) {
+            throw Debug.handle(ex);
+        }
+    }
+
+    @Override
+    protected EncodedGraph lookupEncodedGraph(ResolvedJavaMethod method) {
+        EncodedGraph result = graphCache.get(method);
+        if (result == null && method.hasBytecodes()) {
+            result = createGraph(method);
+        }
+        return result;
+    }
+}
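For context, a minimal sketch of how such a decoder might be driven; providers, graphBuilderConfig, and method are assumed to be supplied by the surrounding compiler setup (they are not defined in this changeset), and all plugins are left null so no inlining, intrinsification, or loop explosion takes place:

    CachingPEGraphDecoder decoder = new CachingPEGraphDecoder(providers, graphBuilderConfig, AllowAssumptions.YES);
    StructuredGraph targetGraph = new StructuredGraph(method, AllowAssumptions.YES);
    // Decodes (and, with plugins configured, partially evaluates) the method into targetGraph.
    decoder.decode(targetGraph, method, null, null, null, null);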
--- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/OptimizedCallTarget.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/OptimizedCallTarget.java	Fri Apr 10 16:58:26 2015 -0700
@@ -163,12 +163,23 @@
     public final Object callDirect(Object... args) {
         compilationProfile.reportDirectCall();
         profileArguments(args);
-        Object result = doInvoke(args);
-        Class<?> klass = profiledReturnType;
-        if (klass != null && CompilerDirectives.inCompiledCode() && profiledReturnTypeAssumption.isValid()) {
-            result = FrameWithoutBoxing.unsafeCast(result, klass, true, true);
+        try {
+            Object result = doInvoke(args);
+            Class<?> klass = profiledReturnType;
+            if (klass != null && CompilerDirectives.inCompiledCode() && profiledReturnTypeAssumption.isValid()) {
+                result = FrameWithoutBoxing.unsafeCast(result, klass, true, true);
+            }
+            return result;
+        } catch (Throwable t) {
+            t = exceptionProfile.profile(t);
+            if (t instanceof RuntimeException) {
+                throw (RuntimeException) t;
+            } else if (t instanceof Error) {
+                throw (Error) t;
+            } else {
+                throw new RuntimeException(t);
+            }
         }
-        return result;
     }
 
     public final Object callInlined(Object... arguments) {
@@ -238,18 +249,7 @@
     }
 
     protected Object doInvoke(Object[] args) {
-        try {
-            return callBoundary(args);
-        } catch (Throwable t) {
-            t = exceptionProfile.profile(t);
-            if (t instanceof RuntimeException) {
-                throw (RuntimeException) t;
-            } else if (t instanceof Error) {
-                throw (Error) t;
-            } else {
-                throw new RuntimeException(t);
-            }
-        }
+        return callBoundary(args);
     }
 
     @TruffleCallBoundary
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/PEGraphDecoder.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,621 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.truffle;
+
+import static com.oracle.graal.compiler.common.GraalInternalError.*;
+import static com.oracle.graal.java.AbstractBytecodeParser.Options.*;
+
+import java.util.*;
+
+import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.debug.*;
+import com.oracle.graal.graph.*;
+import com.oracle.graal.graph.spi.*;
+import com.oracle.graal.graphbuilderconf.*;
+import com.oracle.graal.graphbuilderconf.InlineInvokePlugin.InlineInfo;
+import com.oracle.graal.graphbuilderconf.InvocationPlugins.InvocationPluginReceiver;
+import com.oracle.graal.java.*;
+import com.oracle.graal.nodes.*;
+import com.oracle.graal.nodes.CallTargetNode.InvokeKind;
+import com.oracle.graal.nodes.extended.*;
+import com.oracle.graal.nodes.java.*;
+import com.oracle.graal.nodes.spi.*;
+import com.oracle.graal.nodes.util.*;
+import com.oracle.graal.phases.common.inlining.*;
+
+/**
+ * A graph decoder that performs partial evaluation, i.e., that performs method inlining and
+ * canonicalization/simplification of nodes during decoding.
+ *
+ * Inlining and loop explosion are configured via the plugin mechanism also used by the
+ * {@link GraphBuilderPhase}. However, not all callback methods defined in
+ * {@link GraphBuilderContext} are available since decoding is more limited than graph building.
+ *
+ * The standard {@link Canonicalizable#canonical node canonicalization} interface is used to
+ * canonicalize nodes during decoding. Additionally, {@link IfNode branches} and
+ * {@link IntegerSwitchNode switches} with constant conditions are simplified.
+ */
+public abstract class PEGraphDecoder extends GraphDecoder {
+
+    protected final MetaAccessProvider metaAccess;
+    protected final ConstantReflectionProvider constantReflection;
+    protected final StampProvider stampProvider;
+
+    protected class PEMethodScope extends MethodScope {
+        protected final ResolvedJavaMethod method;
+        protected final Invoke invoke;
+        protected final int inliningDepth;
+
+        protected final LoopExplosionPlugin loopExplosionPlugin;
+        protected final InvocationPlugins invocationPlugins;
+        protected final InlineInvokePlugin inlineInvokePlugin;
+        protected final ParameterPlugin parameterPlugin;
+        protected final ValueNode[] arguments;
+
+        protected FrameState outerFrameState;
+        protected BytecodePosition bytecodePosition;
+
+        protected PEMethodScope(StructuredGraph targetGraph, MethodScope caller, EncodedGraph encodedGraph, ResolvedJavaMethod method, Invoke invoke, int inliningDepth,
+                        LoopExplosionPlugin loopExplosionPlugin, InvocationPlugins invocationPlugins, InlineInvokePlugin inlineInvokePlugin, ParameterPlugin parameterPlugin, ValueNode[] arguments) {
+            super(targetGraph, caller, encodedGraph, loopExplosionKind(method, loopExplosionPlugin));
+
+            this.method = method;
+            this.invoke = invoke;
+            this.inliningDepth = inliningDepth;
+            this.loopExplosionPlugin = loopExplosionPlugin;
+            this.invocationPlugins = invocationPlugins;
+            this.inlineInvokePlugin = inlineInvokePlugin;
+            this.parameterPlugin = parameterPlugin;
+            this.arguments = arguments;
+        }
+    }
+
+    protected class PECanonicalizerTool implements CanonicalizerTool {
+        @Override
+        public MetaAccessProvider getMetaAccess() {
+            return metaAccess;
+        }
+
+        @Override
+        public ConstantReflectionProvider getConstantReflection() {
+            return constantReflection;
+        }
+
+        @Override
+        public boolean canonicalizeReads() {
+            return true;
+        }
+
+        @Override
+        public boolean allUsagesAvailable() {
+            return false;
+        }
+    }
+
+    protected class PENonAppendGraphBuilderContext implements GraphBuilderContext {
+        private final PEMethodScope methodScope;
+
+        public PENonAppendGraphBuilderContext(PEMethodScope methodScope) {
+            this.methodScope = methodScope;
+        }
+
+        @Override
+        public BailoutException bailout(String string) {
+            throw new BailoutException(string);
+        }
+
+        @Override
+        public StampProvider getStampProvider() {
+            return stampProvider;
+        }
+
+        @Override
+        public MetaAccessProvider getMetaAccess() {
+            return metaAccess;
+        }
+
+        @Override
+        public ConstantReflectionProvider getConstantReflection() {
+            return constantReflection;
+        }
+
+        @Override
+        public StructuredGraph getGraph() {
+            return methodScope.graph;
+        }
+
+        @Override
+        public int getDepth() {
+            return methodScope.inliningDepth;
+        }
+
+        @Override
+        public Replacement getReplacement() {
+            return null;
+        }
+
+        @Override
+        public <T extends ValueNode> T append(T value) {
+            throw unimplemented();
+        }
+
+        @Override
+        public <T extends ValueNode> T recursiveAppend(T value) {
+            throw unimplemented();
+        }
+
+        @Override
+        public void push(Kind kind, ValueNode value) {
+            throw unimplemented();
+        }
+
+        @Override
+        public void handleReplacedInvoke(InvokeKind invokeKind, ResolvedJavaMethod targetMethod, ValueNode[] args) {
+            throw unimplemented();
+        }
+
+        @Override
+        public void intrinsify(ResolvedJavaMethod targetMethod, ResolvedJavaMethod substitute, ValueNode[] args) {
+            throw unimplemented();
+        }
+
+        @Override
+        public FrameState createStateAfter() {
+            throw unimplemented();
+        }
+
+        @Override
+        public GraphBuilderContext getParent() {
+            throw unimplemented();
+        }
+
+        @Override
+        public ResolvedJavaMethod getMethod() {
+            throw unimplemented();
+        }
+
+        @Override
+        public int bci() {
+            throw unimplemented();
+        }
+
+        @Override
+        public InvokeKind getInvokeKind() {
+            throw unimplemented();
+        }
+
+        @Override
+        public JavaType getInvokeReturnType() {
+            throw unimplemented();
+        }
+    }
+
+    protected class PEAppendGraphBuilderContext extends PENonAppendGraphBuilderContext {
+        protected final Invoke invoke;
+        protected FixedWithNextNode lastInstr;
+        protected ValueNode pushedNode;
+
+        public PEAppendGraphBuilderContext(PEMethodScope methodScope, Invoke invoke, FixedWithNextNode lastInstr) {
+            super(methodScope);
+            this.invoke = invoke;
+            this.lastInstr = lastInstr;
+        }
+
+        @Override
+        public void push(Kind kind, ValueNode value) {
+            if (pushedNode != null) {
+                throw unimplemented("Only one push is supported");
+            }
+            pushedNode = value;
+        }
+
+        @Override
+        public FrameState createStateAfter() {
+            return invoke.stateAfter().duplicate();
+        }
+
+        @Override
+        public <T extends ValueNode> T append(T v) {
+            if (v.graph() != null) {
+                return v;
+            }
+            T added = getGraph().addOrUnique(v);
+            if (added == v) {
+                updateLastInstruction(v);
+            }
+            return added;
+        }
+
+        @Override
+        public <T extends ValueNode> T recursiveAppend(T v) {
+            if (v.graph() != null) {
+                return v;
+            }
+            T added = getGraph().addOrUniqueWithInputs(v);
+            if (added == v) {
+                updateLastInstruction(v);
+            }
+            return added;
+        }
+
+        private <T extends ValueNode> void updateLastInstruction(T v) {
+            if (v instanceof FixedNode) {
+                FixedNode fixedNode = (FixedNode) v;
+                lastInstr.setNext(fixedNode);
+                if (fixedNode instanceof FixedWithNextNode) {
+                    FixedWithNextNode fixedWithNextNode = (FixedWithNextNode) fixedNode;
+                    assert fixedWithNextNode.next() == null : "cannot append to an instruction that already has a next instruction";
+                    lastInstr = fixedWithNextNode;
+                } else {
+                    lastInstr = null;
+                }
+            }
+        }
+    }
+
+    public PEGraphDecoder(MetaAccessProvider metaAccess, ConstantReflectionProvider constantReflection, StampProvider stampProvider) {
+        this.metaAccess = metaAccess;
+        this.constantReflection = constantReflection;
+        this.stampProvider = stampProvider;
+    }
+
+    protected static LoopExplosionKind loopExplosionKind(ResolvedJavaMethod method, LoopExplosionPlugin loopExplosionPlugin) {
+        if (loopExplosionPlugin == null) {
+            return LoopExplosionKind.NONE;
+        } else if (loopExplosionPlugin.shouldMergeExplosions(method)) {
+            return LoopExplosionKind.MERGE_EXPLODE;
+        } else if (loopExplosionPlugin.shouldExplodeLoops(method)) {
+            return LoopExplosionKind.FULL_EXPLODE;
+        } else {
+            return LoopExplosionKind.NONE;
+        }
+    }
+
+    public void decode(StructuredGraph targetGraph, ResolvedJavaMethod method, LoopExplosionPlugin loopExplosionPlugin, InvocationPlugins invocationPlugins, InlineInvokePlugin inlineInvokePlugin,
+                    ParameterPlugin parameterPlugin) {
+        PEMethodScope methodScope = new PEMethodScope(targetGraph, null, lookupEncodedGraph(method), method, null, 0, loopExplosionPlugin, invocationPlugins, inlineInvokePlugin, parameterPlugin, null);
+        decode(methodScope);
+        cleanupGraph(methodScope);
+        methodScope.graph.verify();
+    }
+
+    @Override
+    protected void cleanupGraph(MethodScope methodScope) {
+        GraphBuilderPhase.connectLoopEndToBegin(methodScope.graph);
+        super.cleanupGraph(methodScope);
+    }
+
+    @Override
+    protected void checkLoopExplosionIteration(MethodScope s, LoopScope loopScope) {
+        PEMethodScope methodScope = (PEMethodScope) s;
+
+        Debug.dump(methodScope.graph, "Loop iteration " + loopScope.loopIteration);
+
+        if (loopScope.loopIteration > MaximumLoopExplosionCount.getValue()) {
+            String message = "too many loop explosion iterations - does the explosion not terminate for method " + methodScope.method + "?";
+            if (FailedLoopExplosionIsFatal.getValue()) {
+                throw new RuntimeException(message);
+            } else {
+                throw new BailoutException(message);
+            }
+        }
+    }
+
+    @Override
+    protected void simplifyInvoke(MethodScope s, LoopScope loopScope, int invokeOrderId, Invoke invoke) {
+        if (!(invoke.callTarget() instanceof MethodCallTargetNode)) {
+            return;
+        }
+        PEMethodScope methodScope = (PEMethodScope) s;
+        MethodCallTargetNode callTarget = (MethodCallTargetNode) invoke.callTarget();
+
+        // attempt to devirtualize the call
+        ResolvedJavaType contextType = (invoke.stateAfter() == null && invoke.stateDuring() == null) ? null : invoke.getContextType();
+        ResolvedJavaMethod specialCallTarget = MethodCallTargetNode.findSpecialCallTarget(callTarget.invokeKind(), callTarget.receiver(), callTarget.targetMethod(), contextType);
+        if (specialCallTarget != null) {
+            callTarget.setTargetMethod(specialCallTarget);
+            callTarget.setInvokeKind(InvokeKind.Special);
+        }
+
+        if (tryInvocationPlugin(methodScope, loopScope, invokeOrderId, invoke)) {
+            return;
+        }
+        if (tryInline(methodScope, loopScope, invokeOrderId, invoke)) {
+            return;
+        }
+
+        if (methodScope.inlineInvokePlugin != null) {
+            methodScope.inlineInvokePlugin.notifyOfNoninlinedInvoke(new PENonAppendGraphBuilderContext(methodScope), callTarget.targetMethod(), invoke);
+        }
+    }
+
+    protected boolean tryInvocationPlugin(PEMethodScope methodScope, LoopScope loopScope, int invokeOrderId, Invoke invoke) {
+        if (methodScope.invocationPlugins == null) {
+            return false;
+        }
+
+        MethodCallTargetNode callTarget = (MethodCallTargetNode) invoke.callTarget();
+        ResolvedJavaMethod targetMethod = callTarget.targetMethod();
+        InvocationPlugin invocationPlugin = methodScope.invocationPlugins.lookupInvocation(targetMethod);
+        if (invocationPlugin == null) {
+            return false;
+        }
+
+        ValueNode[] arguments = callTarget.arguments().toArray(new ValueNode[0]);
+        FixedWithNextNode invokePredecessor = (FixedWithNextNode) invoke.asNode().predecessor();
+        FixedNode invokeNext = invoke.next();
+        AbstractBeginNode invokeException = null;
+        if (invoke instanceof InvokeWithExceptionNode) {
+            invokeException = ((InvokeWithExceptionNode) invoke).exceptionEdge();
+        }
+
+        invoke.asNode().replaceAtPredecessor(null);
+
+        PEAppendGraphBuilderContext graphBuilderContext = new PEAppendGraphBuilderContext(methodScope, invoke, invokePredecessor);
+        InvocationPluginReceiver invocationPluginReceiver = new InvocationPluginReceiver(graphBuilderContext);
+        if (invocationPlugin.execute(graphBuilderContext, targetMethod, invocationPluginReceiver.init(targetMethod, arguments), arguments)) {
+
+            if (graphBuilderContext.lastInstr != null) {
+                registerNode(loopScope, invokeOrderId, graphBuilderContext.pushedNode, true, true);
+                invoke.asNode().replaceAtUsages(graphBuilderContext.pushedNode);
+            } else {
+                assert graphBuilderContext.pushedNode == null : "Why push a node when the invoke does not return anyway?";
+                invoke.asNode().replaceAtUsages(null);
+            }
+
+            deleteInvoke(invoke);
+            if (invokeException != null) {
+                invokeException.safeDelete();
+            }
+
+            if (graphBuilderContext.lastInstr != null) {
+                graphBuilderContext.lastInstr.setNext(invokeNext);
+            } else {
+                invokeNext.replaceAtPredecessor(null);
+                invokeNext.safeDelete();
+            }
+            return true;
+
+        } else {
+            /* Restore the original state: the invoke is attached to the graph again. */
+            invokePredecessor.setNext(invoke.asNode());
+            return false;
+        }
+    }
+
+    protected boolean tryInline(PEMethodScope methodScope, LoopScope loopScope, int invokeOrderId, Invoke invoke) {
+        if (methodScope.inlineInvokePlugin == null || !invoke.getInvokeKind().isDirect()) {
+            return false;
+        }
+
+        MethodCallTargetNode callTarget = (MethodCallTargetNode) invoke.callTarget();
+        ResolvedJavaMethod targetMethod = callTarget.targetMethod();
+        if (!targetMethod.canBeInlined()) {
+            return false;
+        }
+
+        ValueNode[] arguments = callTarget.arguments().toArray(new ValueNode[0]);
+        GraphBuilderContext graphBuilderContext = new PENonAppendGraphBuilderContext(methodScope);
+        InlineInfo inlineInfo = methodScope.inlineInvokePlugin.getInlineInfo(graphBuilderContext, targetMethod, arguments, callTarget.returnType());
+        if (inlineInfo == null) {
+            return false;
+        }
+        assert !inlineInfo.isIntrinsic && !inlineInfo.isReplacement : "not supported";
+
+        ResolvedJavaMethod inlineMethod = inlineInfo.methodToInline;
+        EncodedGraph graphToInline = lookupEncodedGraph(inlineMethod);
+        if (graphToInline == null) {
+            return false;
+        }
+
+        int exceptionObjectOrderId = -1;
+        if (invoke instanceof InvokeWithExceptionNode) {
+            /*
+             * We need to have the regular next node (usually a KillingBeginNode) and the exception
+             * edge node (always an ExceptionObjectNode) fully decoded, because both can be changed
+             * or replaced as part of the inlining process. The GraphEncoder adds these two
+             * successors in a known order (first the regular next node, then the exception edge)
+             * that we can rely on here.
+             */
+            assert ((InvokeWithExceptionNode) invoke).next().next() == null;
+            processNextNode(methodScope, loopScope);
+            assert ((InvokeWithExceptionNode) invoke).next().next() != null;
+
+            assert ((InvokeWithExceptionNode) invoke).exceptionEdge().next() == null;
+            exceptionObjectOrderId = loopScope.nodesToProcess.nextSetBit(0);
+            processNextNode(methodScope, loopScope);
+            assert ((InvokeWithExceptionNode) invoke).exceptionEdge().next() != null;
+        }
+
+        PEMethodScope inlineScope = new PEMethodScope(methodScope.graph, methodScope, graphToInline, inlineMethod, invoke, methodScope.inliningDepth + 1, methodScope.loopExplosionPlugin,
+                        methodScope.invocationPlugins, methodScope.inlineInvokePlugin, null, arguments);
+        /* Do the actual inlining by decoding the inlineMethod */
+        decode(inlineScope);
+
+        ValueNode exceptionValue = null;
+        if (inlineScope.unwindNode != null) {
+            exceptionValue = inlineScope.unwindNode.exception();
+        }
+
+        FixedNode firstInlinedNode = inlineScope.startNode.next();
+        assert inlineScope.startNode.stateAfter() == null;
+        /* The StartNode was only necessary as a placeholder during decoding. */
+        inlineScope.startNode.safeDelete();
+
+        ValueNode returnValue = InliningUtil.finishInlining(invoke, methodScope.graph, firstInlinedNode, inlineScope.returnNodes, inlineScope.unwindNode, inlineScope.encodedGraph.getAssumptions(),
+                        inlineScope.encodedGraph.getInlinedMethods(), null);
+
+        /*
+         * Use the handles that we have on the return value and the exception to update the
+         * orderId->Node table.
+         */
+        registerNode(loopScope, invokeOrderId, returnValue, true, true);
+        if (invoke instanceof InvokeWithExceptionNode) {
+            registerNode(loopScope, exceptionObjectOrderId, exceptionValue, true, true);
+        }
+        deleteInvoke(invoke);
+
+        methodScope.inlineInvokePlugin.postInline(inlineMethod);
+        return true;
+    }
+
+    private static void deleteInvoke(Invoke invoke) {
+        /*
+         * Clean up unused nodes. We cannot just call killCFG on the invoke node because that can
+         * kill too much: nodes that are decoded later can use values that appear unused by now.
+         */
+        FrameState frameState = invoke.stateAfter();
+        invoke.asNode().safeDelete();
+        invoke.callTarget().safeDelete();
+        if (frameState != null && frameState.hasNoUsages()) {
+            frameState.safeDelete();
+        }
+    }
+
+    protected abstract EncodedGraph lookupEncodedGraph(ResolvedJavaMethod method);
+
+    @Override
+    protected void simplifyFixedNode(MethodScope s, LoopScope loopScope, int nodeOrderId, FixedNode node) {
+        PEMethodScope methodScope = (PEMethodScope) s;
+
+        if (node instanceof IfNode && ((IfNode) node).condition() instanceof LogicConstantNode) {
+            IfNode ifNode = (IfNode) node;
+            boolean condition = ((LogicConstantNode) ifNode.condition()).getValue();
+            AbstractBeginNode survivingSuccessor = ifNode.getSuccessor(condition);
+            AbstractBeginNode deadSuccessor = ifNode.getSuccessor(!condition);
+
+            methodScope.graph.removeSplit(ifNode, survivingSuccessor);
+            assert deadSuccessor.next() == null : "must not be parsed yet";
+            deadSuccessor.safeDelete();
+
+        } else if (node instanceof IntegerSwitchNode && ((IntegerSwitchNode) node).value().isConstant()) {
+            IntegerSwitchNode switchNode = (IntegerSwitchNode) node;
+            int value = switchNode.value().asJavaConstant().asInt();
+            AbstractBeginNode survivingSuccessor = switchNode.successorAtKey(value);
+            List<Node> allSuccessors = switchNode.successors().snapshot();
+
+            methodScope.graph.removeSplit(switchNode, survivingSuccessor);
+            for (Node successor : allSuccessors) {
+                if (successor != survivingSuccessor) {
+                    assert ((AbstractBeginNode) successor).next() == null : "must not be parsed yet";
+                    successor.safeDelete();
+                }
+            }
+
+        } else if (node instanceof Canonicalizable) {
+            Node canonical = ((Canonicalizable) node).canonical(new PECanonicalizerTool());
+            if (canonical == null) {
+                /*
+                 * This is a possible return value of canonicalization. However, we might need to
+                 * add additional usages later on for which we need a node. Therefore, we just do
+                 * nothing and leave the node in place.
+                 */
+            } else if (canonical != node) {
+                if (!canonical.isAlive()) {
+                    assert !canonical.isDeleted();
+                    canonical = methodScope.graph.addOrUniqueWithInputs(canonical);
+                    if (canonical instanceof FixedWithNextNode) {
+                        methodScope.graph.addBeforeFixed(node, (FixedWithNextNode) canonical);
+                    }
+                }
+                GraphUtil.unlinkFixedNode((FixedWithNextNode) node);
+                node.replaceAtUsages(canonical);
+                node.safeDelete();
+                assert lookupNode(loopScope, nodeOrderId) == node;
+                registerNode(loopScope, nodeOrderId, canonical, true, false);
+            }
+        }
+    }
+
+    @Override
+    protected Node handleFloatingNodeBeforeAdd(MethodScope s, LoopScope loopScope, Node node) {
+        PEMethodScope methodScope = (PEMethodScope) s;
+
+        if (node instanceof ParameterNode) {
+            if (methodScope.arguments != null) {
+                Node result = methodScope.arguments[((ParameterNode) node).index()];
+                assert result != null;
+                return result;
+
+            } else if (methodScope.parameterPlugin != null) {
+                GraphBuilderContext graphBuilderContext = new PENonAppendGraphBuilderContext(methodScope);
+                Node result = methodScope.parameterPlugin.interceptParameter(graphBuilderContext, ((ParameterNode) node).index(), ((ParameterNode) node).stamp());
+                if (result != null) {
+                    return result;
+                }
+            }
+
+        } else if (node instanceof Canonicalizable) {
+            Node canonical = ((Canonicalizable) node).canonical(new PECanonicalizerTool());
+            if (canonical == null) {
+                /*
+                 * This is a possible return value of canonicalization. However, we might need to
+                 * add additional usages later on for which we need a node. Therefore, we just do
+                 * nothing and leave the node in place.
+                 */
+            } else if (canonical != node) {
+                if (!canonical.isAlive()) {
+                    assert !canonical.isDeleted();
+                    canonical = methodScope.graph.addOrUniqueWithInputs(canonical);
+                }
+                assert node.hasNoUsages();
+                // methodScope.graph.replaceFloating((FloatingNode) node, canonical);
+                return canonical;
+            }
+        }
+        return node;
+    }
+
+    @Override
+    protected Node handleFloatingNodeAfterAdd(MethodScope s, LoopScope loopScope, Node node) {
+        PEMethodScope methodScope = (PEMethodScope) s;
+
+        if (methodScope.isInlinedMethod()) {
+            if (node instanceof SimpleInfopointNode) {
+                methodScope.bytecodePosition = InliningUtil.processSimpleInfopoint(methodScope.invoke, (SimpleInfopointNode) node, methodScope.bytecodePosition);
+                return node;
+
+            } else if (node instanceof FrameState) {
+                FrameState stateAtExceptionEdge = null;
+                if (methodScope.invoke instanceof InvokeWithExceptionNode) {
+                    InvokeWithExceptionNode invokeWithException = ((InvokeWithExceptionNode) methodScope.invoke);
+                    ExceptionObjectNode obj = (ExceptionObjectNode) invokeWithException.exceptionEdge();
+                    stateAtExceptionEdge = obj.stateAfter();
+                }
+                if (methodScope.outerFrameState == null) {
+                    FrameState stateAtReturn = methodScope.invoke.stateAfter();
+                    Kind invokeReturnKind = methodScope.invoke.asNode().getKind();
+                    methodScope.outerFrameState = stateAtReturn.duplicateModifiedDuringCall(methodScope.invoke.bci(), invokeReturnKind);
+                }
+                return InliningUtil.processFrameState((FrameState) node, methodScope.invoke, methodScope.method, stateAtExceptionEdge, methodScope.outerFrameState, true);
+
+            } else if (node instanceof MonitorIdNode) {
+                InliningUtil.processMonitorId(methodScope.invoke, (MonitorIdNode) node);
+                return node;
+            }
+        }
+
+        return node;
+    }
+}
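After inlining, the handles on the return value and the exception value are written back into the orderId->Node table, so that nodes decoded later that referenced the invoke (or its exception object) by order id now resolve to the inlined values. A minimal stand-alone sketch of such a table, using a plain map and hypothetical names (OrderIdTable, register, lookup) rather than the real Graal API, is roughly:

    // Sketch only: OrderIdTable is a hypothetical stand-in for the decoder's
    // per-loop-scope orderId->Node lookup table, not part of this changeset.
    final class OrderIdTable {
        private final java.util.Map<Integer, Object> orderIdToNode = new java.util.HashMap<>();

        void register(int orderId, Object node, boolean allowOverwrite) {
            Object previous = orderIdToNode.put(orderId, node);
            // The invoke was registered while the caller was decoded; after
            // inlining it is overwritten by the return or exception value.
            assert allowOverwrite || previous == null;
        }

        Object lookup(int orderId) {
            return orderIdToNode.get(orderId);
        }
    }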
--- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/PartialEvaluator.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/PartialEvaluator.java	Fri Apr 10 16:58:26 2015 -0700
@@ -22,8 +22,11 @@
  */
 package com.oracle.graal.truffle;
 
+import static com.oracle.graal.compiler.common.GraalOptions.*;
+import static com.oracle.graal.java.AbstractBytecodeParser.Options.*;
 import static com.oracle.graal.truffle.TruffleCompilerOptions.*;
 
+import java.lang.invoke.*;
 import java.util.*;
 
 import com.oracle.graal.api.meta.*;
@@ -41,6 +44,7 @@
 import com.oracle.graal.nodes.java.*;
 import com.oracle.graal.nodes.spi.*;
 import com.oracle.graal.nodes.virtual.*;
+import com.oracle.graal.options.*;
 import com.oracle.graal.phases.*;
 import com.oracle.graal.phases.common.*;
 import com.oracle.graal.phases.common.inlining.*;
@@ -52,6 +56,7 @@
 import com.oracle.graal.truffle.nodes.frame.*;
 import com.oracle.graal.truffle.nodes.frame.NewFrameNode.VirtualOnlyInstanceNode;
 import com.oracle.graal.truffle.phases.*;
+import com.oracle.graal.truffle.substitutions.*;
 import com.oracle.graal.virtual.phases.ea.*;
 import com.oracle.truffle.api.*;
 import com.oracle.truffle.api.CompilerDirectives.TruffleBoundary;
@@ -62,6 +67,9 @@
  */
 public class PartialEvaluator {
 
+    @Option(help = "New partial evaluation on Graal graphs", type = OptionType.Expert)//
+    public static final StableOptionValue<Boolean> GraphPE = new StableOptionValue<>(false);
+
     private final Providers providers;
     private final CanonicalizerPhase canonicalizer;
     private final SnippetReflectionProvider snippetReflection;
@@ -156,17 +164,44 @@
 
     private class PEInlineInvokePlugin implements InlineInvokePlugin {
 
+        private final boolean duringParsing;
         private Deque<TruffleInlining> inlining;
         private OptimizedDirectCallNode lastDirectCallNode;
         private final Replacements replacements;
 
-        public PEInlineInvokePlugin(TruffleInlining inlining, Replacements replacements) {
+        private final InvocationPlugins invocationPlugins;
+        private final LoopExplosionPlugin loopExplosionPlugin;
+
+        public PEInlineInvokePlugin(TruffleInlining inlining, Replacements replacements, boolean duringParsing, InvocationPlugins invocationPlugins, LoopExplosionPlugin loopExplosionPlugin) {
             this.inlining = new ArrayDeque<>();
             this.inlining.push(inlining);
             this.replacements = replacements;
+            this.duringParsing = duringParsing;
+            this.invocationPlugins = invocationPlugins;
+            this.loopExplosionPlugin = loopExplosionPlugin;
+        }
+
+        private boolean hasMethodHandleArgument(ValueNode[] arguments) {
+            /*
+             * We want to process invokes that have a constant MethodHandle parameter. And the
+             * method must be statically bound, otherwise we do not have a single target method.
+             */
+            for (ValueNode argument : arguments) {
+                if (argument.isConstant()) {
+                    JavaConstant constant = argument.asJavaConstant();
+                    if (constant.getKind() == Kind.Object && snippetReflection.asObject(MethodHandle.class, constant) != null) {
+                        return true;
+                    }
+                }
+            }
+            return false;
         }
 
         public InlineInfo getInlineInfo(GraphBuilderContext builder, ResolvedJavaMethod original, ValueNode[] arguments, JavaType returnType) {
+            if (duringParsing && (invocationPlugins.lookupInvocation(original) != null || loopExplosionPlugin.shouldExplodeLoops(original))) {
+                return null;
+            }
+
             if (original.getAnnotation(TruffleBoundary.class) != null) {
                 return null;
             }
@@ -176,17 +211,23 @@
             assert !builder.parsingReplacement();
             if (TruffleCompilerOptions.TruffleFunctionInlining.getValue()) {
                 if (original.equals(callSiteProxyMethod)) {
+                    if (duringParsing) {
+                        return null;
+                    }
                     ValueNode arg1 = arguments[0];
                     if (!arg1.isConstant()) {
                         GraalInternalError.shouldNotReachHere("The direct call node does not resolve to a constant!");
                     }
 
-                    Object callNode = builder.getSnippetReflection().asObject(Object.class, (JavaConstant) arg1.asConstant());
+                    Object callNode = snippetReflection.asObject(Object.class, (JavaConstant) arg1.asConstant());
                     if (callNode instanceof OptimizedDirectCallNode) {
                         OptimizedDirectCallNode directCallNode = (OptimizedDirectCallNode) callNode;
                         lastDirectCallNode = directCallNode;
                     }
                 } else if (original.equals(callDirectMethod)) {
+                    if (duringParsing) {
+                        return null;
+                    }
                     TruffleInliningDecision decision = getDecision(inlining.peek(), lastDirectCallNode);
                     lastDirectCallNode = null;
                     if (decision != null && decision.isInline()) {
@@ -196,6 +237,11 @@
                     }
                 }
             }
+
+            if (duringParsing && (!original.hasBytecodes() || original.getCode().length >= TrivialInliningSize.getValue() || builder.getDepth() >= InlineDuringParsingMaxDepth.getValue()) &&
+                            !hasMethodHandleArgument(arguments)) {
+                return null;
+            }
             return new InlineInfo(original, false, false);
         }
 
@@ -222,28 +268,72 @@
 
     }
 
-    @SuppressWarnings("unused")
-    private void fastPartialEvaluation(OptimizedCallTarget callTarget, StructuredGraph graph, PhaseContext baseContext, HighTierContext tierContext) {
+    protected void doFastPE(OptimizedCallTarget callTarget, StructuredGraph graph) {
         GraphBuilderConfiguration newConfig = configForRoot.copy();
+        InvocationPlugins invocationPlugins = newConfig.getPlugins().getInvocationPlugins();
+        TruffleGraphBuilderPlugins.registerInvocationPlugins(providers.getMetaAccess(), invocationPlugins, false, snippetReflection);
+
         newConfig.setUseProfiling(false);
         Plugins plugins = newConfig.getPlugins();
         plugins.setLoadFieldPlugin(new InterceptLoadFieldPlugin());
         plugins.setParameterPlugin(new InterceptReceiverPlugin(callTarget));
         callTarget.setInlining(new TruffleInlining(callTarget, new DefaultInliningPolicy()));
-        InlineInvokePlugin inlinePlugin = new PEInlineInvokePlugin(callTarget.getInlining(), providers.getReplacements());
+
+        InlineInvokePlugin inlinePlugin = new PEInlineInvokePlugin(callTarget.getInlining(), providers.getReplacements(), false, null, null);
         if (PrintTruffleExpansionHistogram.getValue()) {
             inlinePlugin = new HistogramInlineInvokePlugin(graph, inlinePlugin);
         }
         plugins.setInlineInvokePlugin(inlinePlugin);
         plugins.setLoopExplosionPlugin(new PELoopExplosionPlugin());
-        InvocationPlugins invocationPlugins = newConfig.getPlugins().getInvocationPlugins();
-        new GraphBuilderPhase.Instance(providers.getMetaAccess(), providers.getStampProvider(), this.snippetReflection, providers.getConstantReflection(), newConfig,
-                        TruffleCompilerImpl.Optimizations, null).apply(graph);
+        new GraphBuilderPhase.Instance(providers.getMetaAccess(), providers.getStampProvider(), providers.getConstantReflection(), newConfig, TruffleCompilerImpl.Optimizations, null).apply(graph);
         if (PrintTruffleExpansionHistogram.getValue()) {
             ((HistogramInlineInvokePlugin) inlinePlugin).print(callTarget, System.out);
         }
+    }
+
+    protected void doGraphPE(OptimizedCallTarget callTarget, StructuredGraph graph) {
+        GraphBuilderConfiguration newConfig = configForRoot.copy();
+        InvocationPlugins parsingInvocationPlugins = newConfig.getPlugins().getInvocationPlugins();
+        TruffleGraphBuilderPlugins.registerInvocationPlugins(providers.getMetaAccess(), parsingInvocationPlugins, true, snippetReflection);
+
+        callTarget.setInlining(new TruffleInlining(callTarget, new DefaultInliningPolicy()));
+
+        LoopExplosionPlugin loopExplosionPlugin = new PELoopExplosionPlugin();
+
+        newConfig.setUseProfiling(false);
+        Plugins plugins = newConfig.getPlugins();
+        plugins.setLoadFieldPlugin(new InterceptLoadFieldPlugin());
+        plugins.setInlineInvokePlugin(new PEInlineInvokePlugin(callTarget.getInlining(), providers.getReplacements(), true, parsingInvocationPlugins, loopExplosionPlugin));
+
+        CachingPEGraphDecoder decoder = new CachingPEGraphDecoder(providers, newConfig, AllowAssumptions.from(graph.getAssumptions() != null));
+
+        ParameterPlugin parameterPlugin = new InterceptReceiverPlugin(callTarget);
+
+        InvocationPlugins decodingInvocationPlugins = new InvocationPlugins(providers.getMetaAccess());
+        TruffleGraphBuilderPlugins.registerInvocationPlugins(providers.getMetaAccess(), decodingInvocationPlugins, false, snippetReflection);
+        InlineInvokePlugin decodingInlinePlugin = new PEInlineInvokePlugin(callTarget.getInlining(), providers.getReplacements(), false, decodingInvocationPlugins, loopExplosionPlugin);
+        if (PrintTruffleExpansionHistogram.getValue()) {
+            decodingInlinePlugin = new HistogramInlineInvokePlugin(graph, decodingInlinePlugin);
+        }
+
+        decoder.decode(graph, graph.method(), loopExplosionPlugin, decodingInvocationPlugins, decodingInlinePlugin, parameterPlugin);
+
+        if (PrintTruffleExpansionHistogram.getValue()) {
+            ((HistogramInlineInvokePlugin) decodingInlinePlugin).print(callTarget, System.out);
+        }
+    }
+
+    @SuppressWarnings("unused")
+    private void fastPartialEvaluation(OptimizedCallTarget callTarget, StructuredGraph graph, PhaseContext baseContext, HighTierContext tierContext) {
+        if (GraphPE.getValue()) {
+            doGraphPE(callTarget, graph);
+        } else {
+            doFastPE(callTarget, graph);
+        }
         Debug.dump(graph, "After FastPE");
 
+        graph.maybeCompress();
+
         // Perform deoptimize to guard conversion.
         new ConvertDeoptimizeToGuardPhase().apply(graph, tierContext);
 
@@ -268,6 +358,44 @@
 
         // recompute loop frequencies now that BranchProbabilities have had time to canonicalize
         ComputeLoopFrequenciesClosure.compute(graph);
+
+        graph.maybeCompress();
+
+        if (TruffleCompilerOptions.TraceTrufflePerformanceWarnings.getValue()) {
+            reportPerformanceWarnings(graph);
+        }
+    }
+
+    private static void reportPerformanceWarnings(StructuredGraph graph) {
+        ArrayList<ValueNode> warnings = new ArrayList<>();
+        for (MethodCallTargetNode call : graph.getNodes(MethodCallTargetNode.TYPE)) {
+            if (call.targetMethod().getAnnotation(TruffleBoundary.class) == null && call.targetMethod().getAnnotation(TruffleCallBoundary.class) == null) {
+                TracePerformanceWarningsListener.logPerformanceWarning(String.format("not inlined %s call to %s (%s)", call.invokeKind(), call.targetMethod(), call), null);
+                warnings.add(call);
+            }
+        }
+
+        for (CheckCastNode cast : graph.getNodes().filter(CheckCastNode.class)) {
+            if (cast.type().findLeafConcreteSubtype() == null) {
+                TracePerformanceWarningsListener.logPerformanceWarning(String.format("non-leaf type checkcast: %s (%s)", cast.type().getName(), cast), null);
+                warnings.add(cast);
+            }
+        }
+
+        for (InstanceOfNode instanceOf : graph.getNodes().filter(InstanceOfNode.class)) {
+            if (instanceOf.type().findLeafConcreteSubtype() == null) {
+                TracePerformanceWarningsListener.logPerformanceWarning(String.format("non-leaf type instanceof: %s (%s)", instanceOf.type().getName(), instanceOf), null);
+                warnings.add(instanceOf);
+            }
+        }
+
+        if (Debug.isEnabled() && !warnings.isEmpty()) {
+            try (Scope s = Debug.scope("TrufflePerformanceWarnings", graph)) {
+                Debug.dump(graph, "performance warnings %s", warnings);
+            } catch (Throwable t) {
+                Debug.handle(t);
+            }
+        }
     }
 
     public StructuredGraph createRootGraph(StructuredGraph graph) {
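fastPartialEvaluation now dispatches on the new GraphPE option: when it is enabled, the call target is partially evaluated by decoding cached encoded graphs (doGraphPE); otherwise the existing bytecode-parsing path (doFastPE) is taken. Assuming the usual -G: prefix for setting Graal option values from the command line (and with placeholder jar/class names), the new path could be tried out roughly like this:

    # Sketch, assuming the -G: option prefix; GraphPE defaults to false.
    # TraceTrufflePerformanceWarnings additionally enables the new warning report.
    java -G:+GraphPE -G:+TraceTrufflePerformanceWarnings -cp app.jar my.TruffleLauncher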
--- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/TruffleCompilerImpl.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/TruffleCompilerImpl.java	Fri Apr 10 16:58:26 2015 -0700
@@ -37,7 +37,7 @@
 import com.oracle.graal.debug.*;
 import com.oracle.graal.debug.Debug.Scope;
 import com.oracle.graal.graphbuilderconf.*;
-import com.oracle.graal.graphbuilderconf.GraphBuilderConfiguration.*;
+import com.oracle.graal.graphbuilderconf.GraphBuilderConfiguration.Plugins;
 import com.oracle.graal.java.*;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.phases.*;
@@ -49,7 +49,6 @@
 import com.oracle.graal.printer.*;
 import com.oracle.graal.runtime.*;
 import com.oracle.graal.truffle.nodes.*;
-import com.oracle.graal.truffle.substitutions.*;
 import com.oracle.truffle.api.*;
 import com.oracle.truffle.api.nodes.*;
 
@@ -77,7 +76,7 @@
         StringIndexOutOfBoundsException.class,
         ClassCastException.class
     };
-    // @formatter:off
+    // @formatter:on
 
     public static final OptimisticOptimizations Optimizations = OptimisticOptimizations.ALL.remove(OptimisticOptimizations.Optimization.UseExceptionProbability,
                     OptimisticOptimizations.Optimization.RemoveNeverExecutedCode, OptimisticOptimizations.Optimization.UseTypeCheckedInlining, OptimisticOptimizations.Optimization.UseTypeCheckHints);
@@ -96,11 +95,14 @@
         ResolvedJavaType[] skippedExceptionTypes = getSkippedExceptionTypes(providers.getMetaAccess());
 
         GraphBuilderPhase phase = (GraphBuilderPhase) backend.getSuites().getDefaultGraphBuilderSuite().findPhase(GraphBuilderPhase.class).previous();
-        InvocationPlugins invocationPlugins = new InvocationPlugins(phase.getGraphBuilderConfig().getPlugins().getInvocationPlugins());
+        Plugins graalPlugins = phase.getGraphBuilderConfig().getPlugins();
+        InvocationPlugins invocationPlugins = new InvocationPlugins(graalPlugins.getInvocationPlugins());
         Plugins plugins = new Plugins(invocationPlugins);
-        TruffleGraphBuilderPlugins.registerInvocationPlugins(providers.getMetaAccess(), invocationPlugins);
-
         this.config = GraphBuilderConfiguration.getDefault(plugins).withSkippedExceptionTypes(skippedExceptionTypes);
+        // Since invocationPlugins may include MethodSubstitutionPlugins, we
+        // need to copy the GenericInvocationPlugins so that @NodeIntrinsics
+        // and Word methods in method substitutions are handled correctly.
+        plugins.setGenericInvocationPlugin(graalPlugins.getGenericInvocationPlugin());
 
         this.partialEvaluator = new PartialEvaluator(providers, config, Graal.getRequiredCapability(SnippetReflectionProvider.class));
 
@@ -163,8 +165,8 @@
             CodeCacheProvider codeCache = providers.getCodeCache();
             CallingConvention cc = getCallingConvention(codeCache, Type.JavaCallee, graph.method(), false);
             CompilationResult compilationResult = new CompilationResult(name);
-            result = compileGraph(graph, cc, graph.method(), providers, backend, codeCache.getTarget(), graphBuilderSuite, Optimizations, getProfilingInfo(graph), speculationLog, suites,
-                            lirSuites, compilationResult, CompilationResultBuilderFactory.Default);
+            result = compileGraph(graph, cc, graph.method(), providers, backend, codeCache.getTarget(), graphBuilderSuite, Optimizations, getProfilingInfo(graph), speculationLog, suites, lirSuites,
+                            compilationResult, CompilationResultBuilderFactory.Default);
         } catch (Throwable e) {
             throw Debug.handle(e);
         }
--- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/TruffleCompilerOptions.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/TruffleCompilerOptions.java	Fri Apr 10 16:58:26 2015 -0700
@@ -45,7 +45,7 @@
     public static final OptionValue<String> TruffleCompileOnly = new OptionValue<>(null);
 
     @Option(help = "Exclude assertion code from Truffle compilations", type = OptionType.Debug)
-    public static final StableOptionValue<Boolean> TruffleExcludeAssertions = new StableOptionValue<>(true);
+    public static final OptionValue<Boolean> TruffleExcludeAssertions = new StableOptionValue<>(true);
 
     @Option(help = "Compile call target when call count exceeds this threshold", type = OptionType.User)
     public static final OptionValue<Integer> TruffleCompilationThreshold = new OptionValue<>(1000);
@@ -65,21 +65,12 @@
     @Option(help = "Enable automatic inlining of call targets", type = OptionType.Debug)
     public static final OptionValue<Boolean> TruffleFunctionInlining = new OptionValue<>(true);
 
-    @Option(help = "Enable an expansion cache per CallTarget. Only functionable with TruffleContextSensitiveInlining enabled.", type = OptionType.Debug)
-    public static final OptionValue<Boolean> TruffleFunctionInliningCache = new OptionValue<>(true);
-
-    @Option(help = "Maximum number of Graal IR nodes during partial evaluation", type = OptionType.Expert)
-    public static final OptionValue<Integer> TruffleGraphMaxNodes = new OptionValue<>(200000);
-
     @Option(help = "Stop inlining if caller's cumulative tree size would exceed this limit", type = OptionType.Expert)
     public static final OptionValue<Integer> TruffleInliningMaxCallerSize = new OptionValue<>(2250);
 
     @Option(help = "Maximum level of recursive inlining", type = OptionType.Expert)
     public static final OptionValue<Integer> TruffleMaximumRecursiveInlining = new OptionValue<>(4);
 
-    @Option(help = "Defines the number of graal nodes that triggers a performance warning.", type = OptionType.Debug)
-    public static final OptionValue<Integer> TrufflePerformanceWarningGraalNodeCount = new OptionValue<>(1000);
-
     @Option(help = "Enable call target splitting", type = OptionType.Expert)
     public static final OptionValue<Boolean> TruffleSplitting = new OptionValue<>(true);
 
@@ -107,32 +98,23 @@
     @Option(help = "Disable call target splitting if tree size exceeds this limit", type = OptionType.Debug)
     public static final OptionValue<Integer> TruffleSplittingMaxCalleeSize = new OptionValue<>(100);
 
-    @Option(help = "Number of most recently used methods in truffle cache", type = OptionType.Debug)
-    public static final OptionValue<Integer> TruffleMaxCompilationCacheSize = new OptionValue<>(512);
-
     @Option(help = "Enable asynchronous truffle compilation in background thread", type = OptionType.Expert)
     public static final OptionValue<Boolean> TruffleBackgroundCompilation = new OptionValue<>(true);
 
     @Option(help = "Manually set the number of compiler threads", type = OptionType.Expert)
-    public static final StableOptionValue<Integer> TruffleCompilerThreads = new StableOptionValue<>(0);
+    public static final OptionValue<Integer> TruffleCompilerThreads = new StableOptionValue<>(0);
 
     @Option(help = "Enable inlining across Truffle boundary", type = OptionType.Expert)
     public static final OptionValue<Boolean> TruffleInlineAcrossTruffleBoundary = new OptionValue<>(false);
 
     @Option(help = "", type = OptionType.Debug)
-    public static final OptionValue<Integer> TruffleCompilationDecisionTime = new OptionValue<>(100);
-
-    @Option(help = "", type = OptionType.Debug)
-    public static final OptionValue<Boolean> TruffleCompilationDecisionTimePrintFail = new OptionValue<>(false);
-
-    @Option(help = "", type = OptionType.Debug)
     public static final OptionValue<Boolean> TruffleReturnTypeSpeculation = new StableOptionValue<>(true);
 
     @Option(help = "", type = OptionType.Debug)
     public static final OptionValue<Boolean> TruffleArgumentTypeSpeculation = new StableOptionValue<>(true);
 
     @Option(help = "", type = OptionType.Debug)
-    public static final StableOptionValue<Boolean> TruffleUseFrameWithoutBoxing = new StableOptionValue<>(true);
+    public static final OptionValue<Boolean> TruffleUseFrameWithoutBoxing = new StableOptionValue<>(true);
 
     // tracing
     @Option(help = "Print potential performance problems", type = OptionType.Debug)
@@ -153,18 +135,12 @@
     @Option(help = "Print the inlined call tree for each compiled method", type = OptionType.Debug)
     public static final OptionValue<Boolean> TraceTruffleCompilationCallTree = new OptionValue<>(false);
 
-    @Option(help = "Print the expansion trees for each compilation", type = OptionType.Debug)
-    public static final OptionValue<Boolean> TraceTruffleExpansion = new OptionValue<>(false);
-
     @Option(help = "Print source secions for printed expansion trees", type = OptionType.Debug)
     public static final OptionValue<Boolean> TraceTruffleExpansionSource = new OptionValue<>(false);
 
     @Option(help = "Prints a histogram of all expanded Java methods.", type = OptionType.Debug)
     public static final OptionValue<Boolean> PrintTruffleExpansionHistogram = new OptionValue<>(false);
 
-    @Option(help = "Print detailed information for the Truffle compilation cache", type = OptionType.Debug)
-    public static final OptionValue<Boolean> TraceTruffleCacheDetails = new OptionValue<>(false);
-
     @Option(help = "Treat compilation exceptions as fatal exceptions that will exit the application", type = OptionType.Debug)
     public static final OptionValue<Boolean> TruffleCompilationExceptionsAreFatal = new OptionValue<>(false);
 
--- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/TruffleConstantReflectionProvider.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/TruffleConstantReflectionProvider.java	Fri Apr 10 16:58:26 2015 -0700
@@ -58,14 +58,15 @@
             JavaType fieldType = field.getType();
             if (field.isFinal() || field.getAnnotation(CompilationFinal.class) != null ||
                             (fieldType.getKind() == Kind.Object && (field.getAnnotation(Child.class) != null || field.getAnnotation(Children.class) != null))) {
-                JavaConstant constant = graalConstantReflection.readFieldValue(field, receiver);
-                assert verifyFieldValue(field, constant);
-                if (constant.isNonNull() && fieldType.getKind() == Kind.Object && fieldType instanceof ResolvedJavaType && ((ResolvedJavaType) fieldType).isArray() &&
+                final JavaConstant constant;
+                if (fieldType.getKind() == Kind.Object && fieldType instanceof ResolvedJavaType && ((ResolvedJavaType) fieldType).isArray() &&
                                 (field.getAnnotation(CompilationFinal.class) != null || field.getAnnotation(Children.class) != null)) {
-                    return graalConstantReflection.readStableFieldValue(field, receiver, true);
+                    constant = graalConstantReflection.readStableFieldValue(field, receiver, true);
                 } else {
-                    return graalConstantReflection.readFieldValue(field, receiver);
+                    constant = graalConstantReflection.readFieldValue(field, receiver);
                 }
+                assert verifyFieldValue(field, constant);
+                return constant;
             }
         } else if (field.isStatic()) {
             if (field.getAnnotation(CompilationFinal.class) != null) {
--- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/arithmetic/IntegerAddExactNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,110 +0,0 @@
-/*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.truffle.nodes.arithmetic;
-
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.compiler.common.type.*;
-import com.oracle.graal.graph.*;
-import com.oracle.graal.graph.spi.*;
-import com.oracle.graal.nodeinfo.*;
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.nodes.calc.*;
-import com.oracle.graal.nodes.spi.*;
-import com.oracle.truffle.api.*;
-
-/**
- * Node representing an exact integer addition that will throw an {@link ArithmeticException} in
- * case the addition would overflow the 32 bit range.
- */
-@NodeInfo
-public final class IntegerAddExactNode extends AddNode implements IntegerExactArithmeticNode {
-    public static final NodeClass<IntegerAddExactNode> TYPE = NodeClass.create(IntegerAddExactNode.class);
-
-    public IntegerAddExactNode(ValueNode x, ValueNode y) {
-        super(TYPE, x, y);
-        assert x.stamp().isCompatible(y.stamp()) && x.stamp() instanceof IntegerStamp;
-    }
-
-    @Override
-    public boolean inferStamp() {
-        // TODO Should probably use a specialized version which understands that it can't overflow
-        return super.inferStamp();
-    }
-
-    @Override
-    public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) {
-        ValueNode result = findSynonym(forX, forY);
-        if (result == null) {
-            return this;
-        } else {
-            return result;
-        }
-    }
-
-    private static ValueNode findSynonym(ValueNode forX, ValueNode forY) {
-        if (forX.isConstant() && !forY.isConstant()) {
-            return new IntegerAddExactNode(forY, forX);
-        }
-        if (forX.isConstant()) {
-            ConstantNode constantNode = canonicalXconstant(forX, forY);
-            if (constantNode != null) {
-                return constantNode;
-            }
-        } else if (forY.isConstant()) {
-            long c = forY.asJavaConstant().asLong();
-            if (c == 0) {
-                return forX;
-            }
-        }
-        return null;
-    }
-
-    private static ConstantNode canonicalXconstant(ValueNode forX, ValueNode forY) {
-        JavaConstant xConst = forX.asJavaConstant();
-        JavaConstant yConst = forY.asJavaConstant();
-        if (xConst != null && yConst != null) {
-            assert xConst.getKind() == yConst.getKind();
-            try {
-                if (xConst.getKind() == Kind.Int) {
-                    return ConstantNode.forInt(ExactMath.addExact(xConst.asInt(), yConst.asInt()));
-                } else {
-                    assert xConst.getKind() == Kind.Long;
-                    return ConstantNode.forLong(ExactMath.addExact(xConst.asLong(), yConst.asLong()));
-                }
-            } catch (ArithmeticException ex) {
-                // The operation will result in an overflow exception, so do not canonicalize.
-            }
-        }
-        return null;
-    }
-
-    @Override
-    public IntegerExactArithmeticSplitNode createSplit(AbstractBeginNode next, AbstractBeginNode deopt) {
-        return graph().add(new IntegerAddExactSplitNode(stamp(), getX(), getY(), next, deopt));
-    }
-
-    @Override
-    public void lower(LoweringTool tool) {
-        IntegerExactArithmeticSplitNode.lower(tool, this);
-    }
-}
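The deleted IntegerAddExactNode folded two constant operands only when the exact addition did not overflow; on overflow it left the node in place so that the ArithmeticException still occurs at run time. The same guard can be illustrated stand-alone with the JDK's Math.addExact, assumed here to match ExactMath.addExact's overflow behavior:

    // Sketch: fold the addition only when it cannot overflow.
    static Integer tryFoldAddExact(int x, int y) {
        try {
            return Math.addExact(x, y); // safe to canonicalize to a constant
        } catch (ArithmeticException e) {
            return null;                // overflow: keep the node, deopt at run time
        }
    }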
--- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/arithmetic/IntegerAddExactSplitNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.truffle.nodes.arithmetic;
-
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.compiler.common.type.*;
-import com.oracle.graal.graph.*;
-import com.oracle.graal.nodeinfo.*;
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.nodes.spi.*;
-
-@NodeInfo
-public final class IntegerAddExactSplitNode extends IntegerExactArithmeticSplitNode {
-    public static final NodeClass<IntegerAddExactSplitNode> TYPE = NodeClass.create(IntegerAddExactSplitNode.class);
-
-    public IntegerAddExactSplitNode(Stamp stamp, ValueNode x, ValueNode y, AbstractBeginNode next, AbstractBeginNode overflowSuccessor) {
-        super(TYPE, stamp, x, y, next, overflowSuccessor);
-    }
-
-    @Override
-    protected Value generateArithmetic(NodeLIRBuilderTool gen) {
-        return gen.getLIRGeneratorTool().emitAdd(gen.operand(getX()), gen.operand(getY()), true);
-    }
-}
--- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/arithmetic/IntegerExactArithmeticNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.truffle.nodes.arithmetic;
-
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.nodes.spi.*;
-
-interface IntegerExactArithmeticNode extends Lowerable {
-
-    IntegerExactArithmeticSplitNode createSplit(AbstractBeginNode next, AbstractBeginNode deopt);
-}
--- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/arithmetic/IntegerExactArithmeticSplitNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.truffle.nodes.arithmetic;
-
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.compiler.common.type.*;
-import com.oracle.graal.graph.*;
-import com.oracle.graal.nodeinfo.*;
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.nodes.calc.*;
-import com.oracle.graal.nodes.spi.*;
-
-@NodeInfo
-public abstract class IntegerExactArithmeticSplitNode extends ControlSplitNode implements LIRLowerable {
-    public static final NodeClass<IntegerExactArithmeticSplitNode> TYPE = NodeClass.create(IntegerExactArithmeticSplitNode.class);
-
-    @Successor AbstractBeginNode next;
-    @Successor AbstractBeginNode overflowSuccessor;
-    @Input ValueNode x;
-    @Input ValueNode y;
-
-    protected IntegerExactArithmeticSplitNode(NodeClass<? extends IntegerExactArithmeticSplitNode> c, Stamp stamp, ValueNode x, ValueNode y, AbstractBeginNode next, AbstractBeginNode overflowSuccessor) {
-        super(c, stamp);
-        this.x = x;
-        this.y = y;
-        this.overflowSuccessor = overflowSuccessor;
-        this.next = next;
-    }
-
-    @Override
-    public AbstractBeginNode getPrimarySuccessor() {
-        return next;
-    }
-
-    @Override
-    public double probability(AbstractBeginNode successor) {
-        return successor == next ? 1 : 0;
-    }
-
-    public AbstractBeginNode getNext() {
-        return next;
-    }
-
-    public AbstractBeginNode getOverflowSuccessor() {
-        return overflowSuccessor;
-    }
-
-    public ValueNode getX() {
-        return x;
-    }
-
-    public ValueNode getY() {
-        return y;
-    }
-
-    @Override
-    public void generate(NodeLIRBuilderTool generator) {
-        generator.setResult(this, generateArithmetic(generator));
-        generator.emitOverflowCheckBranch(getOverflowSuccessor(), getNext(), stamp, probability(getOverflowSuccessor()));
-    }
-
-    protected abstract Value generateArithmetic(NodeLIRBuilderTool generator);
-
-    static void lower(LoweringTool tool, IntegerExactArithmeticNode node) {
-        if (node.asNode().graph().getGuardsStage() == StructuredGraph.GuardsStage.FIXED_DEOPTS) {
-            FloatingNode floatingNode = (FloatingNode) node;
-            FixedWithNextNode previous = tool.lastFixedNode();
-            FixedNode next = previous.next();
-            previous.setNext(null);
-            DeoptimizeNode deopt = floatingNode.graph().add(new DeoptimizeNode(DeoptimizationAction.InvalidateReprofile, DeoptimizationReason.ArithmeticException));
-            AbstractBeginNode normalBegin = floatingNode.graph().add(new BeginNode());
-            normalBegin.setNext(next);
-            IntegerExactArithmeticSplitNode split = node.createSplit(normalBegin, BeginNode.begin(deopt));
-            previous.setNext(split);
-            floatingNode.replaceAndDelete(split);
-        }
-    }
-}
--- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/arithmetic/IntegerMulExactNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.truffle.nodes.arithmetic;
-
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.compiler.common.type.*;
-import com.oracle.graal.graph.*;
-import com.oracle.graal.graph.spi.*;
-import com.oracle.graal.nodeinfo.*;
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.nodes.calc.*;
-import com.oracle.graal.nodes.spi.*;
-import com.oracle.truffle.api.*;
-
-/**
- * Node representing an exact integer multiplication that will throw an {@link ArithmeticException}
- * in case the addition would overflow the 32 bit range.
- */
-@NodeInfo
-public final class IntegerMulExactNode extends MulNode implements IntegerExactArithmeticNode {
-    public static final NodeClass<IntegerMulExactNode> TYPE = NodeClass.create(IntegerMulExactNode.class);
-
-    public IntegerMulExactNode(ValueNode x, ValueNode y) {
-        super(TYPE, x, y);
-        assert x.stamp().isCompatible(y.stamp()) && x.stamp() instanceof IntegerStamp;
-    }
-
-    @Override
-    public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) {
-        if (forX.isConstant() && !forY.isConstant()) {
-            return new IntegerMulExactNode(forY, forX);
-        }
-        if (forX.isConstant()) {
-            return canonicalXconstant(forX, forY);
-        } else if (forY.isConstant()) {
-            long c = forY.asJavaConstant().asLong();
-            if (c == 1) {
-                return forX;
-            }
-            if (c == 0) {
-                return ConstantNode.forIntegerStamp(stamp(), 0);
-            }
-        }
-        return this;
-    }
-
-    private ValueNode canonicalXconstant(ValueNode forX, ValueNode forY) {
-        JavaConstant xConst = forX.asJavaConstant();
-        JavaConstant yConst = forY.asJavaConstant();
-        assert xConst.getKind() == yConst.getKind();
-        try {
-            if (xConst.getKind() == Kind.Int) {
-                return ConstantNode.forInt(ExactMath.multiplyExact(xConst.asInt(), yConst.asInt()));
-            } else {
-                assert xConst.getKind() == Kind.Long;
-                return ConstantNode.forLong(ExactMath.multiplyExact(xConst.asLong(), yConst.asLong()));
-            }
-        } catch (ArithmeticException ex) {
-            // The operation will result in an overflow exception, so do not canonicalize.
-        }
-        return this;
-    }
-
-    @Override
-    public IntegerExactArithmeticSplitNode createSplit(AbstractBeginNode next, AbstractBeginNode deopt) {
-        return graph().add(new IntegerMulExactSplitNode(stamp(), getX(), getY(), next, deopt));
-    }
-
-    @Override
-    public void lower(LoweringTool tool) {
-        IntegerExactArithmeticSplitNode.lower(tool, this);
-    }
-}
--- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/arithmetic/IntegerMulExactSplitNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.truffle.nodes.arithmetic;
-
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.compiler.common.type.*;
-import com.oracle.graal.graph.*;
-import com.oracle.graal.nodeinfo.*;
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.nodes.spi.*;
-
-@NodeInfo
-public final class IntegerMulExactSplitNode extends IntegerExactArithmeticSplitNode {
-    public static final NodeClass<IntegerMulExactSplitNode> TYPE = NodeClass.create(IntegerMulExactSplitNode.class);
-
-    public IntegerMulExactSplitNode(Stamp stamp, ValueNode x, ValueNode y, AbstractBeginNode next, AbstractBeginNode overflowSuccessor) {
-        super(TYPE, stamp, x, y, next, overflowSuccessor);
-    }
-
-    @Override
-    protected Value generateArithmetic(NodeLIRBuilderTool gen) {
-        return gen.getLIRGeneratorTool().emitMul(gen.operand(getX()), gen.operand(getY()), true);
-    }
-}
--- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/arithmetic/IntegerMulHighNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.truffle.nodes.arithmetic;
-
-import java.util.function.*;
-
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.compiler.common.type.*;
-import com.oracle.graal.graph.*;
-import com.oracle.graal.graph.spi.*;
-import com.oracle.graal.lir.gen.*;
-import com.oracle.graal.nodeinfo.*;
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.nodes.calc.*;
-import com.oracle.graal.nodes.spi.*;
-import com.oracle.truffle.api.*;
-
-@NodeInfo(shortName = "*H")
-public final class IntegerMulHighNode extends BinaryNode implements ArithmeticLIRLowerable {
-    public static final NodeClass<IntegerMulHighNode> TYPE = NodeClass.create(IntegerMulHighNode.class);
-
-    public IntegerMulHighNode(ValueNode x, ValueNode y) {
-        this((IntegerStamp) x.stamp().unrestricted(), x, y);
-    }
-
-    public IntegerMulHighNode(IntegerStamp stamp, ValueNode x, ValueNode y) {
-        super(TYPE, stamp, x, y);
-    }
-
-    /**
-     * Determines the minimum and maximum result of this node for the given inputs and returns the
-     * result of the given BiFunction on the minimum and maximum values.
-     */
-    private <T> T processExtremes(ValueNode forX, ValueNode forY, BiFunction<Long, Long, T> op) {
-        IntegerStamp xStamp = (IntegerStamp) forX.stamp();
-        IntegerStamp yStamp = (IntegerStamp) forY.stamp();
-
-        Kind kind = getKind();
-        assert kind == Kind.Int || kind == Kind.Long;
-        long[] xExtremes = {xStamp.lowerBound(), xStamp.upperBound()};
-        long[] yExtremes = {yStamp.lowerBound(), yStamp.upperBound()};
-        long min = Long.MAX_VALUE;
-        long max = Long.MIN_VALUE;
-        for (long a : xExtremes) {
-            for (long b : yExtremes) {
-                long result = kind == Kind.Int ? ExactMath.multiplyHigh((int) a, (int) b) : ExactMath.multiplyHigh(a, b);
-                min = Math.min(min, result);
-                max = Math.max(max, result);
-            }
-        }
-        return op.apply(min, max);
-    }
-
-    @Override
-    public boolean inferStamp() {
-        return updateStamp(processExtremes(getX(), getY(), (min, max) -> StampFactory.forInteger(getKind(), min, max)));
-    }
-
-    @SuppressWarnings("cast")
-    @Override
-    public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) {
-        return processExtremes(forX, forY, (min, max) -> min == (long) max ? ConstantNode.forIntegerKind(getKind(), min) : this);
-    }
-
-    @Override
-    public void generate(NodeMappableLIRBuilder builder, ArithmeticLIRGenerator gen) {
-        Value a = builder.operand(getX());
-        Value b = builder.operand(getY());
-        builder.setResult(this, gen.emitMulHigh(a, b));
-    }
-}
--- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/arithmetic/IntegerSubExactNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,97 +0,0 @@
-/*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.truffle.nodes.arithmetic;
-
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.compiler.common.type.*;
-import com.oracle.graal.graph.*;
-import com.oracle.graal.graph.spi.*;
-import com.oracle.graal.nodeinfo.*;
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.nodes.calc.*;
-import com.oracle.graal.nodes.spi.*;
-import com.oracle.graal.nodes.util.*;
-import com.oracle.truffle.api.*;
-
-/**
- * Node representing an exact integer substraction that will throw an {@link ArithmeticException} in
- * case the addition would overflow the 32 bit range.
- */
-@NodeInfo
-public final class IntegerSubExactNode extends SubNode implements IntegerExactArithmeticNode {
-    public static final NodeClass<IntegerSubExactNode> TYPE = NodeClass.create(IntegerSubExactNode.class);
-
-    public IntegerSubExactNode(ValueNode x, ValueNode y) {
-        super(TYPE, x, y);
-        assert x.stamp().isCompatible(y.stamp()) && x.stamp() instanceof IntegerStamp;
-    }
-
-    @Override
-    public boolean inferStamp() {
-        // TODO Should probably use a specialized version which understands that it can't overflow
-        return super.inferStamp();
-    }
-
-    @Override
-    public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) {
-        if (GraphUtil.unproxify(forX) == GraphUtil.unproxify(forY)) {
-            return ConstantNode.forIntegerStamp(stamp(), 0);
-        }
-        if (forX.isConstant() && forY.isConstant()) {
-            return canonicalXYconstant(forX, forY);
-        } else if (forY.isConstant()) {
-            long c = forY.asJavaConstant().asLong();
-            if (c == 0) {
-                return forX;
-            }
-        }
-        return this;
-    }
-
-    private ValueNode canonicalXYconstant(ValueNode forX, ValueNode forY) {
-        JavaConstant xConst = forX.asJavaConstant();
-        JavaConstant yConst = forY.asJavaConstant();
-        assert xConst.getKind() == yConst.getKind();
-        try {
-            if (xConst.getKind() == Kind.Int) {
-                return ConstantNode.forInt(ExactMath.subtractExact(xConst.asInt(), yConst.asInt()));
-            } else {
-                assert xConst.getKind() == Kind.Long;
-                return ConstantNode.forLong(ExactMath.subtractExact(xConst.asLong(), yConst.asLong()));
-            }
-        } catch (ArithmeticException ex) {
-            // The operation will result in an overflow exception, so do not canonicalize.
-        }
-        return this;
-    }
-
-    @Override
-    public IntegerExactArithmeticSplitNode createSplit(AbstractBeginNode next, AbstractBeginNode deopt) {
-        return graph().add(new IntegerSubExactSplitNode(stamp(), getX(), getY(), next, deopt));
-    }
-
-    @Override
-    public void lower(LoweringTool tool) {
-        IntegerExactArithmeticSplitNode.lower(tool, this);
-    }
-}
--- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/arithmetic/IntegerSubExactSplitNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.truffle.nodes.arithmetic;
-
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.compiler.common.type.*;
-import com.oracle.graal.graph.*;
-import com.oracle.graal.nodeinfo.*;
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.nodes.spi.*;
-
-@NodeInfo
-public final class IntegerSubExactSplitNode extends IntegerExactArithmeticSplitNode {
-    public static final NodeClass<IntegerSubExactSplitNode> TYPE = NodeClass.create(IntegerSubExactSplitNode.class);
-
-    public IntegerSubExactSplitNode(Stamp stamp, ValueNode x, ValueNode y, AbstractBeginNode next, AbstractBeginNode overflowSuccessor) {
-        super(TYPE, stamp, x, y, next, overflowSuccessor);
-    }
-
-    @Override
-    protected Value generateArithmetic(NodeLIRBuilderTool gen) {
-        return gen.getLIRGeneratorTool().emitSub(gen.operand(getX()), gen.operand(getY()), true);
-    }
-}
--- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/arithmetic/UnsignedMulHighNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.truffle.nodes.arithmetic;
-
-import java.util.function.*;
-
-import com.oracle.graal.api.meta.*;
-import com.oracle.graal.compiler.common.type.*;
-import com.oracle.graal.graph.*;
-import com.oracle.graal.graph.spi.*;
-import com.oracle.graal.lir.gen.*;
-import com.oracle.graal.nodeinfo.*;
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.nodes.calc.*;
-import com.oracle.graal.nodes.spi.*;
-import com.oracle.truffle.api.*;
-
-@NodeInfo(shortName = "|*H|")
-public final class UnsignedMulHighNode extends BinaryNode implements ArithmeticLIRLowerable {
-
-    public static final NodeClass<UnsignedMulHighNode> TYPE = NodeClass.create(UnsignedMulHighNode.class);
-
-    public UnsignedMulHighNode(ValueNode x, ValueNode y) {
-        this((IntegerStamp) x.stamp().unrestricted(), x, y);
-    }
-
-    public UnsignedMulHighNode(IntegerStamp stamp, ValueNode x, ValueNode y) {
-        super(TYPE, stamp, x, y);
-    }
-
-    /**
-     * Determines the minimum and maximum result of this node for the given inputs and returns the
-     * result of the given BiFunction on the minimum and maximum values. Note that the minima and
-     * maxima are calculated using signed min/max functions, while the values themselves are
-     * unsigned.
-     */
-    private <T> T processExtremes(ValueNode forX, ValueNode forY, BiFunction<Long, Long, T> op) {
-        IntegerStamp xStamp = (IntegerStamp) forX.stamp();
-        IntegerStamp yStamp = (IntegerStamp) forY.stamp();
-
-        Kind kind = getKind();
-        assert kind == Kind.Int || kind == Kind.Long;
-        long[] xExtremes = {xStamp.lowerBound(), xStamp.upperBound()};
-        long[] yExtremes = {yStamp.lowerBound(), yStamp.upperBound()};
-        long min = Long.MAX_VALUE;
-        long max = Long.MIN_VALUE;
-        for (long a : xExtremes) {
-            for (long b : yExtremes) {
-                long result = kind == Kind.Int ? ExactMath.multiplyHighUnsigned((int) a, (int) b) : ExactMath.multiplyHighUnsigned(a, b);
-                min = Math.min(min, result);
-                max = Math.max(max, result);
-            }
-        }
-        return op.apply(min, max);
-    }
-
-    @SuppressWarnings("cast")
-    @Override
-    public boolean inferStamp() {
-        // if min is negative, then the value can reach into the unsigned range
-        return updateStamp(processExtremes(getX(), getY(), (min, max) -> (min == (long) max || min >= 0) ? StampFactory.forInteger(getKind(), min, max) : StampFactory.forKind(getKind())));
-    }
-
-    @SuppressWarnings("cast")
-    @Override
-    public ValueNode canonical(CanonicalizerTool tool, ValueNode forX, ValueNode forY) {
-        return processExtremes(forX, forY, (min, max) -> min == (long) max ? ConstantNode.forIntegerKind(getKind(), min) : this);
-    }
-
-    @Override
-    public void generate(NodeMappableLIRBuilder builder, ArithmeticLIRGenerator gen) {
-        Value a = builder.operand(getX());
-        Value b = builder.operand(getY());
-        builder.setResult(this, gen.emitUMulHigh(a, b));
-    }
-}
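
The UnsignedMulHighNode deleted here computes the upper half of an unsigned multiplication; its stamp inference walks the bound extremes as described in the processExtremes comment. For the 32-bit case the operation itself can be written directly in plain Java by widening to long (an illustrative, standalone sketch; it is not the ExactMath implementation the node delegated to):

    public class UnsignedMulHighSketch {
        static int unsignedMulHigh32(int a, int b) {
            // Treat both operands as unsigned 32-bit values, form the full
            // 64-bit product, then keep only the upper 32 bits.
            long product = (a & 0xFFFFFFFFL) * (b & 0xFFFFFFFFL);
            return (int) (product >>> 32);
        }

        public static void main(String[] args) {
            // 0xFFFFFFFF * 0xFFFFFFFF = 0xFFFFFFFE_00000001, so the high word is 0xFFFFFFFE.
            System.out.println(Integer.toHexString(unsignedMulHigh32(-1, -1))); // fffffffe
        }
    }
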
--- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/asserts/NeverPartOfCompilationNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/asserts/NeverPartOfCompilationNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -29,13 +29,13 @@
 import com.oracle.graal.nodes.util.*;
 
 @NodeInfo
-public final class NeverPartOfCompilationNode extends FixedWithNextNode implements IterableNodeType {
+public final class NeverPartOfCompilationNode extends AbstractStateSplit implements IterableNodeType {
 
     public static final NodeClass<NeverPartOfCompilationNode> TYPE = NodeClass.create(NeverPartOfCompilationNode.class);
     protected final String message;
 
-    public NeverPartOfCompilationNode(String message) {
-        super(TYPE, StampFactory.forVoid());
+    public NeverPartOfCompilationNode(String message, FrameState stateAfter) {
+        super(TYPE, StampFactory.forVoid(), stateAfter);
         this.message = message;
     }
 
--- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/frame/NewFrameNode.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/nodes/frame/NewFrameNode.java	Fri Apr 10 16:58:26 2015 -0700
@@ -236,7 +236,7 @@
 
     @Override
     public Node canonical(CanonicalizerTool tool) {
-        if (hasNoUsages()) {
+        if (tool.allUsagesAvailable() && hasNoUsages()) {
             return null;
         } else {
             return this;
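
The guard added above only folds an unused NewFrameNode away when the canonicalizer can already see every usage; a later hunk in this changeset makes VirtualizerToolImpl report allUsagesAvailable() as true. The shape of the check can be illustrated with a small standalone sketch (the Tool interface below is a stand-in for Graal's CanonicalizerTool; names are illustrative):

    public class GuardedCanonicalizationSketch {
        interface Tool {
            boolean allUsagesAvailable();
        }

        static Object canonical(Tool tool, boolean hasNoUsages, Object node) {
            // Removing an unused node is only safe once every usage is known;
            // while a graph is still being built, more usages may appear later.
            if (tool.allUsagesAvailable() && hasNoUsages) {
                return null;    // node is dead, fold it away
            }
            return node;        // keep the node
        }

        public static void main(String[] args) {
            System.out.println(canonical(() -> false, true, "newFrame")); // newFrame (kept)
            System.out.println(canonical(() -> true, true, "newFrame"));  // null (removed)
        }
    }
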
--- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/phases/ReplaceIntrinsicsPhase.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.graal.truffle.phases;
-
-import com.oracle.graal.debug.*;
-import com.oracle.graal.nodes.CallTargetNode.InvokeKind;
-import com.oracle.graal.nodes.*;
-import com.oracle.graal.nodes.java.*;
-import com.oracle.graal.nodes.spi.*;
-import com.oracle.graal.phases.*;
-import com.oracle.graal.phases.common.inlining.*;
-
-/**
- * Compiler phase for intrinsifying the access to the Truffle virtual frame.
- */
-public class ReplaceIntrinsicsPhase extends Phase {
-
-    private final Replacements replacements;
-
-    public ReplaceIntrinsicsPhase(Replacements replacements) {
-        this.replacements = replacements;
-    }
-
-    @Override
-    protected void run(StructuredGraph graph) {
-        for (MethodCallTargetNode methodCallTarget : graph.getNodes(MethodCallTargetNode.TYPE)) {
-            if (methodCallTarget.isAlive()) {
-                InvokeKind invokeKind = methodCallTarget.invokeKind();
-                if (invokeKind.isDirect()) {
-                    StructuredGraph inlineGraph = replacements.getSubstitution(methodCallTarget.targetMethod());
-                    if (inlineGraph != null) {
-                        InliningUtil.inline(methodCallTarget.invoke(), inlineGraph, true, null);
-                        Debug.dump(graph, "After inlining %s", methodCallTarget.targetMethod().toString());
-                    }
-                }
-            }
-        }
-    }
-}
--- a/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/substitutions/TruffleGraphBuilderPlugins.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.truffle/src/com/oracle/graal/truffle/substitutions/TruffleGraphBuilderPlugins.java	Fri Apr 10 16:58:26 2015 -0700
@@ -27,7 +27,7 @@
 import java.util.concurrent.*;
 
 import com.oracle.graal.api.meta.*;
-import com.oracle.graal.compiler.common.*;
+import com.oracle.graal.api.replacements.*;
 import com.oracle.graal.compiler.common.calc.*;
 import com.oracle.graal.compiler.common.type.*;
 import com.oracle.graal.graph.*;
@@ -37,9 +37,9 @@
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.nodes.calc.*;
 import com.oracle.graal.nodes.extended.*;
+import com.oracle.graal.replacements.nodes.arithmetic.*;
 import com.oracle.graal.truffle.*;
 import com.oracle.graal.truffle.nodes.*;
-import com.oracle.graal.truffle.nodes.arithmetic.*;
 import com.oracle.graal.truffle.nodes.asserts.*;
 import com.oracle.graal.truffle.nodes.frame.*;
 import com.oracle.graal.truffle.unsafe.*;
@@ -50,30 +50,30 @@
  * Provides {@link InvocationPlugin}s for Truffle classes.
  */
 public class TruffleGraphBuilderPlugins {
-    public static void registerInvocationPlugins(MetaAccessProvider metaAccess, InvocationPlugins plugins) {
+    public static void registerInvocationPlugins(MetaAccessProvider metaAccess, InvocationPlugins plugins, boolean canDelayIntrinsification, SnippetReflectionProvider snippetReflection) {
 
-        registerOptimizedAssumptionPlugins(plugins);
+        registerOptimizedAssumptionPlugins(plugins, canDelayIntrinsification, snippetReflection);
         registerExactMathPlugins(plugins);
         registerCompilerDirectivesPlugins(plugins);
-        registerCompilerAssertsPlugins(plugins);
+        registerCompilerAssertsPlugins(plugins, canDelayIntrinsification);
         registerOptimizedCallTargetPlugins(metaAccess, plugins);
-        registerUnsafeAccessImplPlugins(plugins);
+        registerUnsafeAccessImplPlugins(plugins, canDelayIntrinsification);
 
         if (TruffleCompilerOptions.TruffleUseFrameWithoutBoxing.getValue()) {
-            registerFrameWithoutBoxingPlugins(plugins);
+            registerFrameWithoutBoxingPlugins(plugins, canDelayIntrinsification);
         } else {
-            registerFrameWithBoxingPlugins(plugins);
+            registerFrameWithBoxingPlugins(plugins, canDelayIntrinsification);
         }
 
     }
 
-    public static void registerOptimizedAssumptionPlugins(InvocationPlugins plugins) {
+    public static void registerOptimizedAssumptionPlugins(InvocationPlugins plugins, boolean canDelayIntrinsification, SnippetReflectionProvider snippetReflection) {
         Registration r = new Registration(plugins, OptimizedAssumption.class);
         InvocationPlugin plugin = new InvocationPlugin() {
             public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver) {
                 if (receiver.isConstant()) {
                     Constant constant = receiver.get().asConstant();
-                    OptimizedAssumption assumption = b.getSnippetReflection().asObject(OptimizedAssumption.class, (JavaConstant) constant);
+                    OptimizedAssumption assumption = snippetReflection.asObject(OptimizedAssumption.class, (JavaConstant) constant);
                     if (assumption.isValid()) {
                         if (targetMethod.getName().equals("isValid")) {
                             b.addPush(ConstantNode.forBoolean(true));
@@ -89,10 +89,12 @@
                             b.add(new DeoptimizeNode(DeoptimizationAction.InvalidateRecompile, DeoptimizationReason.None));
                         }
                     }
+                    return true;
+                } else if (canDelayIntrinsification) {
+                    return false;
                 } else {
                     throw b.bailout("assumption could not be reduced to a constant");
                 }
-                return true;
             }
         };
         r.register1("isValid", Receiver.class, plugin);
@@ -204,7 +206,7 @@
         });
     }
 
-    public static void registerCompilerAssertsPlugins(InvocationPlugins plugins) {
+    public static void registerCompilerAssertsPlugins(InvocationPlugins plugins, boolean canDelayIntrinsification) {
         Registration r = new Registration(plugins, CompilerAsserts.class);
         r.register1("partialEvaluationConstant", Object.class, new InvocationPlugin() {
             public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode value) {
@@ -215,6 +217,8 @@
                 }
                 if (curValue.isConstant()) {
                     return true;
+                } else if (canDelayIntrinsification) {
+                    return false;
                 } else {
                     StringBuilder sb = new StringBuilder();
                     sb.append(curValue);
@@ -235,10 +239,13 @@
             public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode message) {
                 if (message.isConstant()) {
                     String messageString = message.asConstant().toValueString();
-                    b.add(new NeverPartOfCompilationNode(messageString));
+                    b.add(new NeverPartOfCompilationNode(messageString, b.createStateAfter()));
                     return true;
+                } else if (canDelayIntrinsification) {
+                    return false;
+                } else {
+                    throw b.bailout("message for never part of compilation is non-constant");
                 }
-                throw b.bailout("message for never part of compilation is non-constant");
             }
         });
     }
@@ -260,22 +267,22 @@
         });
     }
 
-    public static void registerFrameWithoutBoxingPlugins(InvocationPlugins plugins) {
+    public static void registerFrameWithoutBoxingPlugins(InvocationPlugins plugins, boolean canDelayIntrinsification) {
         Registration r = new Registration(plugins, FrameWithoutBoxing.class);
         registerMaterialize(r);
-        registerUnsafeCast(r);
+        registerUnsafeCast(r, canDelayIntrinsification);
         registerUnsafeLoadStorePlugins(r, Kind.Int, Kind.Long, Kind.Float, Kind.Double, Kind.Object);
     }
 
-    public static void registerFrameWithBoxingPlugins(InvocationPlugins plugins) {
+    public static void registerFrameWithBoxingPlugins(InvocationPlugins plugins, boolean canDelayIntrinsification) {
         Registration r = new Registration(plugins, FrameWithBoxing.class);
         registerMaterialize(r);
-        registerUnsafeCast(r);
+        registerUnsafeCast(r, canDelayIntrinsification);
     }
 
-    public static void registerUnsafeAccessImplPlugins(InvocationPlugins plugins) {
+    public static void registerUnsafeAccessImplPlugins(InvocationPlugins plugins, boolean canDelayIntrinsification) {
         Registration r = new Registration(plugins, UnsafeAccessImpl.class);
-        registerUnsafeCast(r);
+        registerUnsafeCast(r, canDelayIntrinsification);
         registerUnsafeLoadStorePlugins(r, Kind.Boolean, Kind.Byte, Kind.Int, Kind.Short, Kind.Long, Kind.Float, Kind.Double, Kind.Object);
     }
 
@@ -288,7 +295,7 @@
         });
     }
 
-    private static void registerUnsafeCast(Registration r) {
+    private static void registerUnsafeCast(Registration r, boolean canDelayIntrinsification) {
         r.register4("unsafeCast", Object.class, Class.class, boolean.class, boolean.class, new InvocationPlugin() {
             public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode object, ValueNode clazz, ValueNode condition, ValueNode nonNull) {
                 if (clazz.isConstant() && nonNull.isConstant()) {
@@ -322,8 +329,11 @@
                         b.addPush(Kind.Object, new PiNode(object, piStamp, valueAnchorNode));
                     }
                     return true;
+                } else if (canDelayIntrinsification) {
+                    return false;
+                } else {
+                    throw b.bailout("unsafeCast arguments could not reduce to a constant: " + clazz + ", " + nonNull);
                 }
-                throw GraalInternalError.shouldNotReachHere("unsafeCast arguments could not reduce to a constant: " + clazz + ", " + nonNull);
             }
         });
     }
@@ -359,7 +369,7 @@
                 b.addPush(returnKind.getStackKind(), b.add(new UnsafeLoadNode(object, offset, returnKind, locationIdentity, compare)));
                 return true;
             }
-            // TODO: should we throw GraalInternalError.shouldNotReachHere() here?
+            // TODO: should we throw b.bailout() here?
             return false;
         }
     }
@@ -385,7 +395,7 @@
                 b.add(new UnsafeStoreNode(object, offset, value, kind, locationIdentity, null));
                 return true;
             }
-            // TODO: should we throw GraalInternalError.shouldNotReachHere() here?
+            // TODO: should we throw b.bailout() here?
             return false;
         }
     }
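
Taken together, the plugin changes above converge on one contract: if the relevant arguments are compile-time constants the call is intrinsified, if canDelayIntrinsification is set the plugin declines (returning false) so the invoke survives for a later compilation stage, and otherwise the compilation bails out. A self-contained sketch of that three-way decision (hypothetical names; the real code uses Graal's InvocationPlugin API):

    public class IntrinsifyDecisionSketch {
        enum Outcome { INTRINSIFY, DEFER, BAIL_OUT }

        static Outcome decide(boolean argumentsAreConstant, boolean canDelayIntrinsification) {
            if (argumentsAreConstant) {
                return Outcome.INTRINSIFY;      // fold the call during graph building
            } else if (canDelayIntrinsification) {
                return Outcome.DEFER;           // keep the invoke; try again later
            } else {
                return Outcome.BAIL_OUT;        // report a bailout, as b.bailout(...) does above
            }
        }

        public static void main(String[] args) {
            System.out.println(decide(true, false));   // INTRINSIFY
            System.out.println(decide(false, true));   // DEFER
            System.out.println(decide(false, false));  // BAIL_OUT
        }
    }
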
--- a/graal/com.oracle.graal.virtual/src/com/oracle/graal/virtual/phases/ea/EffectsClosure.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.virtual/src/com/oracle/graal/virtual/phases/ea/EffectsClosure.java	Fri Apr 10 16:58:26 2015 -0700
@@ -166,6 +166,9 @@
                 if (node instanceof FixedWithNextNode) {
                     lastFixedNode = (FixedWithNextNode) node;
                 }
+                if (state.isDead()) {
+                    break;
+                }
             }
             VirtualUtil.trace(")\n    end state: %s\n", state);
         }
--- a/graal/com.oracle.graal.virtual/src/com/oracle/graal/virtual/phases/ea/GraphEffectList.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.virtual/src/com/oracle/graal/virtual/phases/ea/GraphEffectList.java	Fri Apr 10 16:58:26 2015 -0700
@@ -65,7 +65,7 @@
             assert position.isAlive();
             if (!node.isAlive()) {
                 graph.addWithoutUniqueWithInputs(node);
-                if (node instanceof FixedNode) {
+                if (node instanceof FixedWithNextNode) {
                     graph.addBeforeFixed(position, (FixedWithNextNode) node);
                 }
             }
@@ -152,6 +152,19 @@
         });
     }
 
+    public void replaceWithSink(FixedWithNextNode node, ControlSinkNode sink) {
+        add("kill if branch", new Effect() {
+            public void apply(StructuredGraph graph, ArrayList<Node> obsoleteNodes) {
+                node.replaceAtPredecessor(sink);
+                GraphUtil.killCFG(node);
+            }
+
+            public boolean isCfgKill() {
+                return true;
+            }
+        });
+    }
+
     /**
      * Replaces the given node at its usages without deleting it. If the current node is a fixed
      * node it will be disconnected from the control flow, so that it will be deleted by a
--- a/graal/com.oracle.graal.virtual/src/com/oracle/graal/virtual/phases/ea/PartialEscapeClosure.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.virtual/src/com/oracle/graal/virtual/phases/ea/PartialEscapeClosure.java	Fri Apr 10 16:58:26 2015 -0700
@@ -207,8 +207,13 @@
                     return false;
                 }
                 effects.ensureAdded(canonicalizedValue, insertBefore);
-                effects.replaceAtUsages(node, canonicalizedValue);
-                addScalarAlias(node, canonicalizedValue);
+                if (canonicalizedValue instanceof ControlSinkNode) {
+                    effects.replaceWithSink((FixedWithNextNode) node, (ControlSinkNode) canonicalizedValue);
+                    state.markAsDead();
+                } else {
+                    effects.replaceAtUsages(node, canonicalizedValue);
+                    addScalarAlias(node, canonicalizedValue);
+                }
             }
             VirtualUtil.trace("replaced via canonicalization: %s -> %s", node, canonicalizedValue);
             return true;
--- a/graal/com.oracle.graal.virtual/src/com/oracle/graal/virtual/phases/ea/VirtualizerToolImpl.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.graal.virtual/src/com/oracle/graal/virtual/phases/ea/VirtualizerToolImpl.java	Fri Apr 10 16:58:26 2015 -0700
@@ -200,4 +200,9 @@
     public boolean canonicalizeReads() {
         return false;
     }
+
+    @Override
+    public boolean allUsagesAvailable() {
+        return true;
+    }
 }
--- a/graal/com.oracle.truffle.api/src/com/oracle/truffle/api/frame/FrameDescriptor.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/graal/com.oracle.truffle.api/src/com/oracle/truffle/api/frame/FrameDescriptor.java	Fri Apr 10 16:58:26 2015 -0700
@@ -192,4 +192,22 @@
             }
         }
     }
+
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder();
+        sb.append("FrameDescriptor@").append(Integer.toHexString(hashCode()));
+        sb.append("{");
+        boolean comma = false;
+        for (FrameSlot slot : slots) {
+            if (comma) {
+                sb.append(", ");
+            } else {
+                comma = true;
+            }
+            sb.append(slot.getIndex()).append(":").append(slot.getIdentifier());
+        }
+        sb.append("}");
+        return sb.toString();
+    }
 }
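
The new FrameDescriptor.toString() renders the identity hash code followed by every slot as index:identifier. A hypothetical usage (it assumes slots are registered via FrameDescriptor.addFrameSlot, as in the Truffle API of this era) is sketched below:

    import com.oracle.truffle.api.frame.FrameDescriptor;

    public class FrameDescriptorToStringExample {
        public static void main(String[] args) {
            FrameDescriptor fd = new FrameDescriptor();
            fd.addFrameSlot("x");
            fd.addFrameSlot("y");
            // Prints something like: FrameDescriptor@1b6d3586{0:x, 1:y}
            // (the hash value will differ from run to run)
            System.out.println(fd);
        }
    }
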
--- a/hotspot/.project	Fri Apr 10 16:55:38 2015 -0700
+++ b/hotspot/.project	Fri Apr 10 16:58:26 2015 -0700
@@ -111,6 +111,11 @@
 			<locationURI>PARENT-1-PROJECT_LOC/build/linux/linux_amd64_graal/generated</locationURI>
 		</link>
 		<link>
+			<name>generated_java_processor</name>
+			<type>2</type>
+			<locationURI>PARENT-1-PROJECT_LOC/graal/com.oracle.graal.hotspot/src_gen/hotspot</locationURI>
+		</link>
+		<link>
 			<name>linux</name>
 			<type>2</type>
 			<locationURI>PARENT-1-PROJECT_LOC/src/os/linux/vm</locationURI>
@@ -121,16 +126,6 @@
 			<locationURI>PARENT-1-PROJECT_LOC/src/os_cpu/linux_sparc/vm</locationURI>
 		</link>
 		<link>
-			<name>solaris_sparc</name>
-			<type>2</type>
-			<locationURI>PARENT-1-PROJECT_LOC/src/os_cpu/solaris_sparc/vm</locationURI>
-		</link>
-		<link>
-			<name>solaris</name>
-			<type>2</type>
-			<locationURI>PARENT-1-PROJECT_LOC/src/os/solaris/vm</locationURI>
-		</link>
-		<link>
 			<name>linux_x86</name>
 			<type>2</type>
 			<locationURI>PARENT-1-PROJECT_LOC/src/os_cpu/linux_x86/vm</locationURI>
@@ -141,6 +136,16 @@
 			<locationURI>WORKSPACE_LOC/make</locationURI>
 		</link>
 		<link>
+			<name>solaris</name>
+			<type>2</type>
+			<locationURI>PARENT-1-PROJECT_LOC/src/os/solaris/vm</locationURI>
+		</link>
+		<link>
+			<name>solaris_sparc</name>
+			<type>2</type>
+			<locationURI>PARENT-1-PROJECT_LOC/src/os_cpu/solaris_sparc/vm</locationURI>
+		</link>
+		<link>
 			<name>sparc</name>
 			<type>2</type>
 			<locationURI>PARENT-1-PROJECT_LOC/src/cpu/sparc/vm</locationURI>
--- a/make/Makefile	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/Makefile	Fri Apr 10 16:58:26 2015 -0700
@@ -99,6 +99,7 @@
 COMMON_VM_PRODUCT_TARGETS=product product1 docs export_product
 COMMON_VM_FASTDEBUG_TARGETS=fastdebug fastdebug1 docs export_fastdebug
 COMMON_VM_DEBUG_TARGETS=debug debug1 docs export_debug
+COMMON_VM_OPTIMIZED_TARGETS=optimized optimized1 docs export_optimized
 
 # JDK directory list
 JDK_DIRS=bin include jre lib demo
@@ -115,20 +116,21 @@
 all_product:   product1 docs export_product
 all_fastdebug: fastdebug1 docs export_fastdebug
 all_debug:     debug1 docs export_debug
+all_optimized: optimized1 docs export_optimized
 else
 ifeq ($(MACOSX_UNIVERSAL),true)
 all_product:   universal_product
 all_fastdebug: universal_fastdebug
 all_debug:     universal_debug
+all_optimized: universal_optimized
 else
 all_product:   $(COMMON_VM_PRODUCT_TARGETS)
 all_fastdebug: $(COMMON_VM_FASTDEBUG_TARGETS)
 all_debug:     $(COMMON_VM_DEBUG_TARGETS)
+all_optimized: $(COMMON_VM_OPTIMIZED_TARGETS)
 endif
 endif
 
-all_optimized: optimized optimized1 docs export_optimized
-
 allzero:           all_productzero all_fastdebugzero
 all_productzero:   productzero docs export_product
 all_fastdebugzero: fastdebugzero docs export_fastdebug
@@ -336,7 +338,7 @@
 export_product_jdk::
 	$(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR) generic_export
 export_optimized_jdk::
-	$(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR) generic_export
+	$(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/$(@:export_%_jdk=%) generic_export
 export_fastdebug_jdk::
 	$(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/$(@:export_%_jdk=%) generic_export
 export_debug_jdk::
@@ -778,6 +780,19 @@
 	   ($(CD) $(JDK_IMAGE_DIR)/debug && $(TAR) -xf -) ; \
 	fi
 
+copy_optimized_jdk::
+	$(RM) -r $(JDK_IMAGE_DIR)/optimized
+	$(MKDIR) -p $(JDK_IMAGE_DIR)/optimized
+	if [ -d $(JDK_IMPORT_PATH)/optimized ] ; then \
+	  ($(CD) $(JDK_IMPORT_PATH)/optimized && \
+	   $(TAR) -cf - $(JDK_DIRS)) | \
+	   ($(CD) $(JDK_IMAGE_DIR)/optimized && $(TAR) -xf -) ; \
+	else \
+	  ($(CD) $(JDK_IMPORT_PATH) && \
+	   $(TAR) -cf - $(JDK_DIRS)) | \
+	   ($(CD) $(JDK_IMAGE_DIR)/optimized && $(TAR) -xf -) ; \
+	fi
+
 #
 # Check target
 #
--- a/make/aix/makefiles/adjust-mflags.sh	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/aix/makefiles/adjust-mflags.sh	Fri Apr 10 16:58:26 2015 -0700
@@ -1,6 +1,6 @@
 #! /bin/sh
 #
-# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -64,7 +64,7 @@
 	echo "$MFLAGS" \
 	| sed '
 		s/^-/ -/
-		s/ -\([^ 	][^ 	]*\)j/ -\1 -j/
+		s/ -\([^ 	I][^ 	I]*\)j/ -\1 -j/
 		s/ -j[0-9][0-9]*/ -j/
 		s/ -j\([^ 	]\)/ -j -\1/
 		s/ -j/ -j'${HOTSPOT_BUILD_JOBS:-${default_build_jobs}}'/
--- a/make/aix/makefiles/fastdebug.make	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/aix/makefiles/fastdebug.make	Fri Apr 10 16:58:26 2015 -0700
@@ -67,7 +67,6 @@
 #    not justified.
 LFLAGS_QIPA=
 
-G_SUFFIX = _g
 VERSION = optimized
 SYSDEFS += -DASSERT -DFASTDEBUG
 PICFLAGS = DEFAULT
--- a/make/aix/makefiles/xlc.make	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/aix/makefiles/xlc.make	Fri Apr 10 16:58:26 2015 -0700
@@ -74,6 +74,12 @@
 # no xlc counterpart for -fcheck-new
 # CFLAGS += -fcheck-new
 
+# We need to define this on the command line if we want to use the
+# predefined format specifiers from "inttypes.h". Otherwise system headers
+# can indirectly include inttypes.h before we define __STDC_FORMAT_MACROS
+# in globalDefinitions.hpp
+CFLAGS += -D__STDC_FORMAT_MACROS
+
 ARCHFLAG = -q64
 
 CFLAGS     += $(ARCHFLAG)
--- a/make/bsd/makefiles/adjust-mflags.sh	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/bsd/makefiles/adjust-mflags.sh	Fri Apr 10 16:58:26 2015 -0700
@@ -64,7 +64,7 @@
 	echo "$MFLAGS" \
 	| sed '
 		s/^-/ -/
-		s/ -\([^ 	][^ 	]*\)j/ -\1 -j/
+		s/ -\([^ 	I][^ 	I]*\)j/ -\1 -j/
 		s/ -j[0-9][0-9]*/ -j/
 		s/ -j\([^ 	]\)/ -j -\1/
 		s/ -j/ -j'${HOTSPOT_BUILD_JOBS:-${default_build_jobs}}'/
--- a/make/bsd/makefiles/mapfile-vers-debug	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/bsd/makefiles/mapfile-vers-debug	Fri Apr 10 16:58:26 2015 -0700
@@ -187,6 +187,9 @@
                 _JVM_IsSupportedJNIVersion
                 _JVM_IsThreadAlive
                 _JVM_IsVMGeneratedMethodIx
+                _JVM_KnownToNotExist
+                _JVM_GetResourceLookupCacheURLs
+                _JVM_GetResourceLookupCache
                 _JVM_LatestUserDefinedLoader
                 _JVM_Listen
                 _JVM_LoadClass0
--- a/make/bsd/makefiles/mapfile-vers-product	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/bsd/makefiles/mapfile-vers-product	Fri Apr 10 16:58:26 2015 -0700
@@ -187,6 +187,9 @@
                 _JVM_IsSupportedJNIVersion
                 _JVM_IsThreadAlive
                 _JVM_IsVMGeneratedMethodIx
+                _JVM_KnownToNotExist
+                _JVM_GetResourceLookupCacheURLs
+                _JVM_GetResourceLookupCache
                 _JVM_LatestUserDefinedLoader
                 _JVM_Listen
                 _JVM_LoadClass0
--- a/make/bsd/makefiles/universal.gmk	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/bsd/makefiles/universal.gmk	Fri Apr 10 16:58:26 2015 -0700
@@ -25,6 +25,8 @@
 # macosx universal builds
 universal_product:
 	$(MAKE) MACOSX_UNIVERSAL=true all_product_universal
+universal_optimized:
+	$(MAKE) MACOSX_UNIVERSAL=true all_optimized_universal
 universal_fastdebug:
 	$(MAKE) MACOSX_UNIVERSAL=true all_fastdebug_universal
 universal_debug:
@@ -36,6 +38,10 @@
 #	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_PRODUCT_TARGETS)
 	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_PRODUCT_TARGETS)
 	$(QUIETLY) $(MAKE) BUILD_FLAVOR=product EXPORT_SUBDIR= universalize
+all_optimized_universal:
+#	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_OPTIMIZED_TARGETS)
+	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_OPTIMIZED_TARGETS)
+	$(QUIETLY) $(MAKE) BUILD_FLAVOR=optimized EXPORT_SUBDIR=/optimized universalize
 all_fastdebug_universal:
 #	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_FASTDEBUG_TARGETS)
 	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_FASTDEBUG_TARGETS)
@@ -98,13 +104,15 @@
 export_product_jdk::
 	$(MAKE) EXPORT_SUBDIR=           export_universal
 export_optimized_jdk::
-	$(MAKE) EXPORT_SUBDIR=           export_universal
+	$(MAKE) EXPORT_SUBDIR=/optimized export_universal
 export_fastdebug_jdk::
 	$(MAKE) EXPORT_SUBDIR=/fastdebug export_universal
 export_debug_jdk::
 	$(MAKE) EXPORT_SUBDIR=/debug     export_universal
 copy_product_jdk::
 	$(MAKE) COPY_SUBDIR=             copy_universal
+copy_optimized_jdk::
+	$(MAKE) COPY_SUBDIR=/optimized   copy_universal
 copy_fastdebug_jdk::
 	$(MAKE) COPY_SUBDIR=/fastdebug   copy_universal
 copy_debug_jdk::
@@ -112,5 +120,6 @@
 
 .PHONY:	universal_product universal_fastdebug universal_debug \
 	all_product_universal all_fastdebug_universal all_debug_universal \
+	universal_optimized all_optimized_universal \
 	universalize export_universal copy_universal \
 	$(UNIVERSAL_LIPO_LIST) $(UNIVERSAL_COPY_LIST)
--- a/make/bsd/makefiles/vm.make	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/bsd/makefiles/vm.make	Fri Apr 10 16:58:26 2015 -0700
@@ -263,10 +263,10 @@
 
 vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
 
-mapfile : $(MAPFILE) vm.def
+mapfile : $(MAPFILE) vm.def mapfile_ext
 	rm -f $@
 	awk '{ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE")	\
-                 { system ("cat vm.def"); }		\
+                 { system ("cat mapfile_ext"); system ("cat vm.def"); } \
                else					\
                  { print $$0 }				\
              }' > $@ < $(MAPFILE)
@@ -278,6 +278,13 @@
 vm.def: $(Res_Files) $(Obj_Files)
 	sh $(GAMMADIR)/make/bsd/makefiles/build_vm_def.sh *.o > $@
 
+mapfile_ext:
+	rm -f $@
+	touch $@
+	if [ -f $(HS_ALT_MAKE)/bsd/makefiles/mapfile-ext ]; then \
+	  cat $(HS_ALT_MAKE)/bsd/makefiles/mapfile-ext > $@; \
+	fi
+
 STATIC_CXX = false
 
 ifeq ($(LINK_INTO),AOUT)
--- a/make/excludeSrc.make	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/excludeSrc.make	Fri Apr 10 16:58:26 2015 -0700
@@ -21,6 +21,9 @@
 # questions.
 #
 #
+
+include $(GAMMADIR)/make/altsrc.make
+
 ifeq ($(INCLUDE_JVMTI), false)
       CXXFLAGS += -DINCLUDE_JVMTI=0
       CFLAGS += -DINCLUDE_JVMTI=0
@@ -70,37 +73,49 @@
       CXXFLAGS += -DINCLUDE_CDS=0
       CFLAGS += -DINCLUDE_CDS=0
 
-      Src_Files_EXCLUDE += filemap.cpp metaspaceShared.cpp
+      Src_Files_EXCLUDE += filemap.cpp metaspaceShared*.cpp sharedPathsMiscInfo.cpp \
+        systemDictionaryShared.cpp classLoaderExt.cpp sharedClassUtil.cpp
 endif
 
 ifeq ($(INCLUDE_ALL_GCS), false)
       CXXFLAGS += -DINCLUDE_ALL_GCS=0
       CFLAGS += -DINCLUDE_ALL_GCS=0
 
-      Src_Files_EXCLUDE += \
-	cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \
-	cmsGCAdaptivePolicyCounters.cpp cmsLockVerifier.cpp compactibleFreeListSpace.cpp \
-	concurrentMarkSweepGeneration.cpp concurrentMarkSweepThread.cpp \
-	freeChunk.cpp adaptiveFreeList.cpp promotionInfo.cpp vmCMSOperations.cpp \
-	collectionSetChooser.cpp concurrentG1Refine.cpp concurrentG1RefineThread.cpp \
-	concurrentMark.cpp concurrentMarkThread.cpp dirtyCardQueue.cpp g1AllocRegion.cpp \
-	g1BlockOffsetTable.cpp g1CardCounts.cpp g1CollectedHeap.cpp g1CollectorPolicy.cpp \
-	g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
-	g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp g1OopClosures.cpp \
-	g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1StringDedup.cpp g1StringDedupStat.cpp \
-	g1StringDedupTable.cpp g1StringDedupThread.cpp g1StringDedupQueue.cpp g1_globals.cpp heapRegion.cpp \
-	g1BiasedArray.cpp heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
-	ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp g1CodeCacheRemSet.cpp \
-	adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \
-	cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \
-	parallelScavengeHeap.cpp parMarkBitMap.cpp pcTasks.cpp psAdaptiveSizePolicy.cpp \
-	psCompactionManager.cpp psGCAdaptivePolicyCounters.cpp psGenerationCounters.cpp \
-	psMarkSweep.cpp psMarkSweepDecorator.cpp psMemoryPool.cpp psOldGen.cpp \
-	psParallelCompact.cpp psPromotionLAB.cpp psPromotionManager.cpp psScavenge.cpp \
-	psTasks.cpp psVirtualspace.cpp psYoungGen.cpp vmPSOperations.cpp asParNewGeneration.cpp \
-	parCardTableModRefBS.cpp parGCAllocBuffer.cpp parNewGeneration.cpp mutableSpace.cpp \
-	gSpaceCounters.cpp allocationStats.cpp spaceCounters.cpp gcAdaptivePolicyCounters.cpp \
-	mutableNUMASpace.cpp immutableSpace.cpp yieldingWorkGroup.cpp hSpaceCounters.cpp
+      gc_impl := $(HS_COMMON_SRC)/share/vm/gc_implementation
+      gc_impl_alt := $(HS_ALT_SRC)/share/vm/gc_implementation
+      gc_subdirs := concurrentMarkSweep g1 parallelScavenge parNew
+      gc_exclude := $(foreach gc,$(gc_subdirs),				\
+		     $(notdir $(wildcard $(gc_impl)/$(gc)/*.cpp))	\
+		     $(notdir $(wildcard $(gc_impl_alt)/$(gc)/*.cpp)))
+      Src_Files_EXCLUDE += $(gc_exclude)
+
+      # Exclude everything in $(gc_impl)/shared except the files listed
+      # in $(gc_shared_keep).
+      gc_shared_all := $(notdir $(wildcard $(gc_impl)/shared/*.cpp))
+      gc_shared_keep :=							\
+	adaptiveSizePolicy.cpp						\
+	ageTable.cpp							\
+	collectorCounters.cpp						\
+	cSpaceCounters.cpp						\
+	gcId.cpp							\
+	gcPolicyCounters.cpp						\
+	gcStats.cpp							\
+	gcTimer.cpp							\
+	gcTrace.cpp							\
+	gcTraceSend.cpp							\
+	gcTraceTime.cpp							\
+	gcUtil.cpp							\
+	generationCounters.cpp						\
+	markSweep.cpp							\
+	objectCountEventSender.cpp					\
+	spaceDecorator.cpp						\
+	vmGCOperations.cpp
+      Src_Files_EXCLUDE += $(filter-out $(gc_shared_keep),$(gc_shared_all))
+
+      # src/share/vm/services
+      Src_Files_EXCLUDE +=						\
+	g1MemoryPool.cpp						\
+	psMemoryPool.cpp
 endif
 
 ifeq ($(INCLUDE_NMT), false)
@@ -108,8 +123,8 @@
       CFLAGS += -DINCLUDE_NMT=0
 
       Src_Files_EXCLUDE += \
-	 memBaseline.cpp memPtr.cpp memRecorder.cpp memReporter.cpp memSnapshot.cpp memTrackWorker.cpp \
-	 memTracker.cpp nmtDCmd.cpp
+	 memBaseline.cpp memReporter.cpp mallocTracker.cpp virtualMemoryTracker.cpp nmtCommon.cpp \
+	 memTracker.cpp nmtDCmd.cpp mallocSiteTable.cpp
 endif
 
 -include $(HS_ALT_MAKE)/excludeSrc.make
--- a/make/hotspot_version	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/hotspot_version	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 # 
-# Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -31,11 +31,11 @@
 #
 
 # Don't put quotes (fail windows build).
-HOTSPOT_VM_COPYRIGHT=Copyright 2014
+HOTSPOT_VM_COPYRIGHT=Copyright 2015
 
 HS_MAJOR_VER=25
-HS_MINOR_VER=25
-HS_BUILD_NUMBER=02
+HS_MINOR_VER=40
+HS_BUILD_NUMBER=25
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/make/jprt.gmk	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/jprt.gmk	Fri Apr 10 16:58:26 2015 -0700
@@ -42,6 +42,9 @@
 jprt_build_fastdebugEmb:
 	$(MAKE) JAVASE_EMBEDDED=true MINIMIZE_RAM_USAGE=true jprt_build_fastdebug
 
+jprt_build_optimizedEmb:
+	$(MAKE) JAVASE_EMBEDDED=true MINIMIZE_RAM_USAGE=true jprt_build_optimized
+
 jprt_build_productOpen:
 	$(MAKE) OPENJDK=true jprt_build_product
 
@@ -51,6 +54,9 @@
 jprt_build_fastdebugOpen:
 	$(MAKE) OPENJDK=true jprt_build_fastdebug
 
+jprt_build_optimizedOpen:
+	$(MAKE) OPENJDK=true jprt_build_optimized
+
 jprt_build_product: all_product copy_product_jdk export_product_jdk
 	( $(CD) $(JDK_IMAGE_DIR) && \
 	  $(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . )
@@ -63,5 +69,9 @@
 	( $(CD) $(JDK_IMAGE_DIR)/debug && \
 	  $(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . )
 
-.PHONY: jprt_build_product jprt_build_fastdebug jprt_build_debug
+jprt_build_optimized: all_optimized copy_optimized_jdk export_optimized_jdk
+	( $(CD) $(JDK_IMAGE_DIR)/optimized && \
+	  $(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . )
 
+.PHONY: jprt_build_product jprt_build_fastdebug jprt_build_debug jprt_build_optimized
+
--- a/make/jprt.properties	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,403 +0,0 @@
-#
-# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-#
-
-# Properties for jprt
-
-# All build result bundles are full jdks.
-jprt.need.sibling.build=false
-
-# At submit time, the release supplied will be in jprt.submit.release
-#    and will be one of the official release names defined in jprt.
-#    jprt supports property value expansion using ${property.name} syntax.
-
-# This tells jprt what default release we want to build
-
-jprt.hotspot.default.release=jdk8u20
-
-jprt.tools.default.release=${jprt.submit.option.release?${jprt.submit.option.release}:${jprt.hotspot.default.release}}
-
-# Disable syncing the source after builds and tests are done.
-
-jprt.sync.push=false
-
-# Note: we want both embedded releases and regular releases to build and test
-#       all platforms so that regressions are not introduced (eg. change to
-#       common code by SE breaks PPC/ARM; change to common code by SE-E breaks
-#       sparc etc.
-
-# Define the Solaris platforms we want for the various releases
-jprt.my.solaris.sparcv9.jdk8u20=solaris_sparcv9_5.10
-jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
-jprt.my.solaris.sparcv9.jdk7u8=${jprt.my.solaris.sparcv9.jdk7}
-jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
-
-jprt.my.solaris.x64.jdk8u20=solaris_x64_5.10
-jprt.my.solaris.x64.jdk7=solaris_x64_5.10
-jprt.my.solaris.x64.jdk7u8=${jprt.my.solaris.x64.jdk7}
-jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
-
-jprt.my.linux.i586.jdk8u20=linux_i586_2.6
-jprt.my.linux.i586.jdk7=linux_i586_2.6
-jprt.my.linux.i586.jdk7u8=${jprt.my.linux.i586.jdk7}
-jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
-
-jprt.my.linux.x64.jdk8u20=linux_x64_2.6
-jprt.my.linux.x64.jdk7=linux_x64_2.6
-jprt.my.linux.x64.jdk7u8=${jprt.my.linux.x64.jdk7}
-jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
-
-jprt.my.linux.ppc.jdk8u20=linux_ppc_2.6
-jprt.my.linux.ppc.jdk7=linux_ppc_2.6
-jprt.my.linux.ppc.jdk7u8=${jprt.my.linux.ppc.jdk7}
-jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}}
-
-jprt.my.linux.ppcv2.jdk8u20=linux_ppcv2_2.6
-jprt.my.linux.ppcv2.jdk7=linux_ppcv2_2.6
-jprt.my.linux.ppcv2.jdk7u8=${jprt.my.linux.ppcv2.jdk7}
-jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}}
-
-jprt.my.linux.armvfpsflt.jdk8u20=linux_armvfpsflt_2.6
-jprt.my.linux.armvfpsflt=${jprt.my.linux.armvfpsflt.${jprt.tools.default.release}}
-
-jprt.my.linux.armvfphflt.jdk8u20=linux_armvfphflt_2.6
-jprt.my.linux.armvfphflt=${jprt.my.linux.armvfphflt.${jprt.tools.default.release}}
-
-# The ARM GP vfp-sflt build is not currently supported
-#jprt.my.linux.armvs.jdk8u20=linux_armvs_2.6
-#jprt.my.linux.armvs=${jprt.my.linux.armvs.${jprt.tools.default.release}}
-
-jprt.my.linux.armvh.jdk8u20=linux_armvh_2.6
-jprt.my.linux.armvh=${jprt.my.linux.armvh.${jprt.tools.default.release}}
-
-jprt.my.linux.armsflt.jdk8u20=linux_armsflt_2.6
-jprt.my.linux.armsflt.jdk7=linux_armsflt_2.6
-jprt.my.linux.armsflt.jdk7u8=${jprt.my.linux.armsflt.jdk7}
-jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}
-
-jprt.my.macosx.x64.jdk8u20=macosx_x64_10.7
-jprt.my.macosx.x64.jdk7=macosx_x64_10.7
-jprt.my.macosx.x64.jdk7u8=${jprt.my.macosx.x64.jdk7}
-jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
-
-jprt.my.windows.i586.jdk8u20=windows_i586_6.1
-jprt.my.windows.i586.jdk7=windows_i586_6.1
-jprt.my.windows.i586.jdk7u8=${jprt.my.windows.i586.jdk7}
-jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
-
-jprt.my.windows.x64.jdk8u20=windows_x64_6.1
-jprt.my.windows.x64.jdk7=windows_x64_6.1
-jprt.my.windows.x64.jdk7u8=${jprt.my.windows.x64.jdk7}
-jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
-
-# Standard list of jprt build targets for this source tree
-
-jprt.build.targets.standard= \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug|optimized}, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}, \
-    ${jprt.my.linux.i586}-{product|fastdebug}, \
-    ${jprt.my.linux.x64}-{product|fastdebug|optimized}, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}, \
-    ${jprt.my.windows.i586}-{product|fastdebug}, \
-    ${jprt.my.windows.x64}-{product|fastdebug|optimized}, \
-    ${jprt.my.linux.armvh}-{product|fastdebug}
-
-jprt.build.targets.open= \
-    ${jprt.my.solaris.x64}-{debugOpen}, \
-    ${jprt.my.linux.x64}-{productOpen}
-
-jprt.build.targets.embedded= \
-    ${jprt.my.linux.i586}-{productEmb|fastdebugEmb}, \
-    ${jprt.my.linux.ppc}-{productEmb|fastdebugEmb}, \
-    ${jprt.my.linux.ppcv2}-{productEmb|fastdebugEmb}, \
-    ${jprt.my.linux.armvfpsflt}-{productEmb|fastdebugEmb}, \
-    ${jprt.my.linux.armvfphflt}-{productEmb|fastdebugEmb}, \
-    ${jprt.my.linux.armsflt}-{productEmb|fastdebugEmb}
-
-jprt.build.targets.all=${jprt.build.targets.standard}, \
-    ${jprt.build.targets.embedded}, ${jprt.build.targets.open}
-
-jprt.build.targets.jdk8u20=${jprt.build.targets.all}
-jprt.build.targets.jdk7=${jprt.build.targets.all}
-jprt.build.targets.jdk7u8=${jprt.build.targets.all}
-jprt.build.targets=${jprt.build.targets.${jprt.tools.default.release}}
-
-# Subset lists of test targets for this source tree
-
-jprt.my.solaris.sparcv9.test.targets= \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98_nontiered, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-scimark, \
-    ${jprt.my.solaris.sparcv9}-product-c2-runThese, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_SerialGC, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_CMS, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_G1, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_SerialGC, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParallelGC, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParNewGC, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_CMS, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_G1, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParOldGC, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_default_nontiered, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_SerialGC, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_ParallelGC, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_CMS, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_G1, \
-    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_ParOldGC
-
-jprt.my.solaris.x64.test.targets= \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jvm98, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-scimark, \
-    ${jprt.my.solaris.x64}-product-c2-runThese, \
-    ${jprt.my.solaris.x64}-product-c2-runThese_Xcomp, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_G1, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_CMS, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_G1, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_SerialGC, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_CMS, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_G1, \
-    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParOldGC
-
-jprt.my.linux.i586.test.targets = \
-    ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-c2-jvm98_nontiered, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-scimark, \
-    ${jprt.my.linux.i586}-product-c1-runThese_Xcomp, \
-    ${jprt.my.linux.i586}-fastdebug-c1-runThese_Xshare, \
-    ${jprt.my.linux.i586}-fastdebug-c2-runThese_Xcomp, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \
-    ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_SerialGC, \
-    ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParallelGC, \
-    ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParNewGC, \
-    ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_CMS, \
-    ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_G1, \
-    ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParOldGC, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_SerialGC, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-c2-jbb_default_nontiered, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_ParallelGC, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_CMS, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_G1, \
-    ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_ParOldGC
-
-jprt.my.linux.x64.test.targets = \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-scimark, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_G1, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_CMS, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_G1, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_G1, \
-    ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParOldGC
-
-jprt.my.macosx.x64.test.targets = \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-scimark, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_G1, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_CMS, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_G1, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_G1, \
-    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParOldGC
-
-jprt.my.windows.i586.test.targets = \
-    ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
-    ${jprt.my.windows.i586}-{product|fastdebug}-c2-jvm98_nontiered, \
-    ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-scimark, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-runThese, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-runThese_Xcomp, \
-    ${jprt.my.windows.i586}-fastdebug-c1-runThese_Xshare, \
-    ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \
-    ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \
-    ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \
-    ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \
-    ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \
-    ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_SerialGC, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParallelGC, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParNewGC, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_CMS, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_G1, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParOldGC, \
-    ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jbb_default, \
-    ${jprt.my.windows.i586}-{product|fastdebug}-c2-jbb_default_nontiered, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-jbb_ParallelGC, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-jbb_CMS, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-jbb_G1, \
-    ${jprt.my.windows.i586}-product-{c1|c2}-jbb_ParOldGC
-
-jprt.my.windows.x64.test.targets = \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-jvm98, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-scimark, \
-    ${jprt.my.windows.x64}-product-c2-runThese, \
-    ${jprt.my.windows.x64}-product-c2-runThese_Xcomp, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_G1, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_CMS, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_G1, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-jbb_default, \
-    ${jprt.my.windows.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
-    ${jprt.my.windows.x64}-product-c2-jbb_CMS, \
-    ${jprt.my.windows.x64}-product-c2-jbb_ParallelGC, \
-    ${jprt.my.windows.x64}-product-c2-jbb_G1, \
-    ${jprt.my.windows.x64}-product-c2-jbb_ParOldGC
-
-# Some basic "smoke" tests for OpenJDK builds
-jprt.test.targets.open = \
-    ${jprt.my.solaris.x64}-{productOpen|fastdebugOpen}-c2-jvm98, \
-    ${jprt.my.linux.x64}-{productOpen|fastdebugOpen}-c2-jvm98
-
-# Testing for actual embedded builds is different to standard
-jprt.my.linux.i586.test.targets.embedded = \
-    linux_i586_2.6-product-c1-scimark
-
-# The complete list of test targets for jprt
-# Note: no PPC or ARM tests at this stage
-
-jprt.test.targets.standard = \
-  ${jprt.my.linux.i586.test.targets.embedded}, \
-  ${jprt.my.solaris.sparcv9.test.targets}, \
-  ${jprt.my.solaris.x64.test.targets}, \
-  ${jprt.my.linux.i586.test.targets}, \
-  ${jprt.my.linux.x64.test.targets}, \
-  ${jprt.my.macosx.x64.test.targets}, \
-  ${jprt.my.windows.i586.test.targets}, \
-  ${jprt.my.windows.x64.test.targets}, \
-  ${jprt.test.targets.open}
-
-jprt.test.targets.embedded= 		\
-  ${jprt.my.linux.i586.test.targets.embedded}, \
-  ${jprt.my.solaris.sparcv9.test.targets}, \
-  ${jprt.my.solaris.x64.test.targets}, \
-  ${jprt.my.linux.x64.test.targets}, \
-  ${jprt.my.windows.i586.test.targets}, \
-  ${jprt.my.windows.x64.test.targets}
-
-jprt.test.targets.jdk8u20=${jprt.test.targets.standard}
-jprt.test.targets.jdk7=${jprt.test.targets.standard}
-jprt.test.targets.jdk7u8=${jprt.test.targets.jdk7}
-jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}}
-
-# The default test/Makefile targets that should be run
-
-#jprt.make.rule.test.targets=*-product-*-packtest
-
-jprt.make.rule.test.targets.standard.client = \
-  ${jprt.my.linux.i586}-*-c1-clienttest, \
-  ${jprt.my.windows.i586}-*-c1-clienttest
-
-jprt.make.rule.test.targets.standard.server = \
-  ${jprt.my.solaris.sparcv9}-*-c2-servertest, \
-  ${jprt.my.solaris.x64}-*-c2-servertest, \
-  ${jprt.my.linux.i586}-*-c2-servertest, \
-  ${jprt.my.linux.x64}-*-c2-servertest, \
-  ${jprt.my.macosx.x64}-*-c2-servertest, \
-  ${jprt.my.windows.i586}-*-c2-servertest, \
-  ${jprt.my.windows.x64}-*-c2-servertest
-
-jprt.make.rule.test.targets.standard.internalvmtests = \
-  ${jprt.my.solaris.sparcv9}-fastdebug-c2-internalvmtests, \
-  ${jprt.my.solaris.x64}-fastdebug-c2-internalvmtests, \
-  ${jprt.my.linux.i586}-fastdebug-c2-internalvmtests, \
-  ${jprt.my.linux.x64}-fastdebug-c2-internalvmtests, \
-  ${jprt.my.macosx.x64}-fastdebug-c2-internalvmtests, \
-  ${jprt.my.windows.i586}-fastdebug-c2-internalvmtests, \
-  ${jprt.my.windows.x64}-fastdebug-c2-internalvmtests
-
-jprt.make.rule.test.targets.standard.wbapi = \
-  ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-wbapitest, \
-  ${jprt.my.solaris.x64}-{product|fastdebug}-c2-wbapitest, \
-  ${jprt.my.linux.i586}-{product|fastdebug}-c2-wbapitest, \
-  ${jprt.my.linux.x64}-{product|fastdebug}-c2-wbapitest, \
-  ${jprt.my.windows.i586}-{product|fastdebug}-c2-wbapitest, \
-  ${jprt.my.windows.x64}-{product|fastdebug}-c2-wbapitest, \
-  ${jprt.my.linux.i586}-{product|fastdebug}-c1-wbapitest, \
-  ${jprt.my.windows.i586}-{product|fastdebug}-c1-wbapitest
-
-jprt.make.rule.test.targets.standard = \
-  ${jprt.make.rule.test.targets.standard.client}, \
-  ${jprt.make.rule.test.targets.standard.server}, \
-  ${jprt.make.rule.test.targets.standard.internalvmtests}, \
-  ${jprt.make.rule.test.targets.standard.wbapi}
-
-jprt.make.rule.test.targets.embedded = \
-  ${jprt.make.rule.test.targets.standard.client}
-
-jprt.make.rule.test.targets.jdk8u20=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk7u8=${jprt.make.rule.test.targets.jdk7}
-jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}}
-
-# 7155453: Work-around to prevent popups on OSX from blocking test completion
-# but the work-around is added to all platforms to be consistent
-jprt.jbb.options=-Djava.awt.headless=true
--- a/make/linux/makefiles/adjust-mflags.sh	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/linux/makefiles/adjust-mflags.sh	Fri Apr 10 16:58:26 2015 -0700
@@ -64,7 +64,7 @@
 	echo "$MFLAGS" \
 	| sed '
 		s/^-/ -/
-		s/ -\([^ 	][^ 	]*\)j/ -\1 -j/
+		s/ -\([^ 	I][^ 	I]*\)j/ -\1 -j/
 		s/ -j[0-9][0-9]*/ -j/
 		s/ -j\([^ 	]\)/ -j -\1/
 		s/ -j/ -j'${HOTSPOT_BUILD_JOBS:-${default_build_jobs}}'/
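
The extra I in both bracket expressions stops the -j splitting rule from tearing apart -I<dir> options that can appear in MFLAGS when make is invoked with an include path: with the old pattern, any such path containing a literal j (for example a directory named jdk...) was split in the middle. A minimal sketch of the two patterns with Python's re module, using a hypothetical MFLAGS value, shows the difference; the later sed lines above then normalize any genuine -jN into the configured HOTSPOT_BUILD_JOBS count.

    import re

    mflags = " -I/home/user/jdk8u/hotspot/make -j4"   # hypothetical value handed down by make

    old = re.sub(r' -([^ \t][^ \t]*)j',   r' -\1 -j', mflags)
    new = re.sub(r' -([^ \tI][^ \tI]*)j', r' -\1 -j', mflags)

    print(old)  # ' -I/home/user/ -jdk8u/hotspot/make -j4'  -- the include path is mangled
    print(new)  # ' -I/home/user/jdk8u/hotspot/make -j4'    -- left untouched
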
--- a/make/linux/makefiles/mapfile-vers-debug	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/linux/makefiles/mapfile-vers-debug	Fri Apr 10 16:58:26 2015 -0700
@@ -217,6 +217,9 @@
                 JVM_RegisterSignal;
                 JVM_ReleaseUTF;
                 JVM_ResolveClass;
+                JVM_KnownToNotExist;
+                JVM_GetResourceLookupCacheURLs;
+                JVM_GetResourceLookupCache;
                 JVM_ResumeThread;
                 JVM_Send;
                 JVM_SendTo;
--- a/make/linux/makefiles/mapfile-vers-product	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/linux/makefiles/mapfile-vers-product	Fri Apr 10 16:58:26 2015 -0700
@@ -217,6 +217,9 @@
                 JVM_RegisterSignal;
                 JVM_ReleaseUTF;
                 JVM_ResolveClass;
+                JVM_KnownToNotExist;
+                JVM_GetResourceLookupCacheURLs;
+                JVM_GetResourceLookupCache;
                 JVM_ResumeThread;
                 JVM_Send;
                 JVM_SendTo;
--- a/make/linux/makefiles/vm.make	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/linux/makefiles/vm.make	Fri Apr 10 16:58:26 2015 -0700
@@ -249,10 +249,10 @@
 
 vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
 
-mapfile : $(MAPFILE) vm.def
+mapfile : $(MAPFILE) vm.def mapfile_ext
 	rm -f $@
 	awk '{ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE")	\
-                 { system ("cat vm.def"); }		\
+                 { system ("cat mapfile_ext"); system ("cat vm.def"); } \
                else					\
                  { print $$0 }				\
              }' > $@ < $(MAPFILE)
@@ -264,6 +264,13 @@
 vm.def: $(Res_Files) $(Obj_Files)
 	sh $(GAMMADIR)/make/linux/makefiles/build_vm_def.sh *.o > $@
 
+mapfile_ext:
+	rm -f $@
+	touch $@
+	if [ -f $(HS_ALT_MAKE)/linux/makefiles/mapfile-ext ]; then \
+	  cat $(HS_ALT_MAKE)/linux/makefiles/mapfile-ext > $@; \
+	fi
+
 ifeq ($(JVM_VARIANT_ZEROSHARK), true)
   STATIC_CXX = false
 else
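
The new mapfile_ext rule produces an extension fragment (empty unless $(HS_ALT_MAKE)/linux/makefiles/mapfile-ext exists), and the awk script now splices that fragment ahead of vm.def wherever the INSERT VTABLE SYMBOLS HERE marker appears in $(MAPFILE). A rough Python sketch of the splice, with hypothetical file names:

    from pathlib import Path

    def build_mapfile(template, mapfile_ext, vm_def, out):
        # Copy the template; where the marker line appears, emit the extension
        # fragment first and then the vm.def symbol list, as the awk rule does.
        with open(out, "w") as dst:
            for line in Path(template).read_text().splitlines():
                if "INSERT VTABLE SYMBOLS HERE" in line:
                    dst.write(Path(mapfile_ext).read_text())
                    dst.write(Path(vm_def).read_text())
                else:
                    dst.write(line + "\n")

    # hypothetical invocation mirroring the make rule
    # build_mapfile("mapfile-vers-debug", "mapfile_ext", "vm.def", "mapfile")
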
--- a/make/solaris/Makefile	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/solaris/Makefile	Fri Apr 10 16:58:26 2015 -0700
@@ -161,6 +161,7 @@
 BUILDTREE_VARS    = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) ARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH)
 BUILDTREE_VARS   += HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) HOTSPOT_BUILD_VERSION=$(HOTSPOT_BUILD_VERSION) JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)
 BUILDTREE_VARS   += ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS) OBJCOPY=$(OBJCOPY) STRIP_POLICY=$(STRIP_POLICY) ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES) ZIPEXE=$(ZIPEXE)
+BUILDTREE_VARS   += HS_ALT_MAKE=$(HS_ALT_MAKE)
 
 BUILDTREE         = $(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_VARS)
 
--- a/make/solaris/makefiles/add_gnu_debuglink.make	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,54 +0,0 @@
-#
-# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#  
-#
-
-# Rules to build add_gnu_debuglink, used by vm.make on Solaris
-
-# Allow $(ADD_GNU_DEBUGLINK) to be called from any directory.
-# We don't set or use the GENERATED macro to avoid affecting
-# other HotSpot Makefiles.
-TOPDIR                    = $(shell echo `pwd`)
-ADD_GNU_DEBUGLINK         = $(TOPDIR)/../generated/add_gnu_debuglink
-
-ADD_GNU_DEBUGLINK_DIR     = $(GAMMADIR)/src/os/solaris/add_gnu_debuglink
-ADD_GNU_DEBUGLINK_SRC     = $(ADD_GNU_DEBUGLINK_DIR)/add_gnu_debuglink.c
-ADD_GNU_DEBUGLINK_FLAGS   = 
-LIBS_ADD_GNU_DEBUGLINK   += -lelf
-
-ifeq ("${Platform_compiler}", "sparcWorks")
-# Enable the following ADD_GNU_DEBUGLINK_FLAGS addition if you need to
-# compare the built ELF objects.
-#
-# The -g option makes static data global and the "-W0,-noglobal"
-# option tells the compiler to not globalize static data using a unique
-# globalization prefix. Instead force the use of a static globalization
-# prefix based on the source filepath so the objects from two identical
-# compilations are the same.
-#
-# Note: The blog says to use "-W0,-xglobalstatic", but that doesn't
-#       seem to work. I got "-W0,-noglobal" from Kelly and that works.
-#ADD_GNU_DEBUGLINK_FLAGS += -W0,-noglobal
-endif # Platform_compiler == sparcWorks
-
-$(ADD_GNU_DEBUGLINK): $(ADD_GNU_DEBUGLINK_SRC)
-	$(CC) -g -o $@ $< $(ADD_GNU_DEBUGLINK_FLAGS) $(LIBS_ADD_GNU_DEBUGLINK)
--- a/make/solaris/makefiles/adjust-mflags.sh	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/solaris/makefiles/adjust-mflags.sh	Fri Apr 10 16:58:26 2015 -0700
@@ -64,7 +64,7 @@
 	echo "$MFLAGS" \
 	| sed '
 		s/^-/ -/
-		s/ -\([^ 	][^ 	]*\)j/ -\1 -j/
+		s/ -\([^ 	I][^ 	I]*\)j/ -\1 -j/
 		s/ -j[0-9][0-9]*/ -j/
 		s/ -j\([^ 	]\)/ -j -\1/
 		s/ -j/ -j'${HOTSPOT_BUILD_JOBS:-${default_build_jobs}}'/
--- a/make/solaris/makefiles/buildtree.make	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/solaris/makefiles/buildtree.make	Fri Apr 10 16:58:26 2015 -0700
@@ -257,6 +257,8 @@
 	    echo && echo "ZIP_DEBUGINFO_FILES = $(ZIP_DEBUGINFO_FILES)"; \
 	[ -n "$(ZIPEXE)" ] && \
 	    echo && echo "ZIPEXE = $(ZIPEXE)"; \
+	[ -n "$(HS_ALT_MAKE)" ] && \
+	    echo && echo "HS_ALT_MAKE = $(HS_ALT_MAKE)"; \
 	[ -n "$(HOTSPOT_EXTRA_SYSDEFS)" ] && \
 	    echo && \
 	    echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
--- a/make/solaris/makefiles/defs.make	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/solaris/makefiles/defs.make	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -133,6 +133,55 @@
         OBJCOPY=$(shell test -x $(ALT_OBJCOPY) && echo $(ALT_OBJCOPY))
       endif
 
+      ifneq ($(OBJCOPY),)
+        # OBJCOPY version check:
+        # - version number is last blank-separated word on first line
+        # - version number formats that have been seen:
+        #   - <major>.<minor>
+        #   - <major>.<minor>.<micro>
+        #
+        # Full Debug Symbols on Solaris needs version 2.21.1 or newer.
+        #
+        OBJCOPY_VERS_CHK := $(shell \
+          $(OBJCOPY) --version \
+            | sed -n \
+                  -e 's/.* //' \
+                  -e '/^[01]\./b bad' \
+                  -e '/^2\./{' \
+                  -e '  s/^2\.//' \
+                  -e '  /^[0-9]$$/b bad' \
+                  -e '  /^[0-9]\./b bad' \
+                  -e '  /^1[0-9]$$/b bad' \
+                  -e '  /^1[0-9]\./b bad' \
+                  -e '  /^20\./b bad' \
+                  -e '  /^21\.0$$/b bad' \
+                  -e '  /^21\.0\./b bad' \
+                  -e '}' \
+                  -e ':good' \
+                  -e 's/.*/VALID_VERSION/p' \
+                  -e 'q' \
+                  -e ':bad' \
+                  -e 's/.*/BAD_VERSION/p' \
+                  -e 'q' \
+          )
+        ifeq ($(OBJCOPY_VERS_CHK),BAD_VERSION)
+          _JUNK_ := $(shell \
+            echo >&2 "WARNING: $(OBJCOPY) --version info:"; \
+            $(OBJCOPY) --version | sed -n -e 's/^/WARNING: /p' -e 'q' >&2; \
+            echo >&2 "WARNING: an objcopy version of 2.21.1 or newer" \
+              "is needed to create valid .debuginfo files."; \
+            echo >&2 "WARNING: ignoring above objcopy command."; \
+            echo >&2 "WARNING: patch 149063-01 or newer contains the" \
+              "correct Solaris 10 SPARC version."; \
+            echo >&2 "WARNING: patch 149064-01 or newer contains the" \
+              "correct Solaris 10 X86 version."; \
+            echo >&2 "WARNING: Solaris 11 Update 1 contains the" \
+              "correct version."; \
+            )
+          OBJCOPY=
+        endif
+      endif
+
       ifeq ($(OBJCOPY),)
         _JUNK_ := $(shell \
           echo >&2 "INFO: no objcopy cmd found so cannot create .debuginfo files.")
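
The sed pipeline above reduces the last blank-separated word of objcopy --version's first line to either VALID_VERSION or BAD_VERSION, so that gobjcopy builds older than 2.21.1 (which cannot produce usable .debuginfo files on Solaris) are ignored. A simplified Python equivalent of the stated 2.21.1 minimum (the sed script additionally tolerates a few version-string edge cases of its own):

    def objcopy_version_ok(first_line):
        # Last blank-separated word, e.g. "GNU objcopy (GNU Binutils) 2.21.1" -> "2.21.1"
        word = first_line.split()[-1]
        parts = [int(p) for p in word.split(".")[:3]]
        parts += [0] * (3 - len(parts))       # pad "<major>.<minor>" out to three fields
        return tuple(parts) >= (2, 21, 1)     # Full Debug Symbols needs 2.21.1 or newer

    assert objcopy_version_ok("GNU objcopy (GNU Binutils) 2.21.1")
    assert not objcopy_version_ok("GNU objcopy 2.15")
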
--- a/make/solaris/makefiles/dtrace.make	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/solaris/makefiles/dtrace.make	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -96,25 +96,16 @@
 XLIBJVM_DTRACE_DEBUGINFO   = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE_DEBUGINFO)
 XLIBJVM_DTRACE_DIZ         = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE_DIZ)
 
-$(XLIBJVM_DB): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS).h $(LIBJVM_DB_MAPFILE)
+$(XLIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS).h $(LIBJVM_DB_MAPFILE)
 	@echo Making $@
 	$(QUIETLY) mkdir -p $(XLIBJVM_DIR) ; \
 	$(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. -I$(GENERATED) \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-# gobjcopy crashes on "empty" section headers with the SHF_ALLOC flag set.
-# Clear the SHF_ALLOC flag (if set) from empty section headers.
-# An empty section header has sh_addr == 0 and sh_size == 0.
-# This problem has only been seen on Solaris X64, but we call this tool
-# on all Solaris builds just in case.
-	$(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
 	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(XLIBJVM_DB_DEBUGINFO)
-# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections.
-# Use $(ADD_GNU_DEBUGLINK) until a fixed $(OBJCOPY) is available.
-#         $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB) ;
 # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR) is not
 # in the link name:
-	( cd $(XLIBJVM_DIR) && $(ADD_GNU_DEBUGLINK) $(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB) )
+	( cd $(XLIBJVM_DIR) && $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB) )
   ifeq ($(STRIP_POLICY),all_strip)
 	$(QUIETLY) $(STRIP) $@
   else
@@ -131,20 +122,16 @@
   endif
 endif
 
-$(XLIBJVM_DTRACE): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
+$(XLIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
 	@echo Making $@
 	$(QUIETLY) mkdir -p $(XLIBJVM_DIR) ; \
 	$(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-# Clear the SHF_ALLOC flag (if set) from empty section headers.
-	$(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
 	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(XLIBJVM_DTRACE_DEBUGINFO)
-# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections.
-#         $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE) ;
 # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR) is not
 # in the link name:
-	( cd $(XLIBJVM_DIR) && $(ADD_GNU_DEBUGLINK) $(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE) )
+	( cd $(XLIBJVM_DIR) && $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE) )
   ifeq ($(STRIP_POLICY),all_strip)
 	$(QUIETLY) $(STRIP) $@
   else
@@ -201,17 +188,13 @@
 $(JVMOFFS.o): $(JVMOFFS).h $(JVMOFFS).cpp 
 	$(QUIETLY) $(CXX) -c -I. -o $@ $(ARCHFLAG) -D$(TYPE) $(JVMOFFS).cpp
 
-$(LIBJVM_DB): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS.o) $(XLIBJVM_DB) $(LIBJVM_DB_MAPFILE)
+$(LIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS.o) $(XLIBJVM_DB) $(LIBJVM_DB_MAPFILE)
 	@echo Making $@
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. -I$(GENERATED) \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-# Clear the SHF_ALLOC flag (if set) from empty section headers.
-	$(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
 	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DB_DEBUGINFO)
-# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections.
-#	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $@
-	$(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBJVM_DB_DEBUGINFO) $@
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $@
   ifeq ($(STRIP_POLICY),all_strip)
 	$(QUIETLY) $(STRIP) $@
   else
@@ -226,17 +209,13 @@
   endif
 endif
 
-$(LIBJVM_DTRACE): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(XLIBJVM_DTRACE) $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
+$(LIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(XLIBJVM_DTRACE) $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
 	@echo Making $@
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I.  \
 		$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-# Clear the SHF_ALLOC flag (if set) from empty section headers.
-	$(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
 	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DTRACE_DEBUGINFO)
-# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections.
-#	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $@
-	$(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBJVM_DTRACE_DEBUGINFO) $@
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $@
   ifeq ($(STRIP_POLICY),all_strip)
 	$(QUIETLY) $(STRIP) $@
   else
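
With a new enough objcopy guaranteed by the defs.make check above, the Solaris rules can drop the fix_empty_sec_hdr_flags and add_gnu_debuglink helper tools and use the standard two-step objcopy flow directly, the same flow already used on Linux. A minimal sketch of that flow (hypothetical helper, driving the real --only-keep-debug and --add-gnu-debuglink options):

    import subprocess

    def split_debuginfo(objcopy, lib, debuginfo):
        # 1. Copy only the debug sections of the library into a side file.
        subprocess.check_call([objcopy, "--only-keep-debug", lib, debuginfo])
        # 2. Record a .gnu_debuglink section in the library pointing at that file,
        #    so debuggers can find the stripped-out symbols again.
        subprocess.check_call([objcopy, "--add-gnu-debuglink=" + debuginfo, lib])

    # hypothetical call, mirroring the libjvm_db rule
    # split_debuginfo("gobjcopy", "libjvm_db.so", "libjvm_db.debuginfo")
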
--- a/make/solaris/makefiles/fix_empty_sec_hdr_flags.make	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,54 +0,0 @@
-#
-# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#  
-#
-
-# Rules to build fix_empty_sec_hdr_flags, used by vm.make on Solaris
-
-# Allow $(FIX_EMPTY_SEC_HDR_FLAGS) to be called from any directory.
-# We don't set or use the GENERATED macro to avoid affecting
-# other HotSpot Makefiles.
-TOPDIR                          = $(shell echo `pwd`)
-FIX_EMPTY_SEC_HDR_FLAGS         = $(TOPDIR)/../generated/fix_empty_sec_hdr_flags
-
-FIX_EMPTY_SEC_HDR_FLAGS_DIR     = $(GAMMADIR)/src/os/solaris/fix_empty_sec_hdr_flags
-FIX_EMPTY_SEC_HDR_FLAGS_SRC     = $(FIX_EMPTY_SEC_HDR_FLAGS_DIR)/fix_empty_sec_hdr_flags.c
-FIX_EMPTY_SEC_HDR_FLAGS_FLAGS   = 
-LIBS_FIX_EMPTY_SEC_HDR_FLAGS   += -lelf
-
-ifeq ("${Platform_compiler}", "sparcWorks")
-# Enable the following FIX_EMPTY_SEC_HDR_FLAGS_FLAGS addition if you need to
-# compare the built ELF objects.
-#
-# The -g option makes static data global and the "-W0,-noglobal"
-# option tells the compiler to not globalize static data using a unique
-# globalization prefix. Instead force the use of a static globalization
-# prefix based on the source filepath so the objects from two identical
-# compilations are the same.
-#
-# Note: The blog says to use "-W0,-xglobalstatic", but that doesn't
-#       seem to work. I got "-W0,-noglobal" from Kelly and that works.
-#FIX_EMPTY_SEC_HDR_FLAGS_FLAGS += -W0,-noglobal
-endif # Platform_compiler == sparcWorks
-
-$(FIX_EMPTY_SEC_HDR_FLAGS): $(FIX_EMPTY_SEC_HDR_FLAGS_SRC)
-	$(CC) -g -o $@ $< $(FIX_EMPTY_SEC_HDR_FLAGS_FLAGS) $(LIBS_FIX_EMPTY_SEC_HDR_FLAGS)
--- a/make/solaris/makefiles/jsig.make	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/solaris/makefiles/jsig.make	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -47,22 +47,13 @@
 LFLAGS_JSIG += -mt -xnolib
 endif
 
-$(LIBJSIG): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
+$(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
 	@echo Making signal interposition lib...
 	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
                          $(LFLAGS_JSIG) -o $@ $(JSIGSRCDIR)/jsig.c -ldl
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-# gobjcopy crashes on "empty" section headers with the SHF_ALLOC flag set.
-# Clear the SHF_ALLOC flag (if set) from empty section headers.
-# An empty section header has sh_addr == 0 and sh_size == 0.
-# This problem has only been seen on Solaris X64, but we call this tool
-# on all Solaris builds just in case.
-	$(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
 	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJSIG_DEBUGINFO)
-# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections.
-# Use $(ADD_GNU_DEBUGLINK) until a fixed $(OBJCOPY) is available.
-#	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@
-	$(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBJSIG_DEBUGINFO) $@
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@
   ifeq ($(STRIP_POLICY),all_strip)
 	$(QUIETLY) $(STRIP) $@
   else
--- a/make/solaris/makefiles/mapfile-vers	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/solaris/makefiles/mapfile-vers	Fri Apr 10 16:58:26 2015 -0700
@@ -189,6 +189,9 @@
                 JVM_IsSupportedJNIVersion;
                 JVM_IsThreadAlive;
                 JVM_IsVMGeneratedMethodIx;
+                JVM_KnownToNotExist;
+                JVM_GetResourceLookupCacheURLs;
+                JVM_GetResourceLookupCache;
                 JVM_LatestUserDefinedLoader;
                 JVM_Listen;
                 JVM_LoadClass0;
--- a/make/solaris/makefiles/saproc.make	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/solaris/makefiles/saproc.make	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -90,7 +90,7 @@
 #SOLARIS_11_B159_OR_LATER=-DSOLARIS_11_B159_OR_LATER
 
 
-$(LIBSAPROC): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(SASRCFILES) $(SADISOBJ) $(SAMAPFILE)
+$(LIBSAPROC): $(SASRCFILES) $(SADISOBJ) $(SAMAPFILE)
 	$(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
 	  echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
 	  exit 1; \
@@ -121,17 +121,8 @@
 	           -c -o $(SADISOBJ)
 	
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-# gobjcopy crashes on "empty" section headers with the SHF_ALLOC flag set.
-# Clear the SHF_ALLOC flag (if set) from empty section headers.
-# An empty section header has sh_addr == 0 and sh_size == 0.
-# This problem has only been seen on Solaris X64, but we call this tool
-# on all Solaris builds just in case.
-	$(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
 	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBSAPROC_DEBUGINFO)
-# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections.
-# Use $(ADD_GNU_DEBUGLINK) until a fixed $(OBJCOPY) is available.
-#	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBSAPROC_DEBUGINFO) $@
-	$(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBSAPROC_DEBUGINFO) $@
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBSAPROC_DEBUGINFO) $@
   ifeq ($(STRIP_POLICY),all_strip)
 	$(QUIETLY) $(STRIP) $@
   else
--- a/make/solaris/makefiles/vm.make	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/solaris/makefiles/vm.make	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -157,14 +157,6 @@
 include $(MAKEFILES_DIR)/dtrace.make
 
 #----------------------------------------------------------------------
-# add_gnu_debuglink tool
-include $(MAKEFILES_DIR)/add_gnu_debuglink.make
-
-#----------------------------------------------------------------------
-# fix_empty_sec_hdr_flags tool
-include $(MAKEFILES_DIR)/fix_empty_sec_hdr_flags.make
-
-#----------------------------------------------------------------------
 # JVM
 
 JVM      = jvm
@@ -263,11 +255,12 @@
 
 vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
 
-mapfile : $(MAPFILE) $(MAPFILE_DTRACE_OPT) vm.def
+mapfile : $(MAPFILE) $(MAPFILE_DTRACE_OPT) vm.def mapfile_ext
 	rm -f $@
 	cat $(MAPFILE) $(MAPFILE_DTRACE_OPT) \
 	    | $(NAWK) '{                                         \
 	              if ($$0 ~ "INSERT VTABLE SYMBOLS HERE") {  \
+	                  system ("cat mapfile_ext");            \
 	                  system ("cat vm.def");                 \
 	              } else {                                   \
 	                  print $$0;                             \
@@ -281,6 +274,13 @@
 vm.def: $(Obj_Files)
 	sh $(GAMMADIR)/make/solaris/makefiles/build_vm_def.sh *.o > $@
 
+mapfile_ext:
+	rm -f $@
+	touch $@
+	if [ -f $(HS_ALT_MAKE)/solaris/makefiles/mapfile-ext ]; then \
+	  cat $(HS_ALT_MAKE)/solaris/makefiles/mapfile-ext > $@; \
+	fi
+
 ifeq ($(LINK_INTO),AOUT)
   LIBJVM.o                 =
   LIBJVM_MAPFILE           =
@@ -307,7 +307,7 @@
 LINK_VM = $(LINK_LIB.CXX)
 endif
 # making the library:
-$(LIBJVM): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(LIBJVM.o) $(LIBJVM_MAPFILE)
+$(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE)
 ifeq ($(filter -sbfast -xsbfast, $(CFLAGS_BROWSE)),)
 	@echo Linking vm...
 	$(QUIETLY) $(LINK_LIB.CXX/PRE_HOOK)
@@ -315,17 +315,8 @@
 	$(QUIETLY) $(LINK_LIB.CXX/POST_HOOK)
 	$(QUIETLY) rm -f $@.1 && ln -s $@ $@.1
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-# gobjcopy crashes on "empty" section headers with the SHF_ALLOC flag set.
-# Clear the SHF_ALLOC flag (if set) from empty section headers.
-# An empty section header has sh_addr == 0 and sh_size == 0.
-# This problem has only been seen on Solaris X64, but we call this tool
-# on all Solaris builds just in case.
-	$(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
 	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DEBUGINFO)
-# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections.
-# Use $(ADD_GNU_DEBUGLINK) until a fixed $(OBJCOPY) is available.
-#	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DEBUGINFO) $@
-	$(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBJVM_DEBUGINFO) $@
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DEBUGINFO) $@
   ifeq ($(STRIP_POLICY),all_strip)
 	$(QUIETLY) $(STRIP) $@
   else
--- a/make/windows/makefiles/compile.make	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/windows/makefiles/compile.make	Fri Apr 10 16:58:26 2015 -0700
@@ -268,7 +268,7 @@
 !endif
 LD_FLAGS= $(LD_FLAGS) kernel32.lib user32.lib gdi32.lib winspool.lib \
  comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib \
- uuid.lib Wsock32.lib winmm.lib /nologo /machine:$(MACHINE) /opt:REF \
+ uuid.lib Wsock32.lib winmm.lib version.lib /nologo /machine:$(MACHINE) /opt:REF \
  /opt:ICF,8
 !if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"
 LD_FLAGS= $(LD_FLAGS) /map /debug
--- a/make/windows/makefiles/sa.make	Fri Apr 10 16:55:38 2015 -0700
+++ b/make/windows/makefiles/sa.make	Fri Apr 10 16:58:26 2015 -0700
@@ -111,7 +111,7 @@
 SA_LFLAGS = $(SA_LFLAGS) -map -debug
 !endif
 !if "$(BUILDARCH)" == "i486"
-SA_LFLAGS = $(SAFESEH_FLAG) $(SA_LFLAGS)
+SA_LFLAGS = /SAFESEH $(SA_LFLAGS)
 !endif
 
 SA_CFLAGS = $(SA_CFLAGS) $(MP_FLAG)
--- a/mx/mx_graal.py	Fri Apr 10 16:55:38 2015 -0700
+++ b/mx/mx_graal.py	Fri Apr 10 16:58:26 2015 -0700
@@ -84,7 +84,7 @@
 _minVersion = mx.VersionSpec('1.8')
 
 # max version (first _unsupported_ version)
-_untilVersion = mx.VersionSpec('1.8.0_40')
+_untilVersion = None
 
 class JDKDeployedDist:
     def __init__(self, name, isExtension):
@@ -2531,7 +2531,7 @@
     # TODO _minVersion check could probably be part of a Suite in mx?
     if mx.java().version < _minVersion:
         mx.abort('Requires Java version ' + str(_minVersion) + ' or greater for JAVA_HOME, got version ' + str(mx.java().version))
-    if mx.java().version >= _untilVersion:
+    if _untilVersion and mx.java().version >= _untilVersion:
         mx.abort('Requires Java version strictly before ' + str(_untilVersion) + ' for JAVA_HOME, got version ' + str(mx.java().version))
 
     if _vmSourcesAvailable:
--- a/mx/suite.py	Fri Apr 10 16:55:38 2015 -0700
+++ b/mx/suite.py	Fri Apr 10 16:58:26 2015 -0700
@@ -329,7 +329,6 @@
         "com.oracle.graal.runtime",
         "com.oracle.graal.printer",
         "com.oracle.graal.hotspotvmconfig",
-        "com.oracle.nfi",
       ],
       "checkstyle" : "com.oracle.graal.graph",
       "annotationProcessors" : [
@@ -389,7 +388,10 @@
     "com.oracle.graal.hotspot.sparc" : {
       "subDir" : "graal",
       "sourceDirs" : ["src"],
-      "dependencies" : ["com.oracle.graal.compiler.sparc"],
+      "dependencies" : [
+        "com.oracle.graal.compiler.sparc",
+        "com.oracle.graal.replacements.sparc",
+      ],
       "checkstyle" : "com.oracle.graal.graph",
       "annotationProcessors" : ["com.oracle.graal.service.processor"],
       "javaCompliance" : "1.8",
@@ -594,7 +596,7 @@
       "sourceDirs" : ["src"],
       "dependencies" : [
           "com.oracle.graal.replacements",
-          "com.oracle.graal.amd64",
+          "com.oracle.graal.lir.amd64",
           ],
       "checkstyle" : "com.oracle.graal.graph",
       "javaCompliance" : "1.8",
@@ -602,6 +604,17 @@
       "workingSets" : "Graal,Replacements,AMD64",
     },
 
+    "com.oracle.graal.replacements.sparc" : {
+      "subDir" : "graal",
+      "sourceDirs" : ["src"],
+      "dependencies" : [
+          "com.oracle.graal.replacements",
+          ],
+      "checkstyle" : "com.oracle.graal.graph",
+      "javaCompliance" : "1.8",
+      "workingSets" : "Graal,Replacements,SPARC",
+    },
+
     "com.oracle.graal.replacements.test" : {
       "subDir" : "graal",
       "sourceDirs" : ["src"],
@@ -1050,6 +1063,7 @@
         "com.oracle.truffle.api",
         "com.oracle.graal.runtime",
         "com.oracle.graal.printer",
+        "com.oracle.graal.replacements",
       ],
       "checkstyle" : "com.oracle.graal.graph",
       "javaCompliance" : "1.8",
@@ -1078,6 +1092,7 @@
       "dependencies" : [
         "com.oracle.graal.truffle",
         "com.oracle.graal.hotspot",
+        "com.oracle.nfi",
       ],
       "checkstyle" : "com.oracle.graal.graph",
       "javaCompliance" : "1.8",
@@ -1090,7 +1105,7 @@
       "sourceDirs" : ["src"],
       "dependencies" : [
         "com.oracle.graal.truffle.hotspot",
-        "com.oracle.graal.asm.amd64",
+        "com.oracle.graal.hotspot.amd64",
       ],
       "checkstyle" : "com.oracle.graal.graph",
       "javaCompliance" : "1.8",
--- a/mxtool/mx.py	Fri Apr 10 16:55:38 2015 -0700
+++ b/mxtool/mx.py	Fri Apr 10 16:58:26 2015 -0700
@@ -1674,18 +1674,23 @@
     candidateJdks = []
     if get_os() == 'darwin':
         base = '/Library/Java/JavaVirtualMachines'
-        candidateJdks = [join(base, n, 'Contents/Home') for n in os.listdir(base) if exists(join(base, n, 'Contents/Home'))]
+        if exists(base):
+            candidateJdks = [join(base, n, 'Contents/Home') for n in os.listdir(base) if exists(join(base, n, 'Contents/Home'))]
     elif get_os() == 'linux':
         base = '/usr/lib/jvm'
-        candidateJdks = [join(base, n) for n in os.listdir(base) if exists(join(base, n, 'jre/lib/rt.jar'))]
+        if exists(base):
+            candidateJdks = [join(base, n) for n in os.listdir(base) if exists(join(base, n, 'jre/lib/rt.jar'))]
         base = '/usr/java'
-        candidateJdks += [join(base, n) for n in os.listdir(base) if exists(join(base, n, 'jre/lib/rt.jar'))]
+        if exists(base):
+            candidateJdks += [join(base, n) for n in os.listdir(base) if exists(join(base, n, 'jre/lib/rt.jar'))]
     elif get_os() == 'solaris':
         base = '/usr/jdk/instances'
-        candidateJdks = [join(base, n) for n in os.listdir(base) if exists(join(base, n, 'jre/lib/rt.jar'))]
+        if exists(base):
+            candidateJdks = [join(base, n) for n in os.listdir(base) if exists(join(base, n, 'jre/lib/rt.jar'))]
     elif get_os() == 'windows':
         base = r'C:\Program Files\Java'
-        candidateJdks = [join(base, n) for n in os.listdir(base) if exists(join(base, n, r'jre\lib\rt.jar'))]
+        if exists(base):
+            candidateJdks = [join(base, n) for n in os.listdir(base) if exists(join(base, n, r'jre\lib\rt.jar'))]
 
     javaHome = None
     if len(candidateJdks) != 0:
--- a/src/cpu/ppc/vm/assembler_ppc.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/ppc/vm/assembler_ppc.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -693,7 +693,7 @@
   // PPC 1, section 4.6.7 Floating-Point Compare Instructions
   fcmpu( CCR7, F24, F25);
 
-  tty->print_cr("\ntest_asm disassembly (0x%lx 0x%lx):", code()->insts_begin(), code()->insts_end());
+  tty->print_cr("\ntest_asm disassembly (0x%lx 0x%lx):", p2i(code()->insts_begin()), p2i(code()->insts_end()));
   code()->decode();
 }
 
--- a/src/cpu/ppc/vm/assembler_ppc.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/ppc/vm/assembler_ppc.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -268,8 +268,35 @@
 
     ISEL_OPCODE   = (31u << OPCODE_SHIFT |  15u << 1),
 
-    MTLR_OPCODE   = (31u << OPCODE_SHIFT | 467u << 1 | 8 << SPR_0_4_SHIFT),
-    MFLR_OPCODE   = (31u << OPCODE_SHIFT | 339u << 1 | 8 << SPR_0_4_SHIFT),
+    // Special purpose registers
+    MTSPR_OPCODE  = (31u << OPCODE_SHIFT | 467u << 1),
+    MFSPR_OPCODE  = (31u << OPCODE_SHIFT | 339u << 1),
+
+    MTXER_OPCODE  = (MTSPR_OPCODE | 1 << SPR_0_4_SHIFT),
+    MFXER_OPCODE  = (MFSPR_OPCODE | 1 << SPR_0_4_SHIFT),
+
+    MTDSCR_OPCODE = (MTSPR_OPCODE | 3 << SPR_0_4_SHIFT),
+    MFDSCR_OPCODE = (MFSPR_OPCODE | 3 << SPR_0_4_SHIFT),
+
+    MTLR_OPCODE   = (MTSPR_OPCODE | 8 << SPR_0_4_SHIFT),
+    MFLR_OPCODE   = (MFSPR_OPCODE | 8 << SPR_0_4_SHIFT),
+
+    MTCTR_OPCODE  = (MTSPR_OPCODE | 9 << SPR_0_4_SHIFT),
+    MFCTR_OPCODE  = (MFSPR_OPCODE | 9 << SPR_0_4_SHIFT),
+
+    MTTFHAR_OPCODE   = (MTSPR_OPCODE | 128 << SPR_0_4_SHIFT),
+    MFTFHAR_OPCODE   = (MFSPR_OPCODE | 128 << SPR_0_4_SHIFT),
+    MTTFIAR_OPCODE   = (MTSPR_OPCODE | 129 << SPR_0_4_SHIFT),
+    MFTFIAR_OPCODE   = (MFSPR_OPCODE | 129 << SPR_0_4_SHIFT),
+    MTTEXASR_OPCODE  = (MTSPR_OPCODE | 130 << SPR_0_4_SHIFT),
+    MFTEXASR_OPCODE  = (MFSPR_OPCODE | 130 << SPR_0_4_SHIFT),
+    MTTEXASRU_OPCODE = (MTSPR_OPCODE | 131 << SPR_0_4_SHIFT),
+    MFTEXASRU_OPCODE = (MFSPR_OPCODE | 131 << SPR_0_4_SHIFT),
+
+    MTVRSAVE_OPCODE  = (MTSPR_OPCODE | 256 << SPR_0_4_SHIFT),
+    MFVRSAVE_OPCODE  = (MFSPR_OPCODE | 256 << SPR_0_4_SHIFT),
+
+    MFTB_OPCODE   = (MFSPR_OPCODE | 268 << SPR_0_4_SHIFT),
 
     MTCRF_OPCODE  = (31u << OPCODE_SHIFT | 144u << 1),
     MFCR_OPCODE   = (31u << OPCODE_SHIFT | 19u << 1),
@@ -291,13 +318,11 @@
 
     // CTR-related opcodes
     BCCTR_OPCODE  = (19u << OPCODE_SHIFT | 528u << 1),
-    MTCTR_OPCODE  = (31u << OPCODE_SHIFT | 467u << 1 | 9 << SPR_0_4_SHIFT),
-    MFCTR_OPCODE  = (31u << OPCODE_SHIFT | 339u << 1 | 9 << SPR_0_4_SHIFT),
-
 
     LWZ_OPCODE   = (32u << OPCODE_SHIFT),
     LWZX_OPCODE  = (31u << OPCODE_SHIFT |  23u << 1),
     LWZU_OPCODE  = (33u << OPCODE_SHIFT),
+    LWBRX_OPCODE = (31u << OPCODE_SHIFT |  534 << 1),
 
     LHA_OPCODE   = (42u << OPCODE_SHIFT),
     LHAX_OPCODE  = (31u << OPCODE_SHIFT | 343u << 1),
@@ -306,6 +331,7 @@
     LHZ_OPCODE   = (40u << OPCODE_SHIFT),
     LHZX_OPCODE  = (31u << OPCODE_SHIFT | 279u << 1),
     LHZU_OPCODE  = (41u << OPCODE_SHIFT),
+    LHBRX_OPCODE = (31u << OPCODE_SHIFT |  790 << 1),
 
     LBZ_OPCODE   = (34u << OPCODE_SHIFT),
     LBZX_OPCODE  = (31u << OPCODE_SHIFT |  87u << 1),
@@ -583,6 +609,37 @@
     MTVSCR_OPCODE  = (4u  << OPCODE_SHIFT | 1604u     ),
     MFVSCR_OPCODE  = (4u  << OPCODE_SHIFT | 1540u     ),
 
+    // AES (introduced with Power 8)
+    VCIPHER_OPCODE      = (4u  << OPCODE_SHIFT | 1288u),
+    VCIPHERLAST_OPCODE  = (4u  << OPCODE_SHIFT | 1289u),
+    VNCIPHER_OPCODE     = (4u  << OPCODE_SHIFT | 1352u),
+    VNCIPHERLAST_OPCODE = (4u  << OPCODE_SHIFT | 1353u),
+    VSBOX_OPCODE        = (4u  << OPCODE_SHIFT | 1480u),
+
+    // SHA (introduced with Power 8)
+    VSHASIGMAD_OPCODE   = (4u  << OPCODE_SHIFT | 1730u),
+    VSHASIGMAW_OPCODE   = (4u  << OPCODE_SHIFT | 1666u),
+
+    // Vector Binary Polynomial Multiplication (introduced with Power 8)
+    VPMSUMB_OPCODE      = (4u  << OPCODE_SHIFT | 1032u),
+    VPMSUMD_OPCODE      = (4u  << OPCODE_SHIFT | 1224u),
+    VPMSUMH_OPCODE      = (4u  << OPCODE_SHIFT | 1096u),
+    VPMSUMW_OPCODE      = (4u  << OPCODE_SHIFT | 1160u),
+
+    // Vector Permute and Xor (introduced with Power 8)
+    VPERMXOR_OPCODE     = (4u  << OPCODE_SHIFT |   45u),
+
+    // Transactional Memory instructions (introduced with Power 8)
+    TBEGIN_OPCODE    = (31u << OPCODE_SHIFT |  654u << 1),
+    TEND_OPCODE      = (31u << OPCODE_SHIFT |  686u << 1),
+    TABORT_OPCODE    = (31u << OPCODE_SHIFT |  910u << 1),
+    TABORTWC_OPCODE  = (31u << OPCODE_SHIFT |  782u << 1),
+    TABORTWCI_OPCODE = (31u << OPCODE_SHIFT |  846u << 1),
+    TABORTDC_OPCODE  = (31u << OPCODE_SHIFT |  814u << 1),
+    TABORTDCI_OPCODE = (31u << OPCODE_SHIFT |  878u << 1),
+    TSR_OPCODE       = (31u << OPCODE_SHIFT |  750u << 1),
+    TCHECK_OPCODE    = (31u << OPCODE_SHIFT |  718u << 1),
+
     // Icache and dcache related instructions
     DCBA_OPCODE    = (31u << OPCODE_SHIFT |  758u << 1),
     DCBZ_OPCODE    = (31u << OPCODE_SHIFT | 1014u << 1),
@@ -1364,11 +1421,17 @@
   inline void lwax( Register d, Register s1, Register s2);
   inline void lwa(  Register d, int si16,    Register s1);
 
+  // 4 bytes reversed
+  inline void lwbrx( Register d, Register s1, Register s2);
+
   // 2 bytes
   inline void lhzx( Register d, Register s1, Register s2);
   inline void lhz(  Register d, int si16,    Register s1);
   inline void lhzu( Register d, int si16,    Register s1);
 
+  // 2 bytes reversed
+  inline void lhbrx( Register d, Register s1, Register s2);
+
   // 2 bytes
   inline void lhax( Register d, Register s1, Register s2);
   inline void lha(  Register d, int si16,    Register s1);
@@ -1412,6 +1475,25 @@
   inline void mcrf( ConditionRegister crd, ConditionRegister cra);
   inline void mtcr( Register s);
 
+  // Special purpose registers
+  // Exception Register
+  inline void mtxer(Register s1);
+  inline void mfxer(Register d);
+  // Vector Register Save Register
+  inline void mtvrsave(Register s1);
+  inline void mfvrsave(Register d);
+  // Timebase
+  inline void mftb(Register d);
+  // Introduced with Power 8:
+  // Data Stream Control Register
+  inline void mtdscr(Register s1);
+  inline void mfdscr(Register d );
+  // Transactional Memory Registers
+  inline void mftfhar(Register d);
+  inline void mftfiar(Register d);
+  inline void mftexasr(Register d);
+  inline void mftexasru(Register d);
+
   // PPC 1, section 2.4.1 Branch Instructions
   inline void b(  address a, relocInfo::relocType rt = relocInfo::none);
   inline void b(  Label& L);
@@ -1852,16 +1934,51 @@
   inline void mtvscr(   VectorRegister b);
   inline void mfvscr(   VectorRegister d);
 
+  // AES (introduced with Power 8)
+  inline void vcipher(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcipherlast( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vncipher(    VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vncipherlast(VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsbox(       VectorRegister d, VectorRegister a);
+
+  // SHA (introduced with Power 8)
+  // Not yet implemented.
+
+  // Vector Binary Polynomial Multiplication (introduced with Power 8)
+  inline void vpmsumb(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpmsumd(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpmsumh(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpmsumw(  VectorRegister d, VectorRegister a, VectorRegister b);
+
+  // Vector Permute and Xor (introduced with Power 8)
+  inline void vpermxor( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+
+  // Transactional Memory instructions (introduced with Power 8)
+  inline void tbegin_();    // R=0
+  inline void tbeginrot_(); // R=1 Rollback-Only Transaction
+  inline void tend_();    // A=0
+  inline void tendall_(); // A=1
+  inline void tabort_(Register a);
+  inline void tabortwc_(int t, Register a, Register b);
+  inline void tabortwci_(int t, Register a, int si);
+  inline void tabortdc_(int t, Register a, Register b);
+  inline void tabortdci_(int t, Register a, int si);
+  inline void tsuspend_(); // tsr with L=0
+  inline void tresume_();  // tsr with L=1
+  inline void tcheck(int f);
+
   // The following encoders use r0 as second operand. These instructions
   // read r0 as '0'.
   inline void lwzx( Register d, Register s2);
   inline void lwz(  Register d, int si16);
   inline void lwax( Register d, Register s2);
   inline void lwa(  Register d, int si16);
+  inline void lwbrx(Register d, Register s2);
   inline void lhzx( Register d, Register s2);
   inline void lhz(  Register d, int si16);
   inline void lhax( Register d, Register s2);
   inline void lha(  Register d, int si16);
+  inline void lhbrx(Register d, Register s2);
   inline void lbzx( Register d, Register s2);
   inline void lbz(  Register d, int si16);
   inline void ldx(  Register d, Register s2);
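
Expressing every mtspr/mfspr variant as the generic MTSPR/MFSPR opcode plus the register's SPR number in the SPR_0_4 field keeps the existing LR/CTR encodings identical while letting the new Power 8 registers (DSCR, the transactional-memory SPRs, VRSAVE, the timebase) reuse the same scheme. A quick Python check, assuming OPCODE_SHIFT = 26 and SPR_0_4_SHIFT = 16 as defined elsewhere in this header (not shown in these hunks):

    OPCODE_SHIFT  = 26   # assumed: PPC primary opcode field
    SPR_0_4_SHIFT = 16   # assumed: low five bits of the SPR number

    MTSPR_OPCODE = (31 << OPCODE_SHIFT) | (467 << 1)
    MFSPR_OPCODE = (31 << OPCODE_SHIFT) | (339 << 1)

    MTLR_OPCODE  = MTSPR_OPCODE | (8 << SPR_0_4_SHIFT)   # LR  is SPR 8
    MFCTR_OPCODE = MFSPR_OPCODE | (9 << SPR_0_4_SHIFT)   # CTR is SPR 9

    # Same bit patterns as the old hard-coded constants (and the architected
    # mtlr/mfctr encodings):
    assert MTLR_OPCODE  == 0x7C0803A6
    assert MFCTR_OPCODE == 0x7C0902A6
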
--- a/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -263,10 +263,14 @@
 inline void Assembler::lwax( Register d, Register s1, Register s2) { emit_int32(LWAX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::lwa(  Register d, int si16,    Register s1) { emit_int32(LWA_OPCODE  | rt(d) | ds(si16)   | ra0mem(s1));}
 
+inline void Assembler::lwbrx( Register d, Register s1, Register s2) { emit_int32(LWBRX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+
 inline void Assembler::lhzx( Register d, Register s1, Register s2) { emit_int32(LHZX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::lhz(  Register d, int si16,    Register s1) { emit_int32(LHZ_OPCODE  | rt(d) | d1(si16)   | ra0mem(s1));}
 inline void Assembler::lhzu( Register d, int si16,    Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LHZU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
 
+inline void Assembler::lhbrx( Register d, Register s1, Register s2) { emit_int32(LHBRX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+
 inline void Assembler::lhax( Register d, Register s1, Register s2) { emit_int32(LHAX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::lha(  Register d, int si16,    Register s1) { emit_int32(LHA_OPCODE  | rt(d) | d1(si16)   | ra0mem(s1));}
 inline void Assembler::lhau( Register d, int si16,    Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LHAU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
@@ -308,6 +312,25 @@
                                                       { emit_int32(MCRF_OPCODE | bf(crd) | bfa(cra)); }
 inline void Assembler::mtcr( Register s)          { Assembler::mtcrf(0xff, s); }
 
+// Special purpose registers
+// Exception Register
+inline void Assembler::mtxer(Register s1)         { emit_int32(MTXER_OPCODE | rs(s1)); }
+inline void Assembler::mfxer(Register d )         { emit_int32(MFXER_OPCODE | rt(d)); }
+// Vector Register Save Register
+inline void Assembler::mtvrsave(Register s1)      { emit_int32(MTVRSAVE_OPCODE | rs(s1)); }
+inline void Assembler::mfvrsave(Register d )      { emit_int32(MFVRSAVE_OPCODE | rt(d)); }
+// Timebase
+inline void Assembler::mftb(Register d )          { emit_int32(MFTB_OPCODE  | rt(d)); }
+// Introduced with Power 8:
+// Data Stream Control Register
+inline void Assembler::mtdscr(Register s1)        { emit_int32(MTDSCR_OPCODE | rs(s1)); }
+inline void Assembler::mfdscr(Register d )        { emit_int32(MFDSCR_OPCODE | rt(d)); }
+// Transactional Memory Registers
+inline void Assembler::mftfhar(Register d )       { emit_int32(MFTFHAR_OPCODE   | rt(d)); }
+inline void Assembler::mftfiar(Register d )       { emit_int32(MFTFIAR_OPCODE   | rt(d)); }
+inline void Assembler::mftexasr(Register d )      { emit_int32(MFTEXASR_OPCODE  | rt(d)); }
+inline void Assembler::mftexasru(Register d )     { emit_int32(MFTEXASRU_OPCODE | rt(d)); }
+
 // SAP JVM 2006-02-13 PPC branch instruction.
 // PPC 1, section 2.4.1 Branch Instructions
 inline void Assembler::b( address a, relocInfo::relocType rt) { emit_data(BXX_OPCODE| li(disp( intptr_t(a), intptr_t(pc()))) |aa(0)|lk(0), rt); }
@@ -731,15 +754,50 @@
 inline void Assembler::mtvscr(  VectorRegister b)                                     { emit_int32( MTVSCR_OPCODE   | vrb(b)); }
 inline void Assembler::mfvscr(  VectorRegister d)                                     { emit_int32( MFVSCR_OPCODE   | vrt(d)); }
 
+// AES (introduced with Power 8)
+inline void Assembler::vcipher(     VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCIPHER_OPCODE      | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vcipherlast( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCIPHERLAST_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vncipher(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNCIPHER_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vncipherlast(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNCIPHERLAST_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsbox(       VectorRegister d, VectorRegister a)                   { emit_int32( VSBOX_OPCODE        | vrt(d) | vra(a)         ); }
+
+// SHA (introduced with Power 8)
+// Not yet implemented.
+
+// Vector Binary Polynomial Multiplication (introduced with Power 8)
+inline void Assembler::vpmsumb(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpmsumd(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMD_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpmsumh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpmsumw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+
+// Vector Permute and Xor (introduced with Power 8)
+inline void Assembler::vpermxor( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VPERMXOR_OPCODE | vrt(d) | vra(a) | vrb(b) | vrc(c)); }
+
+// Transactional Memory instructions (introduced with Power 8)
+inline void Assembler::tbegin_()                                { emit_int32( TBEGIN_OPCODE | rc(1)); }
+inline void Assembler::tbeginrot_()                             { emit_int32( TBEGIN_OPCODE | /*R=1*/ 1u << (31-10) | rc(1)); }
+inline void Assembler::tend_()                                  { emit_int32( TEND_OPCODE | rc(1)); }
+inline void Assembler::tendall_()                               { emit_int32( TEND_OPCODE | /*A=1*/ 1u << (31-6) | rc(1)); }
+inline void Assembler::tabort_(Register a)                      { emit_int32( TABORT_OPCODE | ra(a) | rc(1)); }
+inline void Assembler::tabortwc_(int t, Register a, Register b) { emit_int32( TABORTWC_OPCODE | to(t) | ra(a) | rb(b) | rc(1)); }
+inline void Assembler::tabortwci_(int t, Register a, int si)    { emit_int32( TABORTWCI_OPCODE | to(t) | ra(a) | sh1620(si) | rc(1)); }
+inline void Assembler::tabortdc_(int t, Register a, Register b) { emit_int32( TABORTDC_OPCODE | to(t) | ra(a) | rb(b) | rc(1)); }
+inline void Assembler::tabortdci_(int t, Register a, int si)    { emit_int32( TABORTDCI_OPCODE | to(t) | ra(a) | sh1620(si) | rc(1)); }
+inline void Assembler::tsuspend_()                              { emit_int32( TSR_OPCODE | rc(1)); }
+inline void Assembler::tresume_()                               { emit_int32( TSR_OPCODE | /*L=1*/ 1u << (31-10) | rc(1)); }
+inline void Assembler::tcheck(int f)                            { emit_int32( TCHECK_OPCODE | bf(f)); }
+
 // ra0 version
 inline void Assembler::lwzx( Register d, Register s2) { emit_int32( LWZX_OPCODE | rt(d) | rb(s2));}
 inline void Assembler::lwz(  Register d, int si16   ) { emit_int32( LWZ_OPCODE  | rt(d) | d1(si16));}
 inline void Assembler::lwax( Register d, Register s2) { emit_int32( LWAX_OPCODE | rt(d) | rb(s2));}
 inline void Assembler::lwa(  Register d, int si16   ) { emit_int32( LWA_OPCODE  | rt(d) | ds(si16));}
+inline void Assembler::lwbrx(Register d, Register s2) { emit_int32( LWBRX_OPCODE| rt(d) | rb(s2));}
 inline void Assembler::lhzx( Register d, Register s2) { emit_int32( LHZX_OPCODE | rt(d) | rb(s2));}
 inline void Assembler::lhz(  Register d, int si16   ) { emit_int32( LHZ_OPCODE  | rt(d) | d1(si16));}
 inline void Assembler::lhax( Register d, Register s2) { emit_int32( LHAX_OPCODE | rt(d) | rb(s2));}
 inline void Assembler::lha(  Register d, int si16   ) { emit_int32( LHA_OPCODE  | rt(d) | d1(si16));}
+inline void Assembler::lhbrx(Register d, Register s2) { emit_int32( LHBRX_OPCODE| rt(d) | rb(s2));}
 inline void Assembler::lbzx( Register d, Register s2) { emit_int32( LBZX_OPCODE | rt(d) | rb(s2));}
 inline void Assembler::lbz(  Register d, int si16   ) { emit_int32( LBZ_OPCODE  | rt(d) | d1(si16));}
 inline void Assembler::ld(   Register d, int si16   ) { emit_int32( LD_OPCODE   | rt(d) | ds(si16));}
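
lwbrx and lhbrx load four and two bytes with the byte order reversed relative to a normal load; later in this changeset the interpreter uses them on VM_LITTLE_ENDIAN builds so that the big-endian operands in the Java bytecode stream can be fetched in a single instruction instead of being loaded and then swapped by hand. A small Python sketch of the equivalence for a two-byte operand:

    operand = bytes([0x12, 0x34])   # bytecode operands are stored big-endian

    plain_le_load = int.from_bytes(operand, "little")        # lhz on a little-endian core: 0x3412
    byte_reversed = int.from_bytes(operand[::-1], "little")  # lhbrx: bytes swapped during the load
    encoded_value = int.from_bytes(operand, "big")           # the value the bytecode actually means

    assert byte_reversed == encoded_value == 0x1234
    assert plain_le_load == 0x3412
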
--- a/src/cpu/ppc/vm/compiledIC_ppc.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/ppc/vm/compiledIC_ppc.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,34 +50,6 @@
   return is_icholder_entry(call->destination());
 }
 
-//-----------------------------------------------------------------------------
-// High-level access to an inline cache. Guaranteed to be MT-safe.
-
-CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
-  : _ic_call(call)
-{
-  address ic_call = call->instruction_address();
-
-  assert(ic_call != NULL, "ic_call address must be set");
-  assert(nm != NULL, "must pass nmethod");
-  assert(nm->contains(ic_call), "must be in nmethod");
-
-  // Search for the ic_call at the given address.
-  RelocIterator iter(nm, ic_call, ic_call+1);
-  bool ret = iter.next();
-  assert(ret == true, "relocInfo must exist at this address");
-  assert(iter.addr() == ic_call, "must find ic_call");
-  if (iter.type() == relocInfo::virtual_call_type) {
-    virtual_call_Relocation* r = iter.virtual_call_reloc();
-    _is_optimized = false;
-    _value = nativeMovConstReg_at(r->cached_value());
-  } else {
-    assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
-    _is_optimized = true;
-    _value = NULL;
-  }
-}
-
 // ----------------------------------------------------------------------------
 
 // A PPC CompiledStaticCall looks like this:
@@ -203,7 +175,7 @@
   if (TraceICs) {
     ResourceMark rm;
     tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
-                  instruction_address(),
+                  p2i(instruction_address()),
                   callee->name_and_sig_as_C_string());
   }
 
--- a/src/cpu/ppc/vm/frame_ppc.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/ppc/vm/frame_ppc.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -140,7 +140,7 @@
 void frame::patch_pc(Thread* thread, address pc) {
   if (TracePcPatching) {
     tty->print_cr("patch_pc at address " PTR_FORMAT " [" PTR_FORMAT " -> " PTR_FORMAT "]",
-                  &((address*) _sp)[-1], ((address*) _sp)[-1], pc);
+                  p2i(&((address*) _sp)[-1]), p2i(((address*) _sp)[-1]), p2i(pc));
   }
   own_abi()->lr = (uint64_t)pc;
   _cb = CodeCache::find_blob(pc);
--- a/src/cpu/ppc/vm/globalDefinitions_ppc.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/ppc/vm/globalDefinitions_ppc.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -37,6 +37,8 @@
 // signatures accordingly.
 const bool CCallingConventionRequiresIntsAsLongs = true;
 
+#define SUPPORTS_NATIVE_CX8
+
 // The PPC CPUs are NOT multiple-copy-atomic.
 #define CPU_NOT_MULTIPLE_COPY_ATOMIC
 
--- a/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -25,7 +25,6 @@
 
 
 #include "precompiled.hpp"
-#include "asm/assembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "interp_masm_ppc_64.hpp"
 #include "interpreter/interpreterRuntime.hpp"
@@ -119,9 +118,15 @@
     // Call the Interpreter::remove_activation_preserving_args_entry()
     // func to get the address of the same-named entrypoint in the
     // generated interpreter code.
+#if defined(ABI_ELFv2)
+    call_c(CAST_FROM_FN_PTR(address,
+                            Interpreter::remove_activation_preserving_args_entry),
+           relocInfo::none);
+#else
     call_c(CAST_FROM_FN_PTR(FunctionDescriptor*,
                             Interpreter::remove_activation_preserving_args_entry),
            relocInfo::none);
+#endif
 
     // Jump to Interpreter::_remove_activation_preserving_args_entry.
     mtctr(R3_RET);
@@ -331,29 +336,40 @@
 void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(int         bcp_offset,
                                                           Register    Rdst,
                                                           signedOrNot is_signed) {
+#if defined(VM_LITTLE_ENDIAN)
+  if (bcp_offset) {
+    load_const_optimized(Rdst, bcp_offset);
+    lhbrx(Rdst, R14_bcp, Rdst);
+  } else {
+    lhbrx(Rdst, R14_bcp);
+  }
+  if (is_signed == Signed) {
+    extsh(Rdst, Rdst);
+  }
+#else
   // Read Java big endian format.
   if (is_signed == Signed) {
     lha(Rdst, bcp_offset, R14_bcp);
   } else {
     lhz(Rdst, bcp_offset, R14_bcp);
   }
-#if 0
-  assert(Rtmp != Rdst, "need separate temp register");
-  Register Rfirst = Rtmp;
-  lbz(Rfirst, bcp_offset, R14_bcp); // first byte
-  lbz(Rdst, bcp_offset+1, R14_bcp); // second byte
-
-  // Rdst = ((Rfirst<<8) & 0xFF00) | (Rdst &~ 0xFF00)
-  rldimi(/*RA=*/Rdst, /*RS=*/Rfirst, /*sh=*/8, /*mb=*/48);
-  if (is_signed == Signed) {
-    extsh(Rdst, Rdst);
-  }
 #endif
 }
 
 void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(int         bcp_offset,
                                                           Register    Rdst,
                                                           signedOrNot is_signed) {
+#if defined(VM_LITTLE_ENDIAN)
+  if (bcp_offset) {
+    load_const_optimized(Rdst, bcp_offset);
+    lwbrx(Rdst, R14_bcp, Rdst);
+  } else {
+    lwbrx(Rdst, R14_bcp);
+  }
+  if (is_signed == Signed) {
+    extsw(Rdst, Rdst);
+  }
+#else
   // Read Java big endian format.
   if (bcp_offset & 3) { // Offset unaligned?
     load_const_optimized(Rdst, bcp_offset);
@@ -369,19 +385,27 @@
       lwz(Rdst, bcp_offset, R14_bcp);
     }
   }
+#endif
 }
 
+
 // Load the constant pool cache index from the bytecode stream.
 //
 // Kills / writes:
 //   - Rdst, Rscratch
 void InterpreterMacroAssembler::get_cache_index_at_bcp(Register Rdst, int bcp_offset, size_t index_size) {
   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
+  // Cache index is always in the native format, courtesy of Rewriter.
   if (index_size == sizeof(u2)) {
-    get_2_byte_integer_at_bcp(bcp_offset, Rdst, Unsigned);
+    lhz(Rdst, bcp_offset, R14_bcp);
   } else if (index_size == sizeof(u4)) {
     assert(EnableInvokeDynamic, "giant index used only for JSR 292");
-    get_4_byte_integer_at_bcp(bcp_offset, Rdst, Signed);
+    if (bcp_offset & 3) {
+      load_const_optimized(Rdst, bcp_offset);
+      lwax(Rdst, R14_bcp, Rdst);
+    } else {
+      lwa(Rdst, bcp_offset, R14_bcp);
+    }
     assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
     nand(Rdst, Rdst, Rdst); // convert to plain index
   } else if (index_size == sizeof(u1)) {
@@ -398,6 +422,29 @@
   add(cache, R27_constPoolCache, cache);
 }
 
+// Load 4-byte signed or unsigned integer in Java format (that is, big-endian format)
+// from (Rsrc)+offset.
+void InterpreterMacroAssembler::get_u4(Register Rdst, Register Rsrc, int offset,
+                                       signedOrNot is_signed) {
+#if defined(VM_LITTLE_ENDIAN)
+  if (offset) {
+    load_const_optimized(Rdst, offset);
+    lwbrx(Rdst, Rdst, Rsrc);
+  } else {
+    lwbrx(Rdst, Rsrc);
+  }
+  if (is_signed == Signed) {
+    extsw(Rdst, Rdst);
+  }
+#else
+  if (is_signed == Signed) {
+    lwa(Rdst, offset, Rsrc);
+  } else {
+    lwz(Rdst, offset, Rsrc);
+  }
+#endif
+}
+
 // Load object from cpool->resolved_references(index).
 void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result, Register index) {
   assert_different_registers(result, index);
@@ -498,6 +545,9 @@
   cmplw(CCR0, Rindex, Rlength);
   sldi(RsxtIndex, RsxtIndex, index_shift);
   blt(CCR0, LnotOOR);
+  // Index should be in R17_tos, array should be in R4_ARG2.
+  mr(R17_tos, Rindex);
+  mr(R4_ARG2, Rarray);
   load_dispatch_table(Rtmp, (address*)Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
   mtctr(Rtmp);
   bctr();
@@ -1632,6 +1682,228 @@
   }
 }
 
+// Argument and return type profiling.
+// kills: tmp, tmp2, R0, CR0, CR1
+void InterpreterMacroAssembler::profile_obj_type(Register obj, Register mdo_addr_base,
+                                                 RegisterOrConstant mdo_addr_offs, Register tmp, Register tmp2) {
+  Label do_nothing, do_update;
+
+  // tmp2 = obj is allowed
+  assert_different_registers(obj, mdo_addr_base, tmp, R0);
+  assert_different_registers(tmp2, mdo_addr_base, tmp, R0);
+  const Register klass = tmp2;
+
+  verify_oop(obj);
+
+  ld(tmp, mdo_addr_offs, mdo_addr_base);
+
+  // Set null_seen if obj is 0.
+  cmpdi(CCR0, obj, 0);
+  ori(R0, tmp, TypeEntries::null_seen);
+  beq(CCR0, do_update);
+
+  load_klass(klass, obj);
+
+  clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
+  // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
+  cmpd(CCR1, R0, klass);
+  // Klass seen before, nothing to do (regardless of unknown bit).
+  //beq(CCR1, do_nothing);
+
+  andi_(R0, klass, TypeEntries::type_unknown);
+  // Already unknown. Nothing to do anymore.
+  //bne(CCR0, do_nothing);
+  crorc(/*CCR0 eq*/2, /*CCR1 eq*/4+2, /*CCR0 eq*/2); // cr0 eq = cr1 eq or cr0 ne
+  beq(CCR0, do_nothing);
+
+  clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
+  orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
+  beq(CCR0, do_update); // First time here. Set profile type.
+
+  // Different than before. Cannot keep accurate profile.
+  ori(R0, tmp, TypeEntries::type_unknown);
+
+  bind(do_update);
+  // update profile
+  std(R0, mdo_addr_offs, mdo_addr_base);
+
+  align(32, 12);
+  bind(do_nothing);
+}
+
+void InterpreterMacroAssembler::profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual) {
+  if (!ProfileInterpreter) {
+    return;
+  }
+
+  assert_different_registers(callee, tmp1, tmp2, R28_mdx);
+
+  if (MethodData::profile_arguments() || MethodData::profile_return()) {
+    Label profile_continue;
+
+    test_method_data_pointer(profile_continue);
+
+    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
+
+    lbz(tmp1, in_bytes(DataLayout::tag_offset()) - off_to_start, R28_mdx);
+    cmpwi(CCR0, tmp1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
+    bne(CCR0, profile_continue);
+
+    if (MethodData::profile_arguments()) {
+      Label done;
+      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
+      add(R28_mdx, off_to_args, R28_mdx);
+
+      for (int i = 0; i < TypeProfileArgsLimit; i++) {
+        if (i > 0 || MethodData::profile_return()) {
+          // If return value type is profiled we may have no argument to profile.
+          ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
+          cmpdi(CCR0, tmp1, (i+1)*TypeStackSlotEntries::per_arg_count());
+          addi(tmp1, tmp1, -i*TypeStackSlotEntries::per_arg_count());
+          blt(CCR0, done);
+        }
+        ld(tmp1, in_bytes(Method::const_offset()), callee);
+        lhz(tmp1, in_bytes(ConstMethod::size_of_parameters_offset()), tmp1);
+        // Stack offset o (zero based) from the start of the argument
+        // list, for n arguments translates into offset n - o - 1 from
+        // the end of the argument list. But there's an extra slot at
+        // the top of the stack. So the offset is n - o from Lesp.
+        ld(tmp2, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args, R28_mdx);
+        subf(tmp1, tmp2, tmp1);
+
+        sldi(tmp1, tmp1, Interpreter::logStackElementSize);
+        ldx(tmp1, tmp1, R15_esp);
+
+        profile_obj_type(tmp1, R28_mdx, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args, tmp2, tmp1);
+
+        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
+        addi(R28_mdx, R28_mdx, to_add);
+        off_to_args += to_add;
+      }
+
+      if (MethodData::profile_return()) {
+        ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
+        addi(tmp1, tmp1, -TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
+      }
+
+      bind(done);
+
+      if (MethodData::profile_return()) {
+        // We're right after the type profile for the last
+        // argument. tmp1 is the number of cells left in the
+        // CallTypeData/VirtualCallTypeData to reach its end. Non null
+        // if there's a return to profile.
+        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
+        sldi(tmp1, tmp1, exact_log2(DataLayout::cell_size));
+        add(R28_mdx, tmp1, R28_mdx);
+      }
+    } else {
+      assert(MethodData::profile_return(), "either profile call args or call ret");
+      update_mdp_by_constant(in_bytes(TypeEntriesAtCall::return_only_size()));
+    }
+
+    // Mdp points right after the end of the
+    // CallTypeData/VirtualCallTypeData, right after the cells for the
+    // return value type if there's one.
+    align(32, 12);
+    bind(profile_continue);
+  }
+}
+
+void InterpreterMacroAssembler::profile_return_type(Register ret, Register tmp1, Register tmp2) {
+  assert_different_registers(ret, tmp1, tmp2);
+  if (ProfileInterpreter && MethodData::profile_return()) {
+    Label profile_continue;
+
+    test_method_data_pointer(profile_continue);
+
+    if (MethodData::profile_return_jsr292_only()) {
+      // If we don't profile all invoke bytecodes we must make sure
+      // it's a bytecode we indeed profile. We can't go back to the
+      // beginning of the ProfileData we intend to update to check its
+      // type because we're right after it and we don't know its
+      // length.
+      lbz(tmp1, 0, R14_bcp);
+      lbz(tmp2, Method::intrinsic_id_offset_in_bytes(), R19_method);
+      cmpwi(CCR0, tmp1, Bytecodes::_invokedynamic);
+      cmpwi(CCR1, tmp1, Bytecodes::_invokehandle);
+      cror(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);
+      cmpwi(CCR1, tmp2, vmIntrinsics::_compiledLambdaForm);
+      cror(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);
+      bne(CCR0, profile_continue);
+    }
+
+    profile_obj_type(ret, R28_mdx, -in_bytes(ReturnTypeEntry::size()), tmp1, tmp2);
+
+    align(32, 12);
+    bind(profile_continue);
+  }
+}
+
+void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
+  if (ProfileInterpreter && MethodData::profile_parameters()) {
+    Label profile_continue, done;
+
+    test_method_data_pointer(profile_continue);
+
+    // Load the offset of the area within the MDO used for
+    // parameters. If it's negative we're not profiling any parameters.
+    lwz(tmp1, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()), R28_mdx);
+    cmpwi(CCR0, tmp1, 0);
+    blt(CCR0, profile_continue);
+
+    // Compute a pointer to the area for parameters from the offset
+    // and move the pointer to the slot for the last
+    // parameter. Collect profiling from last parameter down.
+    // mdo start + parameters offset + array length - 1
+
+    // Pointer to the parameter area in the MDO.
+    const Register mdp = tmp1;
+    add(mdp, tmp1, R28_mdx);
+
+    // Offset of the current profile entry to update.
+    const Register entry_offset = tmp2;
+    // entry_offset = array len in number of cells
+    ld(entry_offset, in_bytes(ArrayData::array_len_offset()), mdp);
+
+    int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
+    assert(off_base % DataLayout::cell_size == 0, "should be a number of cells");
+
+    // entry_offset (number of cells) = array len - size of 1 entry + offset of the stack slot field
+    addi(entry_offset, entry_offset, -TypeStackSlotEntries::per_arg_count() + (off_base / DataLayout::cell_size));
+    // entry_offset in bytes
+    sldi(entry_offset, entry_offset, exact_log2(DataLayout::cell_size));
+
+    Label loop;
+    align(32, 12);
+    bind(loop);
+
+    // Load offset on the stack from the slot for this parameter.
+    ld(tmp3, entry_offset, mdp);
+    sldi(tmp3, tmp3, Interpreter::logStackElementSize);
+    neg(tmp3, tmp3);
+    // Read the parameter from the local area.
+    ldx(tmp3, tmp3, R18_locals);
+
+    // Make entry_offset now point to the type field for this parameter.
+    int type_base = in_bytes(ParametersTypeData::type_offset(0));
+    assert(type_base > off_base, "unexpected");
+    addi(entry_offset, entry_offset, type_base - off_base);
+
+    // Profile the parameter.
+    profile_obj_type(tmp3, mdp, entry_offset, tmp4, tmp3);
+
+    // Go to next parameter.
+    int delta = TypeStackSlotEntries::per_arg_count() * DataLayout::cell_size + (type_base - off_base);
+    cmpdi(CCR0, entry_offset, off_base + delta);
+    addi(entry_offset, entry_offset, -delta);
+    bge(CCR0, loop);
+
+    align(32, 12);
+    bind(profile_continue);
+  }
+}
+
 // Add a InterpMonitorElem to stack (see frame_sparc.hpp).
 void InterpreterMacroAssembler::add_monitor_to_stack(bool stack_is_empty, Register Rtemp1, Register Rtemp2) {
 
@@ -1993,20 +2265,19 @@
   bne(CCR0, test);
 
   address fd = CAST_FROM_FN_PTR(address, verify_return_address);
-  unsigned int nbytes_save = 10*8; // 10 volatile gprs
-
-  save_LR_CR(Rtmp);
+  const int nbytes_save = 11*8; // volatile gprs except R0
+  save_volatile_gprs(R1_SP, -nbytes_save); // except R0
+  save_LR_CR(Rtmp); // Save in old frame.
   push_frame_reg_args(nbytes_save, Rtmp);
-  save_volatile_gprs(R1_SP, 112); // except R0
 
   load_const_optimized(Rtmp, fd, R0);
   mr_if_needed(R4_ARG2, reg);
   mr(R3_ARG1, R19_method);
   call_c(Rtmp); // call C
 
-  restore_volatile_gprs(R1_SP, 112); // except R0
   pop_frame();
   restore_LR_CR(Rtmp);
+  restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
   b(skip);
 
   // Perform a more elaborate out-of-line call.
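
The little-endian paths introduced above for get_2_byte_integer_at_bcp(), get_4_byte_integer_at_bcp() and the new get_u4() all read Java's big-endian bytecode operands with a byte-reversed load (lhbrx/lwbrx). A C++ sketch of the intended semantics, assuming the VM_LITTLE_ENDIAN macro used in the hunks above (read_java_u4 is an illustrative name only, not HotSpot code):

#include <cstdint>
#include <cstring>

static int64_t read_java_u4(const uint8_t* bcp, int offset, bool is_signed) {
  uint32_t v;
  std::memcpy(&v, bcp + offset, sizeof(v));  // plain host-order load (lwz)
#if defined(VM_LITTLE_ENDIAN)
  v = __builtin_bswap32(v);                  // what a single lwbrx achieves
#endif
  return is_signed ? (int64_t)(int32_t)v     // extsw-style sign extension
                   : (int64_t)v;
}
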
--- a/src/cpu/ppc/vm/interp_masm_ppc_64.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/ppc/vm/interp_masm_ppc_64.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -130,6 +130,7 @@
 
   void get_cache_and_index_at_bcp(Register cache, int bcp_offset, size_t index_size = sizeof(u2));
 
+  void get_u4(Register Rdst, Register Rsrc, int offset, signedOrNot is_signed);
 
   // common code
 
@@ -254,6 +255,12 @@
   void record_klass_in_profile(Register receiver, Register scratch1, Register scratch2, bool is_virtual_call);
   void record_klass_in_profile_helper(Register receiver, Register scratch1, Register scratch2, int start_row, Label& done, bool is_virtual_call);
 
+  // Argument and return type profiling.
+  void profile_obj_type(Register obj, Register mdo_addr_base, RegisterOrConstant mdo_addr_offs, Register tmp, Register tmp2);
+  void profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual);
+  void profile_return_type(Register ret, Register tmp1, Register tmp2);
+  void profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4);
+
 #endif // !CC_INTERP
 
   // Debugging
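
The argument/return/parameter type profiling helpers declared here each update one TypeEntries cell per profiled value. A sketch of the state machine profile_obj_type() implements, assuming the usual null_seen/type_unknown bit layout (update_type_cell is an illustrative name, not HotSpot code):

#include <cstdint>

// Assumed bit layout (mirrors TypeEntries): low bits are status, the rest holds the klass.
static const uint64_t null_seen       = 1;
static const uint64_t type_unknown    = 2;
static const uint64_t type_klass_mask = ~(uint64_t)3;

// klass == 0 stands for a null object reference.
static uint64_t update_type_cell(uint64_t cell, uint64_t klass) {
  if (klass == 0)                        return cell | null_seen;            // remember a null was seen
  if ((cell & type_klass_mask) == klass) return cell;                        // same klass as before
  if (cell & type_unknown)               return cell;                        // already polluted
  if ((cell & type_klass_mask) == 0)     return klass | (cell & null_seen);  // first klass recorded
  return cell | type_unknown;                                                // conflicting klass
}
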
--- a/src/cpu/ppc/vm/interpreter_ppc.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/ppc/vm/interpreter_ppc.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -24,7 +24,6 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/assembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interpreter.hpp"
--- a/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -806,6 +806,7 @@
 
 // For verify_oops.
 void MacroAssembler::save_volatile_gprs(Register dst, int offset) {
+  std(R2,  offset, dst);   offset += 8;
   std(R3,  offset, dst);   offset += 8;
   std(R4,  offset, dst);   offset += 8;
   std(R5,  offset, dst);   offset += 8;
@@ -820,6 +821,7 @@
 
 // For verify_oops.
 void MacroAssembler::restore_volatile_gprs(Register src, int offset) {
+  ld(R2,  offset, src);   offset += 8;
   ld(R3,  offset, src);   offset += 8;
   ld(R4,  offset, src);   offset += 8;
   ld(R5,  offset, src);   offset += 8;
@@ -1186,6 +1188,16 @@
   call_VM(oop_result, entry_point, check_exceptions);
 }
 
+void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3,
+                             bool check_exceptions) {
+  // R3_ARG1 is reserved for the thread
+  mr_if_needed(R4_ARG2, arg_1);
+  assert(arg_2 != R4_ARG2, "smashed argument");
+  mr_if_needed(R5_ARG3, arg_2);
+  mr_if_needed(R6_ARG4, arg_3);
+  call_VM(oop_result, entry_point, check_exceptions);
+}
+
 void MacroAssembler::call_VM_leaf(address entry_point) {
   call_VM_leaf_base(entry_point);
 }
@@ -2365,7 +2377,7 @@
 #endif // INCLUDE_ALL_GCS
 
 // Values for last_Java_pc, and last_Java_sp must comply to the rules
-// in frame_ppc64.hpp.
+// in frame_ppc.hpp.
 void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc) {
   // Always set last_Java_pc and flags first because once last_Java_sp
   // is visible has_last_Java_frame is true and users will look at the
@@ -2492,6 +2504,7 @@
 }
 
 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
+  assert(dst != R0, "Dst reg may not be R0, as R0 is used here.");
   if (src == noreg) src = dst;
   Register shifted_src = src;
   if (Universe::narrow_klass_shift() != 0 ||
@@ -2526,14 +2539,11 @@
 
 void MacroAssembler::reinit_heapbase(Register d, Register tmp) {
   if (Universe::heap() != NULL) {
-    if (Universe::narrow_oop_base() == NULL) {
-      Assembler::xorr(R30, R30, R30);
-    } else {
-      load_const(R30, Universe::narrow_ptrs_base(), tmp);
-    }
+    load_const_optimized(R30, Universe::narrow_ptrs_base(), tmp);
   } else {
-    load_const(R30, Universe::narrow_ptrs_base_addr(), tmp);
-    ld(R30, 0, R30);
+    // Heap not yet allocated. Load indirectly.
+    int simm16_offset = load_const_optimized(R30, Universe::narrow_ptrs_base_addr(), tmp, true);
+    ld(R30, simm16_offset, R30);
   }
 }
 
@@ -3060,35 +3070,27 @@
   if (!VerifyOops) {
     return;
   }
-  // Will be preserved.
-  Register tmp = R11;
-  assert(oop != tmp, "precondition");
-  unsigned int nbytes_save = 10*8; // 10 volatile gprs
+
   address/* FunctionDescriptor** */fd = StubRoutines::verify_oop_subroutine_entry_address();
-  // save tmp
-  mr(R0, tmp);
-  // kill tmp
-  save_LR_CR(tmp);
+  const Register tmp = R11; // Will be preserved.
+  const int nbytes_save = 11*8; // Volatile gprs except R0.
+  save_volatile_gprs(R1_SP, -nbytes_save); // except R0
+
+  if (oop == tmp) mr(R4_ARG2, oop);
+  save_LR_CR(tmp); // save in old frame
   push_frame_reg_args(nbytes_save, tmp);
-  // restore tmp
-  mr(tmp, R0);
-  save_volatile_gprs(R1_SP, 112); // except R0
   // load FunctionDescriptor** / entry_address *
-  load_const(tmp, fd);
+  load_const_optimized(tmp, fd, R0);
   // load FunctionDescriptor* / entry_address
   ld(tmp, 0, tmp);
-  mr(R4_ARG2, oop);
-  load_const(R3_ARG1, (address)msg);
-  // call destination for its side effect
+  if (oop != tmp) mr_if_needed(R4_ARG2, oop);
+  load_const_optimized(R3_ARG1, (address)msg, R0);
+  // Call destination for its side effect.
   call_c(tmp);
-  restore_volatile_gprs(R1_SP, 112); // except R0
+
   pop_frame();
-  // save tmp
-  mr(R0, tmp);
-  // kill tmp
   restore_LR_CR(tmp);
-  // restore tmp
-  mr(tmp, R0);
+  restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
 }
 
 const char* stop_types[] = {
@@ -3099,7 +3101,7 @@
 };
 
 static void stop_on_request(int tp, const char* msg) {
-  tty->print("PPC assembly code requires stop: (%s) %s\n", (void *)stop_types[tp%/*stop_end*/4], msg);
+  tty->print("PPC assembly code requires stop: (%s) %s\n", stop_types[tp%/*stop_end*/4], msg);
   guarantee(false, err_msg("PPC assembly code requires stop: %s", msg));
 }
 
--- a/src/cpu/ppc/vm/macroAssembler_ppc.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/ppc/vm/macroAssembler_ppc.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -368,6 +368,7 @@
   void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
   void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
   void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
+  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg3, bool check_exceptions = true);
   void call_VM_leaf(address entry_point);
   void call_VM_leaf(address entry_point, Register arg_1);
   void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
--- a/src/cpu/ppc/vm/methodHandles_ppc.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/ppc/vm/methodHandles_ppc.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -464,7 +464,7 @@
                  strstr(adaptername, "linkTo") == NULL);    // static linkers don't have MH
   const char* mh_reg_name = has_mh ? "R23_method_handle" : "G23";
   tty->print_cr("MH %s %s="INTPTR_FORMAT " sp=" INTPTR_FORMAT,
-                adaptername, mh_reg_name, (intptr_t) mh, entry_sp);
+                adaptername, mh_reg_name, (intptr_t) mh, (intptr_t) entry_sp);
 
   if (Verbose) {
     tty->print_cr("Registers:");
--- a/src/cpu/ppc/vm/nativeInst_ppc.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/ppc/vm/nativeInst_ppc.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -100,10 +100,7 @@
   MacroAssembler* a = new MacroAssembler(&cb);
 
   // Patch the call.
-  if (ReoptimizeCallSequences &&
-      a->is_within_range_of_b(dest, addr_call)) {
-    a->bl(dest);
-  } else {
+  if (!ReoptimizeCallSequences || !a->is_within_range_of_b(dest, addr_call)) {
     address trampoline_stub_addr = get_trampoline();
 
     // We did not find a trampoline stub because the current codeblob
@@ -115,9 +112,12 @@
 
     // Patch the constant in the call's trampoline stub.
     NativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
+    dest = trampoline_stub_addr;
+  }
 
-    a->bl(trampoline_stub_addr);
-  }
+  OrderAccess::release();
+  a->bl(dest);
+
   ICache::ppc64_flush_icache_bytes(addr_call, code_size);
 }
 
@@ -147,9 +147,9 @@
   address addr = addr_at(0);
 
   if (!NativeCall::is_call_at(addr)) {
-    tty->print_cr("not a NativeCall at " PTR_FORMAT, addr);
+    tty->print_cr("not a NativeCall at " PTR_FORMAT, p2i(addr));
     // TODO: PPC port: Disassembler::decode(addr - 20, addr + 20, tty);
-    fatal(err_msg("not a NativeCall at " PTR_FORMAT, addr));
+    fatal(err_msg("not a NativeCall at " PTR_FORMAT, p2i(addr)));
   }
 }
 #endif // ASSERT
@@ -160,9 +160,9 @@
 
   NativeInstruction::verify();
   if (!NativeFarCall::is_far_call_at(addr)) {
-    tty->print_cr("not a NativeFarCall at " PTR_FORMAT, addr);
+    tty->print_cr("not a NativeFarCall at " PTR_FORMAT, p2i(addr));
     // TODO: PPC port: Disassembler::decode(addr, 20, 20, tty);
-    fatal(err_msg("not a NativeFarCall at " PTR_FORMAT, addr));
+    fatal(err_msg("not a NativeFarCall at " PTR_FORMAT, p2i(addr)));
   }
 }
 #endif // ASSERT
@@ -306,9 +306,9 @@
     if (! (cb != NULL && MacroAssembler::is_calculate_address_from_global_toc_at(addr, cb->content_begin())) &&
         ! (cb != NULL && MacroAssembler::is_set_narrow_oop(addr, cb->content_begin())) &&
         ! MacroAssembler::is_bl(*((int*) addr))) {
-      tty->print_cr("not a NativeMovConstReg at " PTR_FORMAT, addr);
+      tty->print_cr("not a NativeMovConstReg at " PTR_FORMAT, p2i(addr));
       // TODO: PPC port: Disassembler::decode(addr, 20, 20, tty);
-      fatal(err_msg("not a NativeMovConstReg at " PTR_FORMAT, addr));
+      fatal(err_msg("not a NativeMovConstReg at " PTR_FORMAT, p2i(addr)));
     }
   }
 }
@@ -344,9 +344,9 @@
 
   NativeInstruction::verify();
   if (!NativeJump::is_jump_at(addr)) {
-    tty->print_cr("not a NativeJump at " PTR_FORMAT, addr);
+    tty->print_cr("not a NativeJump at " PTR_FORMAT, p2i(addr));
     // TODO: PPC port: Disassembler::decode(addr, 20, 20, tty);
-    fatal(err_msg("not a NativeJump at " PTR_FORMAT, addr));
+    fatal(err_msg("not a NativeJump at " PTR_FORMAT, p2i(addr)));
   }
 }
 #endif // ASSERT
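
The reworked call patching above always records the far destination in the trampoline stub before emitting the branch, with OrderAccess::release() between the two steps so a concurrently executing thread never reaches the stub before its target is visible. A conceptual C++ sketch of that publish pattern (the types and names below are stand-ins, not the real patching code):

#include <atomic>
#include <cstdint>

struct TrampolineStub { std::atomic<uint64_t> destination; };

static void repatch_call(std::atomic<uint32_t>* call_site, uint32_t new_branch,
                         TrampolineStub* stub, uint64_t new_dest) {
  stub->destination.store(new_dest, std::memory_order_relaxed);  // 1. update the stub's target
  std::atomic_thread_fence(std::memory_order_release);           // OrderAccess::release()
  call_site->store(new_branch, std::memory_order_relaxed);       // 2. publish the new branch encoding
}
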
--- a/src/cpu/ppc/vm/ppc.ad	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/ppc/vm/ppc.ad	Fri Apr 10 16:58:26 2015 -0700
@@ -1249,6 +1249,7 @@
 
     // Emit the trampoline stub which will be related to the branch-and-link below.
     CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
+    if (Compile::current()->env()->failing()) { return offsets; } // Code cache may be full.
     __ relocate(rtype);
   }
 
@@ -1329,7 +1330,7 @@
 
   if (!false /* TODO: PPC port C->is_frameless_method()*/) {
     st->print("save return pc\n\t");
-    st->print("push frame %d\n\t", -framesize);
+    st->print("push frame %ld\n\t", -framesize);
   }
 }
 #endif
@@ -1412,7 +1413,7 @@
     while (bang_offset <= bang_end) {
       // Need at least one stack bang at end of shadow zone.
 
-      // Again I had to copy code, this time from assembler_ppc64.cpp,
+      // Again I had to copy code, this time from assembler_ppc.cpp,
       // bang_stack_with_offset - see there for comments.
 
       // Stack grows down, caller passes positive offset.
@@ -1937,8 +1938,9 @@
   // --------------------------------------------------------------------
   // Check for hi bits still needing moving. Only happens for misaligned
   // arguments to native calls.
-  if (src_hi == dst_hi)
+  if (src_hi == dst_hi) {
     return ppc64Opcode_none;               // Self copy; no move.
+  }
 
   ShouldNotReachHere();
   return ppc64Opcode_undefined;
@@ -1960,14 +1962,15 @@
 }
 
 uint MachNopNode::size(PhaseRegAlloc *ra_) const {
-   return _count * 4;
+  return _count * 4;
 }
 
 #ifndef PRODUCT
 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
-  int reg = ra_->get_reg_first(this);
-  st->print("ADDI %s, SP, %d \t// box node", Matcher::regName[reg], offset);
+  char reg_str[128];
+  ra_->dump_register(this, reg_str);
+  st->print("ADDI    %s, SP, %d \t// box node", reg_str, offset);
 }
 #endif
 
@@ -2002,7 +2005,7 @@
 
   // Inline_cache contains a klass.
   Register ic_klass       = as_Register(Matcher::inline_cache_reg_encode());
-  Register receiver_klass = R0;  // tmp
+  Register receiver_klass = R12_scratch2;  // tmp
 
   assert_different_registers(ic_klass, receiver_klass, R11_scratch1, R3_ARG1);
   assert(R11_scratch1 == R11, "need prologue scratch register");
@@ -3486,6 +3489,7 @@
 
         // Emit the trampoline stub which will be related to the branch-and-link below.
         CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
+        if (Compile::current()->env()->failing()) { return; } // Code cache may be full.
         __ relocate(_optimized_virtual ?
                     relocInfo::opt_virtual_call_type : relocInfo::static_call_type);
       }
@@ -3529,6 +3533,7 @@
 
       // Emit the trampoline stub which will be related to the branch-and-link below.
       CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
+      if (ra_->C->env()->failing()) { return; } // Code cache may be full.
       assert(_optimized_virtual, "methodHandle call should be a virtual call");
       __ relocate(relocInfo::opt_virtual_call_type);
     }
@@ -3579,9 +3584,7 @@
       const address entry_point_const = __ address_constant(entry_point, RelocationHolder::none);
       const int entry_point_const_toc_offset = __ offset_to_method_toc(entry_point_const);
       CallStubImpl::emit_trampoline_stub(_masm, entry_point_const_toc_offset, __ offset());
-
-      if (ra_->C->env()->failing())
-        return;
+      if (ra_->C->env()->failing()) { return; } // Code cache may be full.
 
       // Build relocation at call site with ic position as data.
       assert((_load_ic_hi_node != NULL && _load_ic_node == NULL) ||
@@ -5640,19 +5643,6 @@
   ins_pipe(pipe_class_memory);
 %}
 
-//// Load compressed klass and decode it if narrow_klass_shift == 0.
-//// TODO: will narrow_klass_shift ever be 0?
-//instruct decodeNKlass2Klass(iRegPdst dst, memory mem) %{
-//  match(Set dst (DecodeNKlass (LoadNKlass mem)));
-//  predicate(false /* TODO: PPC port Universe::narrow_klass_shift() == 0*);
-//  ins_cost(MEMORY_REF_COST);
-//
-//  format %{ "LWZ     $dst, $mem \t// DecodeNKlass (unscaled)" %}
-//  size(4);
-//  ins_encode( enc_lwz(dst, mem) );
-//  ins_pipe(pipe_class_memory);
-//%}
-
 // Load Klass Pointer
 instruct loadKlass(iRegPdst dst, memoryAlg4 mem) %{
   match(Set dst (LoadKlass mem));
@@ -6072,11 +6062,15 @@
   %}
 %}
 
-instruct loadConNKlass_hi(iRegNdst dst, immNKlass src) %{
+// We have seen a safepoint between the hi and lo parts, and this node was handled
+// as an oop. Therefore this needs a match rule so that build_oop_map knows this is
+// not a narrow oop.
+instruct loadConNKlass_hi(iRegNdst dst, immNKlass_NM src) %{
+  match(Set dst src);
   effect(DEF dst, USE src);
   ins_cost(DEFAULT_COST);
 
-  format %{ "LIS     $dst, $src \t// narrow oop hi" %}
+  format %{ "LIS     $dst, $src \t// narrow klass hi" %}
   size(4);
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_addis);
@@ -6086,6 +6080,21 @@
   ins_pipe(pipe_class_default);
 %}
 
+// Like loadConNKlass_hi, this must be recognized as a narrow klass, not an oop!
+instruct loadConNKlass_mask(iRegNdst dst, immNKlass_NM src1, iRegNsrc src2) %{
+  match(Set dst src1);
+  effect(TEMP src2);
+  ins_cost(DEFAULT_COST);
+
+  format %{ "MASK    $dst, $src2, 0xFFFFFFFF" %} // mask
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
+    __ clrldi($dst$$Register, $src2$$Register, 0x20);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
 // This needs a match rule so that build_oop_map knows this is
 // not a narrow oop.
 instruct loadConNKlass_lo(iRegNdst dst, immNKlass_NM src1, iRegNsrc src2) %{
@@ -6093,10 +6102,10 @@
   effect(TEMP src2);
   ins_cost(DEFAULT_COST);
 
-  format %{ "ADDI    $dst, $src1, $src2 \t// narrow oop lo" %}
-  size(4);
-  ins_encode %{
-    // TODO: PPC port $archOpcode(ppc64Opcode_addi);
+  format %{ "ORI    $dst, $src1, $src2 \t// narrow klass lo" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_ori);
     intptr_t Csrc = Klass::encode_klass((Klass *)$src1$$constant);
     assert(__ oop_recorder() != NULL, "this assembler needs an OopRecorder");
     int klass_index = __ oop_recorder()->find_index((Klass *)$src1$$constant);
@@ -6127,10 +6136,11 @@
     MachNode *m2 = m1;
     if (!Assembler::is_uimm((jlong)Klass::encode_klass((Klass *)op_src->constant()), 31)) {
       // Value might be 1-extended. Mask out these bits.
-      m2 = new (C) clearMs32bNode();
+      m2 = new (C) loadConNKlass_maskNode();
       m2->add_req(NULL, m1);
       m2->_opnds[0] = op_dst;
-      m2->_opnds[1] = op_dst;
+      m2->_opnds[1] = op_src;
+      m2->_opnds[2] = op_dst;
       ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
       nodes->push(m2);
     }
@@ -6975,7 +6985,7 @@
   size(4);
   ins_encode %{
     // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
-    __ rldicl($dst$$Register, $src$$Register, 64-Universe::narrow_oop_shift(), 32);
+    __ rldicl($dst$$Register, $src$$Register, 64-Universe::narrow_klass_shift(), 32);
   %}
   ins_pipe(pipe_class_default);
 %}
--- a/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -24,7 +24,6 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/assembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_ppc.hpp"
@@ -39,9 +38,6 @@
 #include "runtime/stubCodeGenerator.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "utilities/top.hpp"
-#ifdef COMPILER2
-#include "opto/runtime.hpp"
-#endif
 #include "runtime/thread.inline.hpp"
 
 #define __ _masm->
@@ -216,7 +212,7 @@
     {
       BLOCK_COMMENT("Call frame manager or native entry.");
       // Call frame manager or native entry.
-      Register r_new_arg_entry = R14; // PPC_state;
+      Register r_new_arg_entry = R14;
       assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr,
                                  r_arg_method, r_arg_thread);
 
--- a/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -90,7 +90,7 @@
 
   // Thread will be loaded to R3_ARG1.
   // Target class oop is in register R5_ARG3 by convention!
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose, R17_tos, R5_ARG3));
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose), R17_tos, R5_ARG3);
   // Above call must not return here since exception pending.
   DEBUG_ONLY(__ should_not_reach_here();)
   return entry;
@@ -171,12 +171,20 @@
   // Compiled code destroys templateTableBase, reload.
   __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R12_scratch2);
 
+  if (state == atos) {
+    __ profile_return_type(R3_RET, R11_scratch1, R12_scratch2);
+  }
+
   const Register cache = R11_scratch1;
   const Register size  = R12_scratch2;
   __ get_cache_and_index_at_bcp(cache, 1, index_size);
 
-  // Big Endian (get least significant byte of 64 bit value):
+  // Get least significant byte of 64 bit value:
+#if defined(VM_LITTLE_ENDIAN)
+  __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()), cache);
+#else
   __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()) + 7, cache);
+#endif
   __ sldi(size, size, Interpreter::logStackElementSize);
   __ add(R15_esp, R15_esp, size);
   __ dispatch_next(state, step);
@@ -857,7 +865,9 @@
   // Our signature handlers copy required arguments to the C stack
   // (outgoing C args), R3_ARG1 to R10_ARG8, and FARG1 to FARG13.
   __ mr(R3_ARG1, R18_locals);
+#if !defined(ABI_ELFv2)
   __ ld(signature_handler_fd, 0, signature_handler_fd);
+#endif
 
   __ call_stub(signature_handler_fd);
 
@@ -1019,8 +1029,13 @@
   // native result across the call. No oop is present.
 
   __ mr(R3_ARG1, R16_thread);
+#if defined(ABI_ELFv2)
+  __ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
+            relocInfo::none);
+#else
   __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
             relocInfo::none);
+#endif
 
   __ bind(sync_check_done);
 
@@ -1219,6 +1234,10 @@
       __ li(R0, 1);
       __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
     }
+
+    // Argument and return type profiling.
+    __ profile_parameters_type(R3_ARG1, R4_ARG2, R5_ARG3, R6_ARG4);
+
     // Increment invocation counter and check for overflow.
     if (inc_counter) {
       generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
@@ -1538,6 +1557,8 @@
     __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
     if (ProfileInterpreter) {
       __ set_method_data_pointer_for_bcp();
+      __ ld(R11_scratch1, 0, R1_SP);
+      __ std(R28_mdx, _ijava_state_neg(mdx), R11_scratch1);
     }
 #if INCLUDE_JVMTI
     Label L_done;
@@ -1549,13 +1570,11 @@
     // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
     // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
     __ ld(R4_ARG2, 0, R18_locals);
-    __ call_VM(R11_scratch1, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null),
-               R4_ARG2, R19_method, R14_bcp);
-
-    __ cmpdi(CCR0, R11_scratch1, 0);
+    __ MacroAssembler::call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp, false);
+    __ restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true);
+    __ cmpdi(CCR0, R4_ARG2, 0);
     __ beq(CCR0, L_done);
-
-    __ std(R11_scratch1, wordSize, R15_esp);
+    __ std(R4_ARG2, wordSize, R15_esp);
     __ bind(L_done);
 #endif // INCLUDE_JVMTI
     __ dispatch_next(vtos);
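
The flags-byte loads above use offset +0 on little-endian and +7 on big-endian because the least significant byte of a 64-bit field sits at the lowest address on the former and the highest address on the latter. A small illustrative sketch (least_significant_byte is a made-up helper, not interpreter code):

#include <cstdint>

static uint8_t least_significant_byte(const uint64_t* field) {
  const uint8_t* p = reinterpret_cast<const uint8_t*>(field);
#if defined(VM_LITTLE_ENDIAN)
  return p[0];  // lbz(size, flags_offset + 0, cache)
#else
  return p[7];  // lbz(size, flags_offset + 7, cache)
#endif
}
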
--- a/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -188,8 +188,12 @@
       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
       __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
-      // Big Endian: ((*(cache+indices))>>((1+byte_no)*8))&0xFF
+      // ((*(cache+indices))>>((1+byte_no)*8))&0xFF:
+#if defined(VM_LITTLE_ENDIAN)
+      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp);
+#else
       __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
+#endif
       __ cmpwi(CCR0, Rnew_bc, 0);
       __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
       __ beq(CCR0, L_patch_done);
@@ -348,7 +352,6 @@
   __ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
   __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
   __ bne(CCR0, notInt);
-  __ isync(); // Order load of constant wrt. tags.
   __ lwax(R17_tos, Rcpool, Rscratch1);
   __ push(itos);
   __ b(exit);
@@ -360,7 +363,6 @@
   __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
   __ asm_assert_eq("unexpected type", 0x8765);
 #endif
-  __ isync(); // Order load of constant wrt. tags.
   __ lfsx(F15_ftos, Rcpool, Rscratch1);
   __ push(ftos);
 
@@ -419,13 +421,11 @@
   // Check out Conversions.java for an example.
   // Also ConstantPool::header_size() is 20, which makes it very difficult
   // to double-align double on the constant pool. SG, 11/7/97
-  __ isync(); // Order load of constant wrt. tags.
   __ lfdx(F15_ftos, Rcpool, Rindex);
   __ push(dtos);
   __ b(Lexit);
 
   __ bind(Llong);
-  __ isync(); // Order load of constant wrt. tags.
   __ ldx(R17_tos, Rcpool, Rindex);
   __ push(ltos);
 
@@ -1838,8 +1838,8 @@
   __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));
 
   // Load lo & hi.
-  __ lwz(Rlow_byte, BytesPerInt, Rdef_offset_addr);
-  __ lwz(Rhigh_byte, BytesPerInt * 2, Rdef_offset_addr);
+  __ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
+  __ get_u4(Rhigh_byte, Rdef_offset_addr, 2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
 
   // Check for default case (=index outside [low,high]).
   __ cmpw(CCR0, R17_tos, Rlow_byte);
@@ -1853,12 +1853,17 @@
   __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2);
   __ sldi(Rindex, Rindex, LogBytesPerInt);
   __ addi(Rindex, Rindex, 3 * BytesPerInt);
+#if defined(VM_LITTLE_ENDIAN)
+  __ lwbrx(Roffset, Rdef_offset_addr, Rindex);
+  __ extsw(Roffset, Roffset);
+#else
   __ lwax(Roffset, Rdef_offset_addr, Rindex);
+#endif
   __ b(Ldispatch);
 
   __ bind(Ldefault_case);
   __ profile_switch_default(Rhigh_byte, Rscratch1);
-  __ lwa(Roffset, 0, Rdef_offset_addr);
+  __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
 
   __ bind(Ldispatch);
 
@@ -1874,12 +1879,11 @@
 // Table switch using linear search through cases.
 // Bytecode stream format:
 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
-// Note: Everything is big-endian format here. So on little endian machines, we have to revers offset and count and cmp value.
+// Note: Everything is big-endian format here.
 void TemplateTable::fast_linearswitch() {
   transition(itos, vtos);
 
-  Label Lloop_entry, Lsearch_loop, Lfound, Lcontinue_execution, Ldefault_case;
-
+  Label Lloop_entry, Lsearch_loop, Lcontinue_execution, Ldefault_case;
   Register Rcount           = R3_ARG1,
            Rcurrent_pair    = R4_ARG2,
            Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset.
@@ -1893,47 +1897,40 @@
   __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));
 
   // Setup loop counter and limit.
-  __ lwz(Rcount, BytesPerInt, Rdef_offset_addr);    // Load count.
+  __ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
   __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair.
 
-  // Set up search loop.
-  __ cmpwi(CCR0, Rcount, 0);
-  __ beq(CCR0, Ldefault_case);
-
   __ mtctr(Rcount);
-
-  // linear table search
-  __ bind(Lsearch_loop);
-
-  __ lwz(Rvalue, 0, Rcurrent_pair);
-  __ lwa(Roffset, 1 * BytesPerInt, Rcurrent_pair);
-
-  __ cmpw(CCR0, Rvalue, Rcmp_value);
-  __ beq(CCR0, Lfound);
-
-  __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
-  __ bdnz(Lsearch_loop);
-
-  // default case
+  __ cmpwi(CCR0, Rcount, 0);
+  __ bne(CCR0, Lloop_entry);
+
+  // Default case
   __ bind(Ldefault_case);
-
-  __ lwa(Roffset, 0, Rdef_offset_addr);
+  __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
   if (ProfileInterpreter) {
     __ profile_switch_default(Rdef_offset_addr, Rcount/* scratch */);
-    __ b(Lcontinue_execution);
   }
-
-  // Entry found, skip Roffset bytecodes and continue.
-  __ bind(Lfound);
+  __ b(Lcontinue_execution);
+
+  // Next iteration
+  __ bind(Lsearch_loop);
+  __ bdz(Ldefault_case);
+  __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
+  __ bind(Lloop_entry);
+  __ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned);
+  __ cmpw(CCR0, Rvalue, Rcmp_value);
+  __ bne(CCR0, Lsearch_loop);
+
+  // Found, load offset.
+  __ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed);
+  // Calculate case index and profile
+  __ mfctr(Rcurrent_pair);
   if (ProfileInterpreter) {
-    // Calc the num of the pair we hit. Careful, Rcurrent_pair points 2 ints
-    // beyond the actual current pair due to the auto update load above!
-    __ sub(Rcurrent_pair, Rcurrent_pair, Rdef_offset_addr);
-    __ addi(Rcurrent_pair, Rcurrent_pair, - 2 * BytesPerInt);
-    __ srdi(Rcurrent_pair, Rcurrent_pair, LogBytesPerInt + 1);
+    __ sub(Rcurrent_pair, Rcount, Rcurrent_pair);
     __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch);
-    __ bind(Lcontinue_execution);
   }
+
+  __ bind(Lcontinue_execution);
   __ add(R14_bcp, Roffset, R14_bcp);
   __ dispatch_next(vtos);
 }
@@ -1989,7 +1986,7 @@
 
   // initialize i & j
   __ li(Ri,0);
-  __ lwz(Rj, -BytesPerInt, Rarray);
+  __ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned);
 
   // and start.
   Label entry;
@@ -2006,7 +2003,11 @@
     //   i = h;
     // }
     __ sldi(Rscratch, Rh, log_entry_size);
+#if defined(VM_LITTLE_ENDIAN)
+    __ lwbrx(Rscratch, Rscratch, Rarray);
+#else
     __ lwzx(Rscratch, Rscratch, Rarray);
+#endif
 
     // if (key < current value)
     //   Rh = Rj
@@ -2038,20 +2039,20 @@
   // Ri = value offset
   __ sldi(Ri, Ri, log_entry_size);
   __ add(Ri, Ri, Rarray);
-  __ lwz(Rscratch, 0, Ri);
+  __ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned);
 
   Label not_found;
   // Ri = offset offset
   __ cmpw(CCR0, Rkey, Rscratch);
   __ beq(CCR0, not_found);
   // entry not found -> j = default offset
-  __ lwz(Rj, -2 * BytesPerInt, Rarray);
+  __ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
   __ b(default_case);
 
   __ bind(not_found);
   // entry found -> j = offset
   __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
-  __ lwz(Rj, BytesPerInt, Ri);
+  __ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned);
 
   if (ProfileInterpreter) {
     __ b(continue_execution);
@@ -2146,8 +2147,11 @@
 
   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
   // We are resolved if the indices offset contains the current bytecode.
-  // Big Endian:
+#if defined(VM_LITTLE_ENDIAN)
+  __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
+#else
   __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
+#endif
   // Acquire by cmp-br-isync (see below).
   __ cmpdi(CCR0, Rscratch, (int)bytecode());
   __ beq(CCR0, Lresolved);
@@ -3230,6 +3234,8 @@
   // Load target.
   __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
   __ ldx(Rtarget_method, Rindex, Rrecv_klass);
+  // Argument and return type profiling.
+  __ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true);
   __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */);
 }
 
@@ -3313,6 +3319,8 @@
   __ null_check_throw(Rrecv, -1, Rscratch1);
 
   __ profile_final_call(Rrecv, Rscratch1);
+  // Argument and return type profiling.
+  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
 
   // Do the call.
   __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
@@ -3334,6 +3342,8 @@
   __ null_check_throw(Rreceiver, -1, R11_scratch1);
 
   __ profile_call(R11_scratch1, R12_scratch2);
+  // Argument and return type profiling.
+  __ profile_arguments_type(Rmethod, R11_scratch1, R12_scratch2, false);
   __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
 }
 
@@ -3348,6 +3358,8 @@
   prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);
 
   __ profile_call(R11_scratch1, R12_scratch2);
+  // Argument and return type profiling.
+  __ profile_arguments_type(R19_method, R11_scratch1, R12_scratch2, false);
   __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
 }
 
@@ -3369,6 +3381,8 @@
 
   // Final call case.
   __ profile_final_call(Rtemp1, Rscratch);
+  // Argument and return type profiling.
+  __ profile_arguments_type(Rindex, Rscratch, Rrecv_klass /* scratch */, true);
   // Do the final call - the index (f2) contains the method.
   __ call_from_interpreter(Rindex, Rret, Rscratch, Rrecv_klass /* scratch */);
 
@@ -3420,6 +3434,8 @@
   __ cmpdi(CCR0, Rindex, 0);
   __ beq(CCR0, Lthrow_ame);
   // Found entry. Jump off!
+  // Argument and return type profiling.
+  __ profile_arguments_type(Rindex, Rscratch1, Rscratch2, true);
   __ call_from_interpreter(Rindex, Rret_addr, Rscratch1, Rscratch2);
 
   // Vtable entry was NULL => Throw abstract method error.
@@ -3473,6 +3489,8 @@
   // to be the callsite object the bootstrap method returned. This is passed to a
   // "link" method which does the dispatch (Most likely just grabs the MH stored
   // inside the callsite and does an invokehandle).
+  // Argument and return type profiling.
+  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, false);
   __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
 }
 
@@ -3499,6 +3517,8 @@
   __ profile_final_call(Rrecv, Rscratch1);
 
   // Still no call from handle => We call the method handle interpreter here.
+  // Argument and return type profiling.
+  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
   __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
 }
 
--- a/src/cpu/ppc/vm/vm_version_ppc.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/ppc/vm/vm_version_ppc.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -139,13 +139,44 @@
   }
 
   assert(AllocatePrefetchLines > 0, "invalid value");
-  if (AllocatePrefetchLines < 1) // Set valid value in product VM.
+  if (AllocatePrefetchLines < 1) { // Set valid value in product VM.
     AllocatePrefetchLines = 1; // Conservative value.
+  }
 
-  if (AllocatePrefetchStyle == 3 && AllocatePrefetchDistance < cache_line_size)
+  if (AllocatePrefetchStyle == 3 && AllocatePrefetchDistance < cache_line_size) {
     AllocatePrefetchStyle = 1; // Fall back if inappropriate.
+  }
 
   assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should be positive");
+
+  if (UseCRC32Intrinsics) {
+    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
+      warning("CRC32 intrinsics are not available on this CPU");
+    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
+  }
+
+  // The AES intrinsic stubs require AES instruction support.
+  if (UseAES) {
+    warning("AES instructions are not available on this CPU");
+    FLAG_SET_DEFAULT(UseAES, false);
+  }
+  if (UseAESIntrinsics) {
+    if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
+      warning("AES intrinsics are not available on this CPU");
+    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
+  }
+
+  if (UseSHA) {
+    warning("SHA instructions are not available on this CPU");
+    FLAG_SET_DEFAULT(UseSHA, false);
+  }
+  if (UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics) {
+    warning("SHA intrinsics are not available on this CPU");
+    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
+    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
+    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
+  }
+
 }
 
 void VM_Version::print_features() {
@@ -352,7 +383,7 @@
 
   if (PrintAssembly) {
     ttyLocker ttyl;
-    tty->print_cr("Decoding section size detection stub at " INTPTR_FORMAT " before execution:", code);
+    tty->print_cr("Decoding section size detection stub at " INTPTR_FORMAT " before execution:", p2i(code));
     Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
     tty->print_cr("Time loop1 :%f", loop1_seconds);
     tty->print_cr("Time loop2 :%f", loop2_seconds);
@@ -435,7 +466,7 @@
   // Print the detection code.
   if (PrintAssembly) {
     ttyLocker ttyl;
-    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " before execution:", code);
+    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " before execution:", p2i(code));
     Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
   }
 
@@ -468,7 +499,7 @@
   // Print the detection code.
   if (PrintAssembly) {
     ttyLocker ttyl;
-    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " after execution:", code);
+    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " after execution:", p2i(code));
     Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
   }
 
--- a/src/cpu/sparc/vm/assembler_sparc.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/sparc/vm/assembler_sparc.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -123,6 +123,7 @@
     fpop2_op3    = 0x35,
     impdep1_op3  = 0x36,
     aes3_op3     = 0x36,
+    sha_op3      = 0x36,
     alignaddr_op3  = 0x36,
     faligndata_op3 = 0x36,
     flog3_op3    = 0x36,
@@ -223,7 +224,11 @@
     mwtos_opf          = 0x119,
 
     aes_kexpand0_opf   = 0x130,
-    aes_kexpand2_opf   = 0x131
+    aes_kexpand2_opf   = 0x131,
+
+    sha1_opf           = 0x141,
+    sha256_opf         = 0x142,
+    sha512_opf         = 0x143
   };
 
   enum op5s {
@@ -595,6 +600,11 @@
   // AES crypto instructions supported only on certain processors
   static void aes_only() { assert( VM_Version::has_aes(), "This instruction only works on SPARC with AES instructions support"); }
 
+  // SHA crypto instructions supported only on certain processors
+  static void sha1_only()   { assert( VM_Version::has_sha1(),   "This instruction only works on SPARC with SHA1"); }
+  static void sha256_only() { assert( VM_Version::has_sha256(), "This instruction only works on SPARC with SHA256"); }
+  static void sha512_only() { assert( VM_Version::has_sha512(), "This instruction only works on SPARC with SHA512"); }
+
   // instruction only in VIS1
   static void vis1_only() { assert( VM_Version::has_vis1(), "This instruction only works on SPARC with VIS1"); }
 
@@ -1179,7 +1189,6 @@
                                                u_field(3, 29, 25) | immed(true) | simm(simm13a, 13)); }
   inline void wrfprs( Register d) { v9_only(); emit_int32( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(6, 29, 25)); }
 
-
   //  VIS1 instructions
 
   void alignaddr( Register s1, Register s2, Register d ) { vis1_only(); emit_int32( op(arith_op) | rd(d) | op3(alignaddr_op3) | rs1(s1) | opf(alignaddr_opf) | rs2(s2)); }
@@ -1203,6 +1212,12 @@
   void movwtos( Register s, FloatRegister d ) { vis3_only();  emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::S) | op3(mftoi_op3) | opf(mwtos_opf) | rs2(s)); }
   void movxtod( Register s, FloatRegister d ) { vis3_only();  emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(mftoi_op3) | opf(mxtod_opf) | rs2(s)); }
 
+  // Crypto SHA instructions
+
+  void sha1()   { sha1_only();    emit_int32( op(arith_op) | op3(sha_op3) | opf(sha1_opf)); }
+  void sha256() { sha256_only();  emit_int32( op(arith_op) | op3(sha_op3) | opf(sha256_opf)); }
+  void sha512() { sha512_only();  emit_int32( op(arith_op) | op3(sha_op3) | opf(sha512_opf)); }
+
   // Creation
   Assembler(CodeBuffer* code) : AbstractAssembler(code) {
 #ifdef CHECK_DELAY
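
The new sha1/sha256/sha512 instructions reuse op3 0x36 with opf values 0x141 through 0x143. Assuming the usual SPARC V9 format-3 field positions encoded by op(), op3() and opf() (op in bits 31..30, op3 in 24..19, opf in 13..5), the emitted word can be sketched as follows; the helper is purely illustrative:

#include <cstdint>

static uint32_t sparc_sha_insn(uint32_t opf_val) {   // 0x141 sha1, 0x142 sha256, 0x143 sha512
  const uint32_t arith_op = 2;                       // op field for the arithmetic group
  const uint32_t sha_op3  = 0x36;                    // op3 shared by the crypto/impdep1 group
  return (arith_op << 30) | (sha_op3 << 19) | (opf_val << 5);
}
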
--- a/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -872,21 +872,19 @@
 
 
 void LIRGenerator::do_NewInstance(NewInstance* x) {
+  print_if_not_loaded(x);
+
   // This instruction can be deoptimized in the slow path : use
   // O0 as result register.
   const LIR_Opr reg = result_register_for(x->type());
-#ifndef PRODUCT
-  if (PrintNotLoaded && !x->klass()->is_loaded()) {
-    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
-  }
-#endif
+
   CodeEmitInfo* info = state_for(x, x->state());
   LIR_Opr tmp1 = FrameMap::G1_oop_opr;
   LIR_Opr tmp2 = FrameMap::G3_oop_opr;
   LIR_Opr tmp3 = FrameMap::G4_oop_opr;
   LIR_Opr tmp4 = FrameMap::O1_oop_opr;
   LIR_Opr klass_reg = FrameMap::G5_metadata_opr;
-  new_instance(reg, x->klass(), tmp1, tmp2, tmp3, tmp4, klass_reg, info);
+  new_instance(reg, x->klass(), x->is_unresolved(), tmp1, tmp2, tmp3, tmp4, klass_reg, info);
   LIR_Opr result = rlock_result(x);
   __ move(reg, result);
 }
--- a/src/cpu/sparc/vm/compiledIC_sparc.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/sparc/vm/compiledIC_sparc.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -50,34 +50,6 @@
   return is_icholder_entry(call->destination());
 }
 
-//-----------------------------------------------------------------------------
-// High-level access to an inline cache. Guaranteed to be MT-safe.
-
-CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
-  : _ic_call(call)
-{
-  address ic_call = call->instruction_address();
-
-  assert(ic_call != NULL, "ic_call address must be set");
-  assert(nm != NULL, "must pass nmethod");
-  assert(nm->contains(ic_call), "must be in nmethod");
-
-  // Search for the ic_call at the given address.
-  RelocIterator iter(nm, ic_call, ic_call+1);
-  bool ret = iter.next();
-  assert(ret == true, "relocInfo must exist at this address");
-  assert(iter.addr() == ic_call, "must find ic_call");
-  if (iter.type() == relocInfo::virtual_call_type) {
-    virtual_call_Relocation* r = iter.virtual_call_reloc();
-    _is_optimized = false;
-    _value = nativeMovConstReg_at(r->cached_value());
-  } else {
-    assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
-    _is_optimized = true;
-    _value = NULL;
-  }
-}
-
 // ----------------------------------------------------------------------------
 
 #define __ _masm.
--- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1154,51 +1154,82 @@
     // Hoist any int/ptr/long's in the first 6 to int regs.
     // Hoist any flt/dbl's in the first 16 dbl regs.
     int j = 0;                  // Count of actual args, not HALVES
-    for( int i=0; i<total_args_passed; i++, j++ ) {
-      switch( sig_bt[i] ) {
+    VMRegPair param_array_reg;  // location of the argument in the parameter array
+    for (int i = 0; i < total_args_passed; i++, j++) {
+      param_array_reg.set_bad();
+      switch (sig_bt[i]) {
       case T_BOOLEAN:
       case T_BYTE:
       case T_CHAR:
       case T_INT:
       case T_SHORT:
-        regs[i].set1( int_stk_helper( j ) ); break;
+        regs[i].set1(int_stk_helper(j));
+        break;
       case T_LONG:
-        assert( sig_bt[i+1] == T_VOID, "expecting half" );
+        assert(sig_bt[i+1] == T_VOID, "expecting half");
       case T_ADDRESS: // raw pointers, like current thread, for VM calls
       case T_ARRAY:
       case T_OBJECT:
       case T_METADATA:
-        regs[i].set2( int_stk_helper( j ) );
+        regs[i].set2(int_stk_helper(j));
         break;
       case T_FLOAT:
-        if ( j < 16 ) {
-          // V9ism: floats go in ODD registers
-          regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
-        } else {
-          // V9ism: floats go in ODD stack slot
-          regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
+        // Per SPARC Compliance Definition 2.4.1, page 3P-12 available here
+        // http://www.sparc.org/wp-content/uploads/2014/01/SCD.2.4.1.pdf.gz
+        //
+        // "When a callee prototype exists, and does not indicate variable arguments,
+        // floating-point values assigned to locations %sp+BIAS+128 through %sp+BIAS+248
+        // will be promoted to floating-point registers"
+        //
+        // "Promoted" means that the argument is located in two places: an unused
+        // spill slot in the "parameter array" (starts at %sp+BIAS+128), and a live
+        // float register.  In most cases, there are 6 or fewer arguments of any type,
+        // and the standard parameter array slots (%sp+BIAS+128 to %sp+BIAS+176 exclusive)
+        // serve as shadow slots.  Per the spec, floating-point registers %d6 to %d16
+        // require slots beyond that (up to %sp+BIAS+248).
+        //
+        {
+          // V9ism: floats go in ODD registers and stack slots
+          int float_index = 1 + (j << 1);
+          param_array_reg.set1(VMRegImpl::stack2reg(float_index));
+          if (j < 16) {
+            regs[i].set1(as_FloatRegister(float_index)->as_VMReg());
+          } else {
+            regs[i] = param_array_reg;
+          }
         }
         break;
       case T_DOUBLE:
-        assert( sig_bt[i+1] == T_VOID, "expecting half" );
-        if ( j < 16 ) {
-          // V9ism: doubles go in EVEN/ODD regs
-          regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
-        } else {
-          // V9ism: doubles go in EVEN/ODD stack slots
-          regs[i].set2(VMRegImpl::stack2reg(j<<1));
+        {
+          assert(sig_bt[i + 1] == T_VOID, "expecting half");
+          // V9ism: doubles go in EVEN/ODD regs and stack slots
+          int double_index = (j << 1);
+          param_array_reg.set2(VMRegImpl::stack2reg(double_index));
+          if (j < 16) {
+            regs[i].set2(as_FloatRegister(double_index)->as_VMReg());
+          } else {
+            // V9ism: doubles go in EVEN/ODD stack slots
+            regs[i] = param_array_reg;
+          }
         }
         break;
-      case T_VOID:  regs[i].set_bad(); j--; break; // Do not count HALVES
+      case T_VOID:
+        regs[i].set_bad();
+        j--;
+        break; // Do not count HALVES
       default:
         ShouldNotReachHere();
       }
-      if (regs[i].first()->is_stack()) {
-        int off =  regs[i].first()->reg2stack();
+      // Keep track of the deepest parameter array slot.
+      if (!param_array_reg.first()->is_valid()) {
+        param_array_reg = regs[i];
+      }
+      if (param_array_reg.first()->is_stack()) {
+        int off = param_array_reg.first()->reg2stack();
         if (off > max_stack_slots) max_stack_slots = off;
       }
-      if (regs[i].second()->is_stack()) {
-        int off =  regs[i].second()->reg2stack();
+      if (param_array_reg.second()->is_stack()) {
+        int off = param_array_reg.second()->reg2stack();
         if (off > max_stack_slots) max_stack_slots = off;
       }
     }
@@ -1206,8 +1237,8 @@
 #else // _LP64
     // V8 convention: first 6 things in O-regs, rest on stack.
     // Alignment is willy-nilly.
-    for( int i=0; i<total_args_passed; i++ ) {
-      switch( sig_bt[i] ) {
+    for (int i = 0; i < total_args_passed; i++) {
+      switch (sig_bt[i]) {
       case T_ADDRESS: // raw pointers, like current thread, for VM calls
       case T_ARRAY:
       case T_BOOLEAN:
@@ -1218,23 +1249,23 @@
       case T_OBJECT:
       case T_METADATA:
       case T_SHORT:
-        regs[i].set1( int_stk_helper( i ) );
+        regs[i].set1(int_stk_helper(i));
         break;
       case T_DOUBLE:
       case T_LONG:
-        assert( sig_bt[i+1] == T_VOID, "expecting half" );
-        regs[i].set_pair( int_stk_helper( i+1 ), int_stk_helper( i ) );
+        assert(sig_bt[i + 1] == T_VOID, "expecting half");
+        regs[i].set_pair(int_stk_helper(i + 1), int_stk_helper(i));
         break;
       case T_VOID: regs[i].set_bad(); break;
       default:
         ShouldNotReachHere();
       }
       if (regs[i].first()->is_stack()) {
-        int off =  regs[i].first()->reg2stack();
+        int off = regs[i].first()->reg2stack();
         if (off > max_stack_slots) max_stack_slots = off;
       }
       if (regs[i].second()->is_stack()) {
-        int off =  regs[i].second()->reg2stack();
+        int off = regs[i].second()->reg2stack();
         if (off > max_stack_slots) max_stack_slots = off;
       }
     }
@@ -1383,11 +1414,10 @@
     const Register rOop = src.first()->as_Register();
     const Register rHandle = L5;
     int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
-    int offset = oop_slot*VMRegImpl::stack_slot_size;
-    Label skip;
+    int offset = oop_slot * VMRegImpl::stack_slot_size;
     __ st_ptr(rOop, SP, offset + STACK_BIAS);
     if (is_receiver) {
-      *receiver_offset = oop_slot * VMRegImpl::stack_slot_size;
+       *receiver_offset = offset;
     }
     map->set_oop(VMRegImpl::stack2reg(oop_slot));
     __ add(SP, offset + STACK_BIAS, rHandle);
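
Illustrative sketch (not part of the changeset): the comment block added to c_calling_convention() above describes how, under the 64-bit SPARC ABI, the first 16 floating-point arguments live in two places at once, a shadow slot in the parameter array and a live float register, while later FP arguments get a parameter-array slot only. The following minimal C++ program, with hypothetical names (FpArgLocation, locate_fp_arg) that are not HotSpot code, shows the slot/register assignment it describes:

    #include <cstdio>

    // j counts whole arguments (T_VOID halves excluded), matching the loop above.
    // Floats occupy ODD 32-bit slots, doubles start on EVEN slots; only the
    // first 16 FP arguments are additionally promoted to a float register.
    struct FpArgLocation {
      int  param_array_slot;  // 32-bit stack-slot index inside the parameter array
      bool promoted;          // also passed in a floating-point register
    };

    static FpArgLocation locate_fp_arg(int j, bool is_double) {
      FpArgLocation loc;
      loc.param_array_slot = is_double ? (j << 1) : (1 + (j << 1));
      loc.promoted         = (j < 16);
      return loc;
    }

    int main() {
      for (int j = 0; j < 18; j++) {
        FpArgLocation d = locate_fp_arg(j, /*is_double=*/ true);
        std::printf("double arg %2d -> param-array slot %2d%s\n",
                    j, d.param_array_slot,
                    d.promoted ? " (also promoted to a float register)"
                               : " (stack only)");
      }
      return 0;
    }
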
--- a/src/cpu/sparc/vm/sparc.ad	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/sparc/vm/sparc.ad	Fri Apr 10 16:58:26 2015 -0700
@@ -1989,7 +1989,7 @@
 // to implement the UseStrictFP mode.
 const bool Matcher::strict_fp_requires_explicit_rounding = false;
 
-// Are floats conerted to double when stored to stack during deoptimization?
+// Are floats converted to double when stored to stack during deoptimization?
 // Sparc does not handle callee-save floats.
 bool Matcher::float_in_double() { return false; }
 
@@ -3218,7 +3218,7 @@
 //         are owned by the CALLEE.  Holes should not be necessary in the
 //         incoming area, as the Java calling convention is completely under
 //         the control of the AD file.  Doubles can be sorted and packed to
-//         avoid holes.  Holes in the outgoing arguments may be nessecary for
+//         avoid holes.  Holes in the outgoing arguments may be necessary for
 //         varargs C calling conventions.
 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 //         even aligned with pad0 as needed.
@@ -3284,7 +3284,7 @@
   %}
 
   // Body of function which returns an OptoRegs array locating
-  // arguments either in registers or in stack slots for callin
+  // arguments either in registers or in stack slots for calling
   // C.
   c_calling_convention %{
     // This is obviously always outgoing
@@ -6184,7 +6184,11 @@
   ins_cost(DEFAULT_COST * 3/2);
   format %{ "SET    $con,$dst\t! non-oop ptr" %}
   ins_encode %{
-    __ set($con$$constant, $dst$$Register);
+    if (_opnds[1]->constant_reloc() == relocInfo::metadata_type) {
+      __ set_metadata_constant((Metadata*)$con$$constant, $dst$$Register);
+    } else {
+      __ set($con$$constant, $dst$$Register);
+    }
   %}
   ins_pipe(loadConP);
 %}
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -4575,6 +4575,219 @@
     return start;
   }
 
+  address generate_sha1_implCompress(bool multi_block, const char *name) {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ pc();
+
+    Label L_sha1_loop, L_sha1_unaligned_input, L_sha1_unaligned_input_loop;
+    int i;
+
+    Register buf   = O0; // byte[] source+offset
+    Register state = O1; // int[]  SHA.state
+    Register ofs   = O2; // int    offset
+    Register limit = O3; // int    limit
+
+    // load state into F0-F4
+    for (i = 0; i < 5; i++) {
+      __ ldf(FloatRegisterImpl::S, state, i*4, as_FloatRegister(i));
+    }
+
+    __ andcc(buf, 7, G0);
+    __ br(Assembler::notZero, false, Assembler::pn, L_sha1_unaligned_input);
+    __ delayed()->nop();
+
+    __ BIND(L_sha1_loop);
+    // load buf into F8-F22
+    for (i = 0; i < 8; i++) {
+      __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8));
+    }
+    __ sha1();
+    if (multi_block) {
+      __ add(ofs, 64, ofs);
+      __ add(buf, 64, buf);
+      __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha1_loop);
+      __ mov(ofs, O0); // to be returned
+    }
+
+    // store F0-F4 into state and return
+    for (i = 0; i < 4; i++) {
+      __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4);
+    }
+    __ retl();
+    __ delayed()->stf(FloatRegisterImpl::S, F4, state, 0x10);
+
+    __ BIND(L_sha1_unaligned_input);
+    __ alignaddr(buf, G0, buf);
+
+    __ BIND(L_sha1_unaligned_input_loop);
+    // load buf into F8-F22
+    for (i = 0; i < 9; i++) {
+      __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8));
+    }
+    for (i = 0; i < 8; i++) {
+      __ faligndata(as_FloatRegister(i*2 + 8), as_FloatRegister(i*2 + 10), as_FloatRegister(i*2 + 8));
+    }
+    __ sha1();
+    if (multi_block) {
+      __ add(ofs, 64, ofs);
+      __ add(buf, 64, buf);
+      __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha1_unaligned_input_loop);
+      __ mov(ofs, O0); // to be returned
+    }
+
+    // store F0-F4 into state and return
+    for (i = 0; i < 4; i++) {
+      __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4);
+    }
+    __ retl();
+    __ delayed()->stf(FloatRegisterImpl::S, F4, state, 0x10);
+
+    return start;
+  }
+
+  address generate_sha256_implCompress(bool multi_block, const char *name) {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ pc();
+
+    Label L_sha256_loop, L_sha256_unaligned_input, L_sha256_unaligned_input_loop;
+    int i;
+
+    Register buf   = O0; // byte[] source+offset
+    Register state = O1; // int[]  SHA2.state
+    Register ofs   = O2; // int    offset
+    Register limit = O3; // int    limit
+
+    // load state into F0-F7
+    for (i = 0; i < 8; i++) {
+      __ ldf(FloatRegisterImpl::S, state, i*4, as_FloatRegister(i));
+    }
+
+    __ andcc(buf, 7, G0);
+    __ br(Assembler::notZero, false, Assembler::pn, L_sha256_unaligned_input);
+    __ delayed()->nop();
+
+    __ BIND(L_sha256_loop);
+    // load buf into F8-F22
+    for (i = 0; i < 8; i++) {
+      __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8));
+    }
+    __ sha256();
+    if (multi_block) {
+      __ add(ofs, 64, ofs);
+      __ add(buf, 64, buf);
+      __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha256_loop);
+      __ mov(ofs, O0); // to be returned
+    }
+
+    // store F0-F7 into state and return
+    for (i = 0; i < 7; i++) {
+      __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4);
+    }
+    __ retl();
+    __ delayed()->stf(FloatRegisterImpl::S, F7, state, 0x1c);
+
+    __ BIND(L_sha256_unaligned_input);
+    __ alignaddr(buf, G0, buf);
+
+    __ BIND(L_sha256_unaligned_input_loop);
+    // load buf into F8-F22
+    for (i = 0; i < 9; i++) {
+      __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8));
+    }
+    for (i = 0; i < 8; i++) {
+      __ faligndata(as_FloatRegister(i*2 + 8), as_FloatRegister(i*2 + 10), as_FloatRegister(i*2 + 8));
+    }
+    __ sha256();
+    if (multi_block) {
+      __ add(ofs, 64, ofs);
+      __ add(buf, 64, buf);
+      __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha256_unaligned_input_loop);
+      __ mov(ofs, O0); // to be returned
+    }
+
+    // store F0-F7 into state and return
+    for (i = 0; i < 7; i++) {
+      __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4);
+    }
+    __ retl();
+    __ delayed()->stf(FloatRegisterImpl::S, F7, state, 0x1c);
+
+    return start;
+  }
+
+  address generate_sha512_implCompress(bool multi_block, const char *name) {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ pc();
+
+    Label L_sha512_loop, L_sha512_unaligned_input, L_sha512_unaligned_input_loop;
+    int i;
+
+    Register buf   = O0; // byte[] source+offset
+    Register state = O1; // long[] SHA5.state
+    Register ofs   = O2; // int    offset
+    Register limit = O3; // int    limit
+
+    // load state into F0-F14
+    for (i = 0; i < 8; i++) {
+      __ ldf(FloatRegisterImpl::D, state, i*8, as_FloatRegister(i*2));
+    }
+
+    __ andcc(buf, 7, G0);
+    __ br(Assembler::notZero, false, Assembler::pn, L_sha512_unaligned_input);
+    __ delayed()->nop();
+
+    __ BIND(L_sha512_loop);
+    // load buf into F16-F46
+    for (i = 0; i < 16; i++) {
+      __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 16));
+    }
+    __ sha512();
+    if (multi_block) {
+      __ add(ofs, 128, ofs);
+      __ add(buf, 128, buf);
+      __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha512_loop);
+      __ mov(ofs, O0); // to be returned
+    }
+
+    // store F0-F14 into state and return
+    for (i = 0; i < 7; i++) {
+      __ stf(FloatRegisterImpl::D, as_FloatRegister(i*2), state, i*8);
+    }
+    __ retl();
+    __ delayed()->stf(FloatRegisterImpl::D, F14, state, 0x38);
+
+    __ BIND(L_sha512_unaligned_input);
+    __ alignaddr(buf, G0, buf);
+
+    __ BIND(L_sha512_unaligned_input_loop);
+    // load buf into F16-F46
+    for (i = 0; i < 17; i++) {
+      __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 16));
+    }
+    for (i = 0; i < 16; i++) {
+      __ faligndata(as_FloatRegister(i*2 + 16), as_FloatRegister(i*2 + 18), as_FloatRegister(i*2 + 16));
+    }
+    __ sha512();
+    if (multi_block) {
+      __ add(ofs, 128, ofs);
+      __ add(buf, 128, buf);
+      __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha512_unaligned_input_loop);
+      __ mov(ofs, O0); // to be returned
+    }
+
+    // store F0-F14 into state and return
+    for (i = 0; i < 7; i++) {
+      __ stf(FloatRegisterImpl::D, as_FloatRegister(i*2), state, i*8);
+    }
+    __ retl();
+    __ delayed()->stf(FloatRegisterImpl::D, F14, state, 0x38);
+
+    return start;
+  }
+
   void generate_initial() {
     // Generates all stubs and initializes the entry points
 
@@ -4647,6 +4860,20 @@
       StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
       StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
     }
+
+    // generate SHA1/SHA256/SHA512 intrinsics code
+    if (UseSHA1Intrinsics) {
+      StubRoutines::_sha1_implCompress     = generate_sha1_implCompress(false,   "sha1_implCompress");
+      StubRoutines::_sha1_implCompressMB   = generate_sha1_implCompress(true,    "sha1_implCompressMB");
+    }
+    if (UseSHA256Intrinsics) {
+      StubRoutines::_sha256_implCompress   = generate_sha256_implCompress(false, "sha256_implCompress");
+      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true,  "sha256_implCompressMB");
+    }
+    if (UseSHA512Intrinsics) {
+      StubRoutines::_sha512_implCompress   = generate_sha512_implCompress(false, "sha512_implCompress");
+      StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true,  "sha512_implCompressMB");
+    }
   }
 
 
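
Illustrative sketch (not part of the changeset): the three SHA stubs above share one multi-block contract with their Java callers: fold whole message blocks into the hash state while ofs <= limit, then return the updated offset (placed in O0). A minimal C++ rendering of that control flow, assuming a placeholder compress_block function standing in for the sha1()/sha256()/sha512() hardware instruction:

    #include <cstdint>

    // block_size is 64 bytes for SHA-1/SHA-256 and 128 bytes for SHA-512;
    // 'state' points at the int[]/long[] hash state owned by the Java side.
    typedef void (*compress_fn)(const uint8_t* block, void* state);

    int compress_multi_block(const uint8_t* buf, void* state,
                             int ofs, int limit, int block_size,
                             compress_fn compress_block) {
      do {
        compress_block(buf, state);  // one 512- or 1024-bit block
        buf += block_size;
        ofs += block_size;
      } while (ofs <= limit);        // mirrors cmp_and_brx_short(ofs, limit, lessEqual)
      return ofs;                    // the multi-block stubs return the new offset
    }
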
--- a/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -41,7 +41,7 @@
 enum /* platform_dependent_constants */ {
   // %%%%%%%% May be able to shrink this a lot
   code_size1 = 20000,           // simply increase if too small (assembler will crash if too small)
-  code_size2 = 22000            // simply increase if too small (assembler will crash if too small)
+  code_size2 = 23000            // simply increase if too small (assembler will crash if too small)
 };
 
 class Sparc {
--- a/src/cpu/sparc/vm/vm_version_sparc.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/sparc/vm/vm_version_sparc.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -37,6 +37,7 @@
 
 int VM_Version::_features = VM_Version::unknown_m;
 const char* VM_Version::_features_str = "";
+unsigned int VM_Version::_L2_cache_line_size = 0;
 
 void VM_Version::initialize() {
   _features = determine_features();
@@ -197,7 +198,7 @@
   }
 
   assert(BlockZeroingLowLimit > 0, "invalid value");
-  if (has_block_zeroing()) {
+  if (has_block_zeroing() && cache_line_size > 0) {
     if (FLAG_IS_DEFAULT(UseBlockZeroing)) {
       FLAG_SET_DEFAULT(UseBlockZeroing, true);
     }
@@ -207,7 +208,7 @@
   }
 
   assert(BlockCopyLowLimit > 0, "invalid value");
-  if (has_block_zeroing()) { // has_blk_init() && is_T4(): core's local L2 cache
+  if (has_block_zeroing() && cache_line_size > 0) { // has_blk_init() && is_T4(): core's local L2 cache
     if (FLAG_IS_DEFAULT(UseBlockCopy)) {
       FLAG_SET_DEFAULT(UseBlockCopy, true);
     }
@@ -234,7 +235,7 @@
   assert((OptoLoopAlignment % relocInfo::addr_unit()) == 0, "alignment is not a multiple of NOP size");
 
   char buf[512];
-  jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+  jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
                (has_v9() ? ", v9" : (has_v8() ? ", v8" : "")),
                (has_hardware_popc() ? ", popc" : ""),
                (has_vis1() ? ", vis1" : ""),
@@ -243,6 +244,9 @@
                (has_blk_init() ? ", blk_init" : ""),
                (has_cbcond() ? ", cbcond" : ""),
                (has_aes() ? ", aes" : ""),
+               (has_sha1() ? ", sha1" : ""),
+               (has_sha256() ? ", sha256" : ""),
+               (has_sha512() ? ", sha512" : ""),
                (is_ultra3() ? ", ultra3" : ""),
                (is_sun4v() ? ", sun4v" : ""),
                (is_niagara_plus() ? ", niagara_plus" : (is_niagara() ? ", niagara" : "")),
@@ -301,12 +305,65 @@
     }
   }
 
+  // SHA1, SHA256, and SHA512 instructions were added to SPARC T-series at different times
+  if (has_sha1() || has_sha256() || has_sha512()) {
+    if (UseVIS > 0) { // SHA intrinsics use VIS1 instructions
+      if (FLAG_IS_DEFAULT(UseSHA)) {
+        FLAG_SET_DEFAULT(UseSHA, true);
+      }
+    } else {
+      if (UseSHA) {
+        warning("SPARC SHA intrinsics require VIS1 instruction support. Intrinsics will be disabled.");
+        FLAG_SET_DEFAULT(UseSHA, false);
+      }
+    }
+  } else if (UseSHA) {
+    warning("SHA instructions are not available on this CPU");
+    FLAG_SET_DEFAULT(UseSHA, false);
+  }
+
+  if (!UseSHA) {
+    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
+    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
+    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
+  } else {
+    if (has_sha1()) {
+      if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
+        FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
+      }
+    } else if (UseSHA1Intrinsics) {
+      warning("SHA1 instruction is not available on this CPU.");
+      FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
+    }
+    if (has_sha256()) {
+      if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
+        FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
+      }
+    } else if (UseSHA256Intrinsics) {
+      warning("SHA256 instruction (for SHA-224 and SHA-256) is not available on this CPU.");
+      FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
+    }
+
+    if (has_sha512()) {
+      if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
+        FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
+      }
+    } else if (UseSHA512Intrinsics) {
+      warning("SHA512 instruction (for SHA-384 and SHA-512) is not available on this CPU.");
+      FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
+    }
+    if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
+      FLAG_SET_DEFAULT(UseSHA, false);
+    }
+  }
+
   if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
     (cache_line_size > ContendedPaddingWidth))
     ContendedPaddingWidth = cache_line_size;
 
 #ifndef PRODUCT
   if (PrintMiscellaneous && Verbose) {
+    tty->print_cr("L2 cache line size: %u", L2_cache_line_size());
     tty->print("Allocation");
     if (AllocatePrefetchStyle <= 0) {
       tty->print_cr(": no prefetching");
--- a/src/cpu/sparc/vm/vm_version_sparc.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/sparc/vm/vm_version_sparc.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,7 +51,10 @@
     T_family             = 16,
     T1_model             = 17,
     sparc5_instructions  = 18,
-    aes_instructions     = 19
+    aes_instructions     = 19,
+    sha1_instruction     = 20,
+    sha256_instruction   = 21,
+    sha512_instruction   = 22
   };
 
   enum Feature_Flag_Set {
@@ -78,6 +81,9 @@
     T1_model_m              = 1 << T1_model,
     sparc5_instructions_m   = 1 << sparc5_instructions,
     aes_instructions_m      = 1 << aes_instructions,
+    sha1_instruction_m      = 1 << sha1_instruction,
+    sha256_instruction_m    = 1 << sha256_instruction,
+    sha512_instruction_m    = 1 << sha512_instruction,
 
     generic_v8_m        = v8_instructions_m | hardware_mul32_m | hardware_div32_m | hardware_fsmuld_m,
     generic_v9_m        = generic_v8_m | v9_instructions_m,
@@ -91,6 +97,9 @@
   static int  _features;
   static const char* _features_str;
 
+  static unsigned int _L2_cache_line_size;
+  static unsigned int L2_cache_line_size() { return _L2_cache_line_size; }
+
   static void print_features();
   static int  determine_features();
   static int  platform_features(int features);
@@ -130,6 +139,9 @@
   static bool has_cbcond()              { return (_features & cbcond_instructions_m) != 0; }
   static bool has_sparc5_instr()        { return (_features & sparc5_instructions_m) != 0; }
   static bool has_aes()                 { return (_features & aes_instructions_m) != 0; }
+  static bool has_sha1()                { return (_features & sha1_instruction_m) != 0; }
+  static bool has_sha256()              { return (_features & sha256_instruction_m) != 0; }
+  static bool has_sha512()              { return (_features & sha512_instruction_m) != 0; }
 
   static bool supports_compare_and_exchange()
                                         { return has_v9(); }
@@ -159,9 +171,8 @@
 
   static const char* cpu_features()     { return _features_str; }
 
-  static intx prefetch_data_size()  {
-    return is_T4() && !is_T7() ? 32 : 64;  // default prefetch block size on sparc
-  }
+  // default prefetch block size on sparc
+  static intx prefetch_data_size()      { return L2_cache_line_size();  }
 
   // Prefetch
   static intx prefetch_copy_interval_in_bytes() {
--- a/src/cpu/x86/vm/assembler_x86.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -3868,6 +3868,15 @@
 }
 
 // Carry-Less Multiplication Quadword
+void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) {
+  assert(VM_Version::supports_clmul(), "");
+  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
+  emit_int8(0x44);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8((unsigned char)mask);
+}
+
+// Carry-Less Multiplication Quadword
 void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
   assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");
   bool vector256 = false;
@@ -4942,6 +4951,26 @@
   emit_arith(0x03, 0xC0, dst, src);
 }
 
+void Assembler::adcxq(Register dst, Register src) {
+  //assert(VM_Version::supports_adx(), "adx instructions not supported");
+  emit_int8((unsigned char)0x66);
+  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
+  emit_int8(0x0F);
+  emit_int8(0x38);
+  emit_int8((unsigned char)0xF6);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::adoxq(Register dst, Register src) {
+  //assert(VM_Version::supports_adx(), "adx instructions not supported");
+  emit_int8((unsigned char)0xF3);
+  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
+  emit_int8(0x0F);
+  emit_int8(0x38);
+  emit_int8((unsigned char)0xF6);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
 void Assembler::andq(Address dst, int32_t imm32) {
   InstructionMark im(this);
   prefixq(dst);
@@ -5449,6 +5478,26 @@
   emit_int8((unsigned char)(0xC0 | encode));
 }
 
+void Assembler::mulq(Address src) {
+  InstructionMark im(this);
+  prefixq(src);
+  emit_int8((unsigned char)0xF7);
+  emit_operand(rsp, src);
+}
+
+void Assembler::mulq(Register src) {
+  int encode = prefixq_and_encode(src->encoding());
+  emit_int8((unsigned char)0xF7);
+  emit_int8((unsigned char)(0xE0 | encode));
+}
+
+void Assembler::mulxq(Register dst1, Register dst2, Register src) {
+  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
+  int encode = vex_prefix_and_encode(dst1->encoding(), dst2->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, true, false);
+  emit_int8((unsigned char)0xF6);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
 void Assembler::negq(Register dst) {
   int encode = prefixq_and_encode(dst->encoding());
   emit_int8((unsigned char)0xF7);
@@ -5577,6 +5626,28 @@
     emit_int8(imm8);
   }
 }
+
+void Assembler::rorq(Register dst, int imm8) {
+  assert(isShiftCount(imm8 >> 1), "illegal shift count");
+  int encode = prefixq_and_encode(dst->encoding());
+  if (imm8 == 1) {
+    emit_int8((unsigned char)0xD1);
+    emit_int8((unsigned char)(0xC8 | encode));
+  } else {
+    emit_int8((unsigned char)0xC1);
+    emit_int8((unsigned char)(0xc8 | encode));
+    emit_int8(imm8);
+  }
+}
+
+void Assembler::rorxq(Register dst, Register src, int imm8) {
+  assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
+  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, true, false);
+  emit_int8((unsigned char)0xF0);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8(imm8);
+}
+
 void Assembler::sarq(Register dst, int imm8) {
   assert(isShiftCount(imm8 >> 1), "illegal shift count");
   int encode = prefixq_and_encode(dst->encoding());
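
Illustrative sketch (not part of the changeset): the new rorq(Register, int) emitter above produces the standard REX.W-prefixed ROR encoding. The small program below, using hypothetical helper names, reproduces the byte sequence for a register whose encoding fits in three bits (rax through rdi); higher registers would additionally need the REX.B bit:

    #include <cstdio>
    #include <vector>

    // Mirrors the two immediate paths of Assembler::rorq(Register, int).
    static std::vector<unsigned char> encode_rorq(int reg_low3, int imm8) {
      std::vector<unsigned char> bytes;
      bytes.push_back(0x48);                 // REX.W: 64-bit operand size
      if (imm8 == 1) {
        bytes.push_back(0xD1);               // ROR r/m64, 1
        bytes.push_back(0xC8 | reg_low3);    // ModRM: mod=11, reg=/1, rm=register
      } else {
        bytes.push_back(0xC1);               // ROR r/m64, imm8
        bytes.push_back(0xC8 | reg_low3);
        bytes.push_back((unsigned char)imm8);
      }
      return bytes;
    }

    int main() {
      for (unsigned char b : encode_rorq(/*rax=*/ 0, 32)) std::printf("%02X ", b);
      std::printf("  (ror rax, 32)\n");      // expected: 48 C1 C8 20
      return 0;
    }
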
--- a/src/cpu/x86/vm/assembler_x86.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/x86/vm/assembler_x86.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -892,6 +892,14 @@
   void addq(Register dst, Address src);
   void addq(Register dst, Register src);
 
+#ifdef _LP64
+  // Add Unsigned Integers with Carry Flag
+  void adcxq(Register dst, Register src);
+
+  // Add Unsigned Integers with Overflow Flag
+  void adoxq(Register dst, Register src);
+#endif
+
   void addr_nop_4();
   void addr_nop_5();
   void addr_nop_7();
@@ -1208,19 +1216,20 @@
   void idivl(Register src);
   void divl(Register src); // Unsigned division
 
+#ifdef _LP64
   void idivq(Register src);
+#endif
 
   void imull(Register dst, Register src);
   void imull(Register dst, Register src, int value);
   void imull(Register dst, Address src);
 
+#ifdef _LP64
   void imulq(Register dst, Register src);
   void imulq(Register dst, Register src, int value);
-#ifdef _LP64
   void imulq(Register dst, Address src);
 #endif
 
-
   // jcc is the generic conditional branch generator to run-
   // time routines, jcc is used for branches to labels. jcc
   // takes a branch opcode (cc) and a label (L) and generates
@@ -1412,9 +1421,16 @@
   void movzwq(Register dst, Register src);
 #endif
 
+  // Unsigned multiply with RAX destination register
   void mull(Address src);
   void mull(Register src);
 
+#ifdef _LP64
+  void mulq(Address src);
+  void mulq(Register src);
+  void mulxq(Register dst1, Register dst2, Register src);
+#endif
+
   // Multiply Scalar Double-Precision Floating-Point Values
   void mulsd(XMMRegister dst, Address src);
   void mulsd(XMMRegister dst, XMMRegister src);
@@ -1545,6 +1561,11 @@
 
   void ret(int imm16);
 
+#ifdef _LP64
+  void rorq(Register dst, int imm8);
+  void rorxq(Register dst, Register src, int imm8);
+#endif
+
   void sahf();
 
   void sarl(Register dst, int imm8);
@@ -1841,6 +1862,7 @@
   void vpbroadcastd(XMMRegister dst, XMMRegister src);
 
   // Carry-Less Multiplication Quadword
+  void pclmulqdq(XMMRegister dst, XMMRegister src, int mask);
   void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);
 
   // AVX instruction which is used to clear upper 128 bits of YMM registers and
--- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1085,14 +1085,11 @@
 
 
 void LIRGenerator::do_NewInstance(NewInstance* x) {
-#ifndef PRODUCT
-  if (PrintNotLoaded && !x->klass()->is_loaded()) {
-    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
-  }
-#endif
+  print_if_not_loaded(x);
+
   CodeEmitInfo* info = state_for(x, x->state());
   LIR_Opr reg = result_register_for(x->type());
-  new_instance(reg, x->klass(),
+  new_instance(reg, x->klass(), x->is_unresolved(),
                        FrameMap::rcx_oop_opr,
                        FrameMap::rdi_oop_opr,
                        FrameMap::rsi_oop_opr,
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -675,7 +675,7 @@
   case handle_exception_nofpu_id:
   case handle_exception_id:
     // At this point all registers MAY be live.
-    oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id);
+    oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id);
     break;
   case handle_exception_from_callee_id: {
     // At this point all registers except exception oop (RAX) and
@@ -748,7 +748,7 @@
   case handle_exception_nofpu_id:
   case handle_exception_id:
     // Restore the registers that were saved at the beginning.
-    restore_live_registers(sasm, id == handle_exception_nofpu_id);
+    restore_live_registers(sasm, id != handle_exception_nofpu_id);
     break;
   case handle_exception_from_callee_id:
     // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
--- a/src/cpu/x86/vm/compiledIC_x86.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/x86/vm/compiledIC_x86.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -47,34 +47,6 @@
   return is_icholder_entry(call->destination());
 }
 
-//-----------------------------------------------------------------------------
-// High-level access to an inline cache. Guaranteed to be MT-safe.
-
-CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
-  : _ic_call(call)
-{
-  address ic_call = call->instruction_address();
-
-  assert(ic_call != NULL, "ic_call address must be set");
-  assert(nm != NULL, "must pass nmethod");
-  assert(nm->contains(ic_call), "must be in nmethod");
-
-  // Search for the ic_call at the given address.
-  RelocIterator iter(nm, ic_call, ic_call+1);
-  bool ret = iter.next();
-  assert(ret == true, "relocInfo must exist at this address");
-  assert(iter.addr() == ic_call, "must find ic_call");
-  if (iter.type() == relocInfo::virtual_call_type) {
-    virtual_call_Relocation* r = iter.virtual_call_reloc();
-    _is_optimized = false;
-    _value = nativeMovConstReg_at(r->cached_value());
-  } else {
-    assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
-    _is_optimized = true;
-    _value = NULL;
-  }
-}
-
 // ----------------------------------------------------------------------------
 
 #define __ _masm.
--- a/src/cpu/x86/vm/globals_x86.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/x86/vm/globals_x86.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -131,16 +131,16 @@
           "Use fast-string operation for zeroing: rep stosb")               \
                                                                             \
   /* Use Restricted Transactional Memory for lock eliding */                \
-  experimental(bool, UseRTMLocking, false,                                  \
+  product(bool, UseRTMLocking, false,                                       \
           "Enable RTM lock eliding for inflated locks in compiled code")    \
                                                                             \
   experimental(bool, UseRTMForStackLocks, false,                            \
           "Enable RTM lock eliding for stack locks in compiled code")       \
                                                                             \
-  experimental(bool, UseRTMDeopt, false,                                    \
+  product(bool, UseRTMDeopt, false,                                         \
           "Perform deopt and recompilation based on RTM abort ratio")       \
                                                                             \
-  experimental(uintx, RTMRetryCount, 5,                                     \
+  product(uintx, RTMRetryCount, 5,                                          \
           "Number of RTM retries on lock abort or busy")                    \
                                                                             \
   experimental(intx, RTMSpinLoopCount, 100,                                 \
@@ -177,6 +177,8 @@
           "Use count trailing zeros instruction")                           \
                                                                             \
   product(bool, UseBMI1Instructions, false,                                 \
-          "Use BMI instructions")
-
+          "Use BMI1 instructions")                                          \
+                                                                            \
+  product(bool, UseBMI2Instructions, false,                                 \
+          "Use BMI2 instructions")
 #endif // CPU_X86_VM_GLOBALS_X86_HPP
--- a/src/cpu/x86/vm/macroAssembler_x86.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/x86/vm/macroAssembler_x86.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1769,7 +1769,7 @@
     // at [FETCH], below, will never observe a biased encoding (*101b).
     // If this invariant is not held we risk exclusion (safety) failure.
     if (UseBiasedLocking && !UseOptoBiasInlining) {
-      biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, counters);
+      biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, counters);
     }
 
 #if INCLUDE_RTM_OPT
@@ -7293,6 +7293,467 @@
   bind(L_done);
 }
 
+#ifdef _LP64
+/**
+ * Helper for multiply_to_len().
+ */
+void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
+  addq(dest_lo, src1);
+  adcq(dest_hi, 0);
+  addq(dest_lo, src2);
+  adcq(dest_hi, 0);
+}
+
+/**
+ * Multiply 64 bit by 64 bit first loop.
+ */
+void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
+                                           Register y, Register y_idx, Register z,
+                                           Register carry, Register product,
+                                           Register idx, Register kdx) {
+  //
+  //  jlong carry, x[], y[], z[];
+  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
+  //    huge_128 product = y[idx] * x[xstart] + carry;
+  //    z[kdx] = (jlong)product;
+  //    carry  = (jlong)(product >>> 64);
+  //  }
+  //  z[xstart] = carry;
+  //
+
+  Label L_first_loop, L_first_loop_exit;
+  Label L_one_x, L_one_y, L_multiply;
+
+  decrementl(xstart);
+  jcc(Assembler::negative, L_one_x);
+
+  movq(x_xstart, Address(x, xstart, Address::times_4,  0));
+  rorq(x_xstart, 32); // convert big-endian to little-endian
+
+  bind(L_first_loop);
+  decrementl(idx);
+  jcc(Assembler::negative, L_first_loop_exit);
+  decrementl(idx);
+  jcc(Assembler::negative, L_one_y);
+  movq(y_idx, Address(y, idx, Address::times_4,  0));
+  rorq(y_idx, 32); // convert big-endian to little-endian
+  bind(L_multiply);
+  movq(product, x_xstart);
+  mulq(y_idx); // product(rax) * y_idx -> rdx:rax
+  addq(product, carry);
+  adcq(rdx, 0);
+  subl(kdx, 2);
+  movl(Address(z, kdx, Address::times_4,  4), product);
+  shrq(product, 32);
+  movl(Address(z, kdx, Address::times_4,  0), product);
+  movq(carry, rdx);
+  jmp(L_first_loop);
+
+  bind(L_one_y);
+  movl(y_idx, Address(y,  0));
+  jmp(L_multiply);
+
+  bind(L_one_x);
+  movl(x_xstart, Address(x,  0));
+  jmp(L_first_loop);
+
+  bind(L_first_loop_exit);
+}
+
+/**
+ * Multiply 64 bit by 64 bit and add 128 bit.
+ */
+void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z,
+                                            Register yz_idx, Register idx,
+                                            Register carry, Register product, int offset) {
+  //     huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
+  //     z[kdx] = (jlong)product;
+
+  movq(yz_idx, Address(y, idx, Address::times_4,  offset));
+  rorq(yz_idx, 32); // convert big-endian to little-endian
+  movq(product, x_xstart);
+  mulq(yz_idx);     // product(rax) * yz_idx -> rdx:product(rax)
+  movq(yz_idx, Address(z, idx, Address::times_4,  offset));
+  rorq(yz_idx, 32); // convert big-endian to little-endian
+
+  add2_with_carry(rdx, product, carry, yz_idx);
+
+  movl(Address(z, idx, Address::times_4,  offset+4), product);
+  shrq(product, 32);
+  movl(Address(z, idx, Address::times_4,  offset), product);
+
+}
+
+/**
+ * Multiply 128 bit by 128 bit. Unrolled inner loop.
+ */
+void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
+                                             Register yz_idx, Register idx, Register jdx,
+                                             Register carry, Register product,
+                                             Register carry2) {
+  //   jlong carry, x[], y[], z[];
+  //   int kdx = ystart+1;
+  //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
+  //     huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
+  //     z[kdx+idx+1] = (jlong)product;
+  //     jlong carry2  = (jlong)(product >>> 64);
+  //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
+  //     z[kdx+idx] = (jlong)product;
+  //     carry  = (jlong)(product >>> 64);
+  //   }
+  //   idx += 2;
+  //   if (idx > 0) {
+  //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
+  //     z[kdx+idx] = (jlong)product;
+  //     carry  = (jlong)(product >>> 64);
+  //   }
+  //
+
+  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
+
+  movl(jdx, idx);
+  andl(jdx, 0xFFFFFFFC);
+  shrl(jdx, 2);
+
+  bind(L_third_loop);
+  subl(jdx, 1);
+  jcc(Assembler::negative, L_third_loop_exit);
+  subl(idx, 4);
+
+  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
+  movq(carry2, rdx);
+
+  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
+  movq(carry, rdx);
+  jmp(L_third_loop);
+
+  bind (L_third_loop_exit);
+
+  andl (idx, 0x3);
+  jcc(Assembler::zero, L_post_third_loop_done);
+
+  Label L_check_1;
+  subl(idx, 2);
+  jcc(Assembler::negative, L_check_1);
+
+  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
+  movq(carry, rdx);
+
+  bind (L_check_1);
+  addl (idx, 0x2);
+  andl (idx, 0x1);
+  subl(idx, 1);
+  jcc(Assembler::negative, L_post_third_loop_done);
+
+  movl(yz_idx, Address(y, idx, Address::times_4,  0));
+  movq(product, x_xstart);
+  mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax)
+  movl(yz_idx, Address(z, idx, Address::times_4,  0));
+
+  add2_with_carry(rdx, product, yz_idx, carry);
+
+  movl(Address(z, idx, Address::times_4,  0), product);
+  shrq(product, 32);
+
+  shlq(rdx, 32);
+  orq(product, rdx);
+  movq(carry, product);
+
+  bind(L_post_third_loop_done);
+}
+
+/**
+ * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop.
+ *
+ */
+void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z,
+                                                  Register carry, Register carry2,
+                                                  Register idx, Register jdx,
+                                                  Register yz_idx1, Register yz_idx2,
+                                                  Register tmp, Register tmp3, Register tmp4) {
+  assert(UseBMI2Instructions, "should be used only when BMI2 is available");
+
+  //   jlong carry, x[], y[], z[];
+  //   int kdx = ystart+1;
+  //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
+  //     huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry;
+  //     jlong carry2  = (jlong)(tmp3 >>> 64);
+  //     huge_128 tmp4 = (y[idx]   * rdx) + z[kdx+idx] + carry2;
+  //     carry  = (jlong)(tmp4 >>> 64);
+  //     z[kdx+idx+1] = (jlong)tmp3;
+  //     z[kdx+idx] = (jlong)tmp4;
+  //   }
+  //   idx += 2;
+  //   if (idx > 0) {
+  //     yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry;
+  //     z[kdx+idx] = (jlong)yz_idx1;
+  //     carry  = (jlong)(yz_idx1 >>> 64);
+  //   }
+  //
+
+  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
+
+  movl(jdx, idx);
+  andl(jdx, 0xFFFFFFFC);
+  shrl(jdx, 2);
+
+  bind(L_third_loop);
+  subl(jdx, 1);
+  jcc(Assembler::negative, L_third_loop_exit);
+  subl(idx, 4);
+
+  movq(yz_idx1,  Address(y, idx, Address::times_4,  8));
+  rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
+  movq(yz_idx2, Address(y, idx, Address::times_4,  0));
+  rorxq(yz_idx2, yz_idx2, 32);
+
+  mulxq(tmp4, tmp3, yz_idx1);  //  yz_idx1 * rdx -> tmp4:tmp3
+  mulxq(carry2, tmp, yz_idx2); //  yz_idx2 * rdx -> carry2:tmp
+
+  movq(yz_idx1,  Address(z, idx, Address::times_4,  8));
+  rorxq(yz_idx1, yz_idx1, 32);
+  movq(yz_idx2, Address(z, idx, Address::times_4,  0));
+  rorxq(yz_idx2, yz_idx2, 32);
+
+  if (VM_Version::supports_adx()) {
+    adcxq(tmp3, carry);
+    adoxq(tmp3, yz_idx1);
+
+    adcxq(tmp4, tmp);
+    adoxq(tmp4, yz_idx2);
+
+    movl(carry, 0); // does not affect flags
+    adcxq(carry2, carry);
+    adoxq(carry2, carry);
+  } else {
+    add2_with_carry(tmp4, tmp3, carry, yz_idx1);
+    add2_with_carry(carry2, tmp4, tmp, yz_idx2);
+  }
+  movq(carry, carry2);
+
+  movl(Address(z, idx, Address::times_4, 12), tmp3);
+  shrq(tmp3, 32);
+  movl(Address(z, idx, Address::times_4,  8), tmp3);
+
+  movl(Address(z, idx, Address::times_4,  4), tmp4);
+  shrq(tmp4, 32);
+  movl(Address(z, idx, Address::times_4,  0), tmp4);
+
+  jmp(L_third_loop);
+
+  bind (L_third_loop_exit);
+
+  andl (idx, 0x3);
+  jcc(Assembler::zero, L_post_third_loop_done);
+
+  Label L_check_1;
+  subl(idx, 2);
+  jcc(Assembler::negative, L_check_1);
+
+  movq(yz_idx1, Address(y, idx, Address::times_4,  0));
+  rorxq(yz_idx1, yz_idx1, 32);
+  mulxq(tmp4, tmp3, yz_idx1); //  yz_idx1 * rdx -> tmp4:tmp3
+  movq(yz_idx2, Address(z, idx, Address::times_4,  0));
+  rorxq(yz_idx2, yz_idx2, 32);
+
+  add2_with_carry(tmp4, tmp3, carry, yz_idx2);
+
+  movl(Address(z, idx, Address::times_4,  4), tmp3);
+  shrq(tmp3, 32);
+  movl(Address(z, idx, Address::times_4,  0), tmp3);
+  movq(carry, tmp4);
+
+  bind (L_check_1);
+  addl (idx, 0x2);
+  andl (idx, 0x1);
+  subl(idx, 1);
+  jcc(Assembler::negative, L_post_third_loop_done);
+  movl(tmp4, Address(y, idx, Address::times_4,  0));
+  mulxq(carry2, tmp3, tmp4);  //  tmp4 * rdx -> carry2:tmp3
+  movl(tmp4, Address(z, idx, Address::times_4,  0));
+
+  add2_with_carry(carry2, tmp3, tmp4, carry);
+
+  movl(Address(z, idx, Address::times_4,  0), tmp3);
+  shrq(tmp3, 32);
+
+  shlq(carry2, 32);
+  orq(tmp3, carry2);
+  movq(carry, tmp3);
+
+  bind(L_post_third_loop_done);
+}
+
+/**
+ * Code for BigInteger::multiplyToLen() intrinsic.
+ *
+ * rdi: x
+ * rax: xlen
+ * rsi: y
+ * rcx: ylen
+ * r8:  z
+ * r11: zlen
+ * r12: tmp1
+ * r13: tmp2
+ * r14: tmp3
+ * r15: tmp4
+ * rbx: tmp5
+ *
+ */
+void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
+                                     Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) {
+  ShortBranchVerifier sbv(this);
+  assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx);
+
+  push(tmp1);
+  push(tmp2);
+  push(tmp3);
+  push(tmp4);
+  push(tmp5);
+
+  push(xlen);
+  push(zlen);
+
+  const Register idx = tmp1;
+  const Register kdx = tmp2;
+  const Register xstart = tmp3;
+
+  const Register y_idx = tmp4;
+  const Register carry = tmp5;
+  const Register product  = xlen;
+  const Register x_xstart = zlen;  // reuse register
+
+  // First Loop.
+  //
+  //  final static long LONG_MASK = 0xffffffffL;
+  //  int xstart = xlen - 1;
+  //  int ystart = ylen - 1;
+  //  long carry = 0;
+  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
+  //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
+  //    z[kdx] = (int)product;
+  //    carry = product >>> 32;
+  //  }
+  //  z[xstart] = (int)carry;
+  //
+
+  movl(idx, ylen);      // idx = ylen;
+  movl(kdx, zlen);      // kdx = xlen+ylen;
+  xorq(carry, carry);   // carry = 0;
+
+  Label L_done;
+
+  movl(xstart, xlen);
+  decrementl(xstart);
+  jcc(Assembler::negative, L_done);
+
+  multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
+
+  Label L_second_loop;
+  testl(kdx, kdx);
+  jcc(Assembler::zero, L_second_loop);
+
+  Label L_carry;
+  subl(kdx, 1);
+  jcc(Assembler::zero, L_carry);
+
+  movl(Address(z, kdx, Address::times_4,  0), carry);
+  shrq(carry, 32);
+  subl(kdx, 1);
+
+  bind(L_carry);
+  movl(Address(z, kdx, Address::times_4,  0), carry);
+
+  // Second and third (nested) loops.
+  //
+  // for (int i = xstart-1; i >= 0; i--) { // Second loop
+  //   carry = 0;
+  //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
+  //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
+  //                    (z[k] & LONG_MASK) + carry;
+  //     z[k] = (int)product;
+  //     carry = product >>> 32;
+  //   }
+  //   z[i] = (int)carry;
+  // }
+  //
+  // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx
+
+  const Register jdx = tmp1;
+
+  bind(L_second_loop);
+  xorl(carry, carry);    // carry = 0;
+  movl(jdx, ylen);       // j = ystart+1
+
+  subl(xstart, 1);       // i = xstart-1;
+  jcc(Assembler::negative, L_done);
+
+  push (z);
+
+  Label L_last_x;
+  lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j
+  subl(xstart, 1);       // i = xstart-1;
+  jcc(Assembler::negative, L_last_x);
+
+  if (UseBMI2Instructions) {
+    movq(rdx,  Address(x, xstart, Address::times_4,  0));
+    rorxq(rdx, rdx, 32); // convert big-endian to little-endian
+  } else {
+    movq(x_xstart, Address(x, xstart, Address::times_4,  0));
+    rorq(x_xstart, 32);  // convert big-endian to little-endian
+  }
+
+  Label L_third_loop_prologue;
+  bind(L_third_loop_prologue);
+
+  push (x);
+  push (xstart);
+  push (ylen);
+
+
+  if (UseBMI2Instructions) {
+    multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4);
+  } else { // !UseBMI2Instructions
+    multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
+  }
+
+  pop(ylen);
+  pop(xlen);
+  pop(x);
+  pop(z);
+
+  movl(tmp3, xlen);
+  addl(tmp3, 1);
+  movl(Address(z, tmp3, Address::times_4,  0), carry);
+  subl(tmp3, 1);
+  jccb(Assembler::negative, L_done);
+
+  shrq(carry, 32);
+  movl(Address(z, tmp3, Address::times_4,  0), carry);
+  jmp(L_second_loop);
+
+  // Next infrequent code is moved outside loops.
+  bind(L_last_x);
+  if (UseBMI2Instructions) {
+    movl(rdx, Address(x,  0));
+  } else {
+    movl(x_xstart, Address(x,  0));
+  }
+  jmp(L_third_loop_prologue);
+
+  bind(L_done);
+
+  pop(zlen);
+  pop(xlen);
+
+  pop(tmp5);
+  pop(tmp4);
+  pop(tmp3);
+  pop(tmp2);
+  pop(tmp1);
+}
+#endif
+
 /**
  * Emits code to update CRC-32 with a byte value according to constants in table
  *
@@ -7316,17 +7777,34 @@
  * Fold 128-bit data chunk
  */
 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
-  vpclmulhdq(xtmp, xK, xcrc); // [123:64]
-  vpclmulldq(xcrc, xK, xcrc); // [63:0]
-  vpxor(xcrc, xcrc, Address(buf, offset), false /* vector256 */);
-  pxor(xcrc, xtmp);
+  if (UseAVX > 0) {
+    vpclmulhdq(xtmp, xK, xcrc); // [123:64]
+    vpclmulldq(xcrc, xK, xcrc); // [63:0]
+    vpxor(xcrc, xcrc, Address(buf, offset), false /* vector256 */);
+    pxor(xcrc, xtmp);
+  } else {
+    movdqa(xtmp, xcrc);
+    pclmulhdq(xtmp, xK);   // [123:64]
+    pclmulldq(xcrc, xK);   // [63:0]
+    pxor(xcrc, xtmp);
+    movdqu(xtmp, Address(buf, offset));
+    pxor(xcrc, xtmp);
+  }
 }
 
 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) {
-  vpclmulhdq(xtmp, xK, xcrc);
-  vpclmulldq(xcrc, xK, xcrc);
-  pxor(xcrc, xbuf);
-  pxor(xcrc, xtmp);
+  if (UseAVX > 0) {
+    vpclmulhdq(xtmp, xK, xcrc);
+    vpclmulldq(xcrc, xK, xcrc);
+    pxor(xcrc, xbuf);
+    pxor(xcrc, xtmp);
+  } else {
+    movdqa(xtmp, xcrc);
+    pclmulhdq(xtmp, xK);
+    pclmulldq(xcrc, xK);
+    pxor(xcrc, xbuf);
+    pxor(xcrc, xtmp);
+  }
 }
 
 /**
@@ -7444,9 +7922,17 @@
   // Fold 128 bits in xmm1 down into 32 bits in crc register.
   BIND(L_fold_128b);
   movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()));
-  vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
-  vpand(xmm3, xmm0, xmm2, false /* vector256 */);
-  vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
+  if (UseAVX > 0) {
+    vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
+    vpand(xmm3, xmm0, xmm2, false /* vector256 */);
+    vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
+  } else {
+    movdqa(xmm2, xmm0);
+    pclmulqdq(xmm2, xmm1, 0x1);
+    movdqa(xmm3, xmm0);
+    pand(xmm3, xmm2);
+    pclmulqdq(xmm0, xmm3, 0x1);
+  }
   psrldq(xmm1, 8);
   psrldq(xmm2, 4);
   pxor(xmm0, xmm1);
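
Illustrative sketch (not part of the changeset): the pseudocode comments inside multiply_to_len() earlier in this file describe BigInteger-style schoolbook multiplication over big-endian arrays of 32-bit words; the assembly simply processes two words per step and, with BMI2/ADX, keeps two carry chains in flight. A portable C++ version of the same algorithm, for reference only:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // x, y and the result are big-endian arrays of 32-bit words (word 0 is the
    // most significant), matching BigInteger's internal representation.
    static std::vector<uint32_t> multiply_to_len(const std::vector<uint32_t>& x,
                                                 const std::vector<uint32_t>& y) {
      const int xlen = (int)x.size(), ylen = (int)y.size();
      std::vector<uint32_t> z(xlen + ylen, 0);

      // First loop: multiply y by the least significant word of x.
      uint64_t carry = 0;
      for (int j = ylen - 1, k = ylen + xlen - 1; j >= 0; j--, k--) {
        uint64_t product = (uint64_t)y[j] * x[xlen - 1] + carry;
        z[k]  = (uint32_t)product;
        carry = product >> 32;
      }
      z[xlen - 1] = (uint32_t)carry;

      // Second and third (nested) loops: accumulate the remaining partial products.
      for (int i = xlen - 2; i >= 0; i--) {
        carry = 0;
        for (int j = ylen - 1, k = ylen + i; j >= 0; j--, k--) {
          uint64_t product = (uint64_t)y[j] * x[i] + z[k] + carry;
          z[k]  = (uint32_t)product;
          carry = product >> 32;
        }
        z[i] = (uint32_t)carry;
      }
      return z;
    }

    int main() {
      // (2^64 - 1) * 2 = 0x1_FFFFFFFF_FFFFFFFE
      std::vector<uint32_t> x = { 0xFFFFFFFFu, 0xFFFFFFFFu };
      std::vector<uint32_t> y = { 0x00000002u };
      for (uint32_t w : multiply_to_len(x, y)) std::printf("%08X ", w);
      std::printf("\n");  // expected: 00000001 FFFFFFFF FFFFFFFE
      return 0;
    }
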
--- a/src/cpu/x86/vm/macroAssembler_x86.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/x86/vm/macroAssembler_x86.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -966,6 +966,16 @@
   void mulss(XMMRegister dst, Address src)        { Assembler::mulss(dst, src); }
   void mulss(XMMRegister dst, AddressLiteral src);
 
+  // Carry-Less Multiplication Quadword
+  void pclmulldq(XMMRegister dst, XMMRegister src) {
+    // 0x00 - multiply lower 64 bits [0:63]
+    Assembler::pclmulqdq(dst, src, 0x00);
+  }
+  void pclmulhdq(XMMRegister dst, XMMRegister src) {
+    // 0x11 - multiply upper 64 bits [64:127]
+    Assembler::pclmulqdq(dst, src, 0x11);
+  }
+
   void sqrtsd(XMMRegister dst, XMMRegister src)    { Assembler::sqrtsd(dst, src); }
   void sqrtsd(XMMRegister dst, Address src)        { Assembler::sqrtsd(dst, src); }
   void sqrtsd(XMMRegister dst, AddressLiteral src);
@@ -1211,6 +1221,28 @@
                         XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                         XMMRegister tmp4, Register tmp5, Register result);
 
+#ifdef _LP64
+  void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
+  void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
+                             Register y, Register y_idx, Register z,
+                             Register carry, Register product,
+                             Register idx, Register kdx);
+  void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
+                              Register yz_idx, Register idx,
+                              Register carry, Register product, int offset);
+  void multiply_128_x_128_bmi2_loop(Register y, Register z,
+                                    Register carry, Register carry2,
+                                    Register idx, Register jdx,
+                                    Register yz_idx1, Register yz_idx2,
+                                    Register tmp, Register tmp3, Register tmp4);
+  void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
+                               Register yz_idx, Register idx, Register jdx,
+                               Register carry, Register product,
+                               Register carry2);
+  void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
+                       Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
+#endif
+
   // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
   void update_byte_crc32(Register crc, Register val, Register table);
   void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -3677,6 +3677,70 @@
     return start;
   }
 
+
+  /**
+   *  Arguments:
+   *
+   *  Input:
+   *    c_rarg0   - x address
+   *    c_rarg1   - x length
+   *    c_rarg2   - y address
+   *    c_rarg3   - y length
+   * not Win64
+   *    c_rarg4   - z address
+   *    c_rarg5   - z length
+   * Win64
+   *    rsp+40    - z address
+   *    rsp+48    - z length
+   */
+  address generate_multiplyToLen() {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "multiplyToLen");
+
+    address start = __ pc();
+    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
+    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
+    const Register x     = rdi;
+    const Register xlen  = rax;
+    const Register y     = rsi;
+    const Register ylen  = rcx;
+    const Register z     = r8;
+    const Register zlen  = r11;
+
+    // Next registers will be saved on stack in multiply_to_len().
+    const Register tmp1  = r12;
+    const Register tmp2  = r13;
+    const Register tmp3  = r14;
+    const Register tmp4  = r15;
+    const Register tmp5  = rbx;
+
+    BLOCK_COMMENT("Entry:");
+    __ enter(); // required for proper stackwalking of RuntimeStub frame
+
+#ifndef _WIN64
+    __ movptr(zlen, r9); // Save r9 in r11 - zlen
+#endif
+    setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx
+                       // ylen => rcx, z => r8, zlen => r11
+                       // r9 and r10 may be used to save non-volatile registers
+#ifdef _WIN64
+    // last 2 arguments (#4, #5) are on stack on Win64
+    __ movptr(z, Address(rsp, 6 * wordSize));
+    __ movptr(zlen, Address(rsp, 7 * wordSize));
+#endif
+
+    __ movptr(xlen, rsi);
+    __ movptr(y,    rdx);
+    __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5);
+
+    restore_arg_regs();
+
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ ret(0);
+
+    return start;
+  }
+
 #undef __
 #define __ masm->
 
@@ -3917,6 +3981,11 @@
     generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                                                        &StubRoutines::_safefetchN_fault_pc,
                                                        &StubRoutines::_safefetchN_continuation_pc);
+#ifdef COMPILER2
+    if (UseMultiplyToLenIntrinsic) {
+      StubRoutines::_multiplyToLen = generate_multiplyToLen();
+    }
+#endif
   }
 
  public:
--- a/src/cpu/x86/vm/vm_version_x86.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/x86/vm/vm_version_x86.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -493,7 +493,7 @@
   }
 
   char buf[256];
-  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
                cores_per_cpu(), threads_per_core(),
                cpu_family(), _model, _stepping,
                (supports_cmov() ? ", cmov" : ""),
@@ -522,7 +522,8 @@
                (supports_tscinv_bit() ? ", tscinvbit": ""),
                (supports_tscinv() ? ", tscinv": ""),
                (supports_bmi1() ? ", bmi1" : ""),
-               (supports_bmi2() ? ", bmi2" : ""));
+               (supports_bmi2() ? ", bmi2" : ""),
+               (supports_adx() ? ", adx" : ""));
   _features_str = strdup(buf);
 
   // UseSSE is set to the smaller of what hardware supports and what
@@ -568,13 +569,13 @@
     FLAG_SET_DEFAULT(UseCLMUL, false);
   }
 
-  if (UseCLMUL && (UseAVX > 0) && (UseSSE > 2)) {
+  if (UseCLMUL && (UseSSE > 2)) {
     if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
       UseCRC32Intrinsics = true;
     }
   } else if (UseCRC32Intrinsics) {
     if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
-      warning("CRC32 Intrinsics requires AVX and CLMUL instructions (not available on this CPU)");
+      warning("CRC32 Intrinsics requires CLMUL instructions (not available on this CPU)");
     FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
   }
 
@@ -590,6 +591,17 @@
     FLAG_SET_DEFAULT(UseAESIntrinsics, false);
   }
 
+  if (UseSHA) {
+    warning("SHA instructions are not available on this CPU");
+    FLAG_SET_DEFAULT(UseSHA, false);
+  }
+  if (UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics) {
+    warning("SHA intrinsics are not available on this CPU");
+    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
+    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
+    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
+  }
+
   // Adjust RTM (Restricted Transactional Memory) flags
   if (!supports_rtm() && UseRTMLocking) {
     // Can't continue because UseRTMLocking affects UseBiasedLocking flag
@@ -601,6 +613,17 @@
 
 #if INCLUDE_RTM_OPT
   if (UseRTMLocking) {
+    if (is_intel_family_core()) {
+      if ((_model == CPU_MODEL_HASWELL_E3) ||
+          (_model == CPU_MODEL_HASWELL_E7 && _stepping < 3) ||
+          (_model == CPU_MODEL_BROADWELL  && _stepping < 4)) {
+        if (!UnlockExperimentalVMOptions) {
+          vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this platform. It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
+        } else {
+          warning("UseRTMLocking is only available as experimental option on this platform.");
+        }
+      }
+    }
     if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
       // RTM locking should be used only for applications with
       // high lock contention. For now we do not use it by default.
@@ -675,7 +698,20 @@
     }
 #endif
   }
+
+#ifdef _LP64
+  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
+    UseMultiplyToLenIntrinsic = true;
+  }
+#else
+  if (UseMultiplyToLenIntrinsic) {
+    if (!FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
+      warning("multiplyToLen intrinsic is not available in 32-bit VM");
+    }
+    FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false);
+  }
 #endif
+#endif // COMPILER2
 
   // On new cpus instructions which update whole XMM register should be used
   // to prevent partial register stall due to dependencies on high half.
@@ -803,6 +839,24 @@
         }
       }
     }
+    if ((cpu_family() == 0x06) &&
+        ((extended_cpu_model() == 0x36) || // Centerton
+         (extended_cpu_model() == 0x37) || // Silvermont
+         (extended_cpu_model() == 0x4D))) {
+#ifdef COMPILER2
+      if (FLAG_IS_DEFAULT(OptoScheduling)) {
+        OptoScheduling = true;
+      }
+#endif
+      if (supports_sse4_2()) { // Silvermont
+        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
+          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
+        }
+      }
+    }
+    if(FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
+      AllocatePrefetchInstr = 3;
+    }
   }
 
   // Use count leading zeros count instruction if available.
@@ -815,23 +869,40 @@
     FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, false);
   }
 
+  // Use count trailing zeros instruction if available
   if (supports_bmi1()) {
+    // tzcnt does not require VEX prefix
+    if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
+      if (!UseBMI1Instructions && !FLAG_IS_DEFAULT(UseBMI1Instructions)) {
+        // Don't use tzcnt if BMI1 is switched off on command line.
+        UseCountTrailingZerosInstruction = false;
+      } else {
+        UseCountTrailingZerosInstruction = true;
+      }
+    }
+  } else if (UseCountTrailingZerosInstruction) {
+    warning("tzcnt instruction is not available on this CPU");
+    FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
+  }
+
+  // BMI instructions (except tzcnt) use an encoding with VEX prefix.
+  // VEX prefix is generated only when AVX > 0.
+  if (supports_bmi1() && supports_avx()) {
     if (FLAG_IS_DEFAULT(UseBMI1Instructions)) {
       UseBMI1Instructions = true;
     }
   } else if (UseBMI1Instructions) {
-    warning("BMI1 instructions are not available on this CPU");
+    warning("BMI1 instructions are not available on this CPU (AVX is also required)");
     FLAG_SET_DEFAULT(UseBMI1Instructions, false);
   }
 
-  // Use count trailing zeros instruction if available
-  if (supports_bmi1()) {
-    if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
-      UseCountTrailingZerosInstruction = UseBMI1Instructions;
+  if (supports_bmi2() && supports_avx()) {
+    if (FLAG_IS_DEFAULT(UseBMI2Instructions)) {
+      UseBMI2Instructions = true;
     }
-  } else if (UseCountTrailingZerosInstruction) {
-    warning("tzcnt instruction is not available on this CPU");
-    FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
+  } else if (UseBMI2Instructions) {
+    warning("BMI2 instructions are not available on this CPU (AVX is also required)");
+    FLAG_SET_DEFAULT(UseBMI2Instructions, false);
   }
 
   // Use population count instruction if available.
@@ -890,23 +961,25 @@
   AllocatePrefetchDistance = allocate_prefetch_distance();
   AllocatePrefetchStyle    = allocate_prefetch_style();
 
-  if( is_intel() && cpu_family() == 6 && supports_sse3() ) {
-    if( AllocatePrefetchStyle == 2 ) { // watermark prefetching on Core
+  if (is_intel() && cpu_family() == 6 && supports_sse3()) {
+    if (AllocatePrefetchStyle == 2) { // watermark prefetching on Core
 #ifdef _LP64
       AllocatePrefetchDistance = 384;
 #else
       AllocatePrefetchDistance = 320;
 #endif
     }
-    if( supports_sse4_2() && supports_ht() ) { // Nehalem based cpus
+    if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus
       AllocatePrefetchDistance = 192;
       AllocatePrefetchLines = 4;
+    }
 #ifdef COMPILER2
-      if (AggressiveOpts && FLAG_IS_DEFAULT(UseFPUForSpilling)) {
+    if (supports_sse4_2()) {
+      if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
         FLAG_SET_DEFAULT(UseFPUForSpilling, true);
       }
+    }
 #endif
-    }
   }
   assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value");
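Most of the flag adjustments above key off CPUID leaf 7 (sub-leaf 0), whose EBX register reports BMI1, BMI2, RTM, SHA and the newly detected ADX bit; AVX enters the BMI1/BMI2 defaults only because, as the comment notes, their VEX-encoded forms are generated only when AVX > 0. As a quick way to see what a machine reports, here is a minimal standalone sketch using GCC/Clang's <cpuid.h> (assumed available; the bit positions follow the Intel SDM and are not taken from this file):

    #include <cpuid.h>
    #include <cstdio>

    int main() {
      unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;

      // Highest supported standard leaf; leaf 7 holds the structured extended features.
      if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx) || eax < 7) {
        printf("CPUID leaf 7 not supported\n");
        return 1;
      }

      __cpuid_count(7, 0, eax, ebx, ecx, edx);   // EBX carries the feature bits
      printf("bmi1: %u\n", (ebx >> 3)  & 1);     // tzcnt and the other BMI1 ops
      printf("bmi2: %u\n", (ebx >> 8)  & 1);
      printf("rtm : %u\n", (ebx >> 11) & 1);
      printf("adx : %u\n", (ebx >> 19) & 1);     // the bit this change starts reporting
      printf("sha : %u\n", (ebx >> 29) & 1);
      return 0;
    }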
 
--- a/src/cpu/x86/vm/vm_version_x86.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/x86/vm/vm_version_x86.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -210,7 +210,9 @@
                    erms : 1,
                         : 1,
                    rtm  : 1,
-                        : 20;
+                        : 7,
+                   adx  : 1,
+                        : 12;
     } bits;
   };
 
@@ -261,7 +263,8 @@
     CPU_CLMUL  = (1 << 21), // carryless multiply for CRC
     CPU_BMI1   = (1 << 22),
     CPU_BMI2   = (1 << 23),
-    CPU_RTM    = (1 << 24)  // Restricted Transactional Memory instructions
+    CPU_RTM    = (1 << 24),  // Restricted Transactional Memory instructions
+    CPU_ADX    = (1 << 25)
   } cpuFeatureFlags;
 
   enum {
@@ -277,7 +280,10 @@
     CPU_MODEL_WESTMERE_EX    = 0x2f,
     CPU_MODEL_SANDYBRIDGE    = 0x2a,
     CPU_MODEL_SANDYBRIDGE_EP = 0x2d,
-    CPU_MODEL_IVYBRIDGE_EP   = 0x3a
+    CPU_MODEL_IVYBRIDGE_EP   = 0x3a,
+    CPU_MODEL_HASWELL_E3     = 0x3c,
+    CPU_MODEL_HASWELL_E7     = 0x3f,
+    CPU_MODEL_BROADWELL      = 0x3d
   } cpuExtendedFamily;
 
   // cpuid information block.  All info derived from executing cpuid with
@@ -463,10 +469,16 @@
     }
     // Intel features.
     if(is_intel()) {
+      if(_cpuid_info.sef_cpuid7_ebx.bits.adx != 0)
+         result |= CPU_ADX;
       if(_cpuid_info.sef_cpuid7_ebx.bits.bmi2 != 0)
         result |= CPU_BMI2;
       if(_cpuid_info.ext_cpuid1_ecx.bits.lzcnt_intel != 0)
         result |= CPU_LZCNT;
+      // for Intel, ecx.bits.misalignsse bit (bit 8) indicates support for prefetchw
+      if (_cpuid_info.ext_cpuid1_ecx.bits.misalignsse != 0) {
+        result |= CPU_3DNOW_PREFETCH;
+      }
     }
 
     return result;
@@ -619,6 +631,7 @@
   static bool supports_rtm()      { return (_cpuFeatures & CPU_RTM) != 0; }
   static bool supports_bmi1()     { return (_cpuFeatures & CPU_BMI1) != 0; }
   static bool supports_bmi2()     { return (_cpuFeatures & CPU_BMI2) != 0; }
+  static bool supports_adx()     { return (_cpuFeatures & CPU_ADX) != 0; }
   // Intel features
   static bool is_intel_family_core() { return is_intel() &&
                                        extended_cpu_family() == CPU_FAMILY_INTEL_CORE; }
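The bitfield edit above keeps the union exactly one 32-bit register wide: the old trailing ": 20" after rtm (bit 11) becomes 7 reserved bits (12-18), adx at bit 19, and 12 reserved bits (20-31), matching CPUID.(EAX=07H,ECX=0):EBX. A standalone sketch of the same decode-by-union idiom; the fields for bits 0 through 8, which are outside the hunk, follow the SDM and are illustrative here rather than copied from the header:

    #include <cstdint>
    #include <cstdio>

    // Same idiom as sef_cpuid7_ebx: overlay a bitfield on the raw register value.
    // Bitfield allocation order is implementation-defined in general; HotSpot relies
    // on the low-to-high layout used by its supported x86 compilers.
    union SefCpuid7EbxSketch {
      uint32_t value;
      struct {
        uint32_t fsgsbase : 1,
                 pad1     : 2,
                 bmi1     : 1,
                 pad2     : 1,
                 avx2     : 1,
                 pad3     : 2,
                 bmi2     : 1,
                 erms     : 1,
                 pad4     : 1,
                 rtm      : 1,
                 pad5     : 7,
                 adx      : 1,    // bit 19, the field added by this change
                 pad6     : 12;
      } bits;
    };
    static_assert(sizeof(SefCpuid7EbxSketch) == 4, "must stay one 32-bit register");

    int main() {
      SefCpuid7EbxSketch r;
      r.value = 1u << 19;   // pretend only ADX is reported
      printf("adx=%u bmi2=%u\n", r.bits.adx, r.bits.bmi2);   // adx=1 bmi2=0
      return 0;
    }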
--- a/src/cpu/zero/vm/compiledIC_zero.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/zero/vm/compiledIC_zero.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -58,34 +58,6 @@
   return is_icholder_entry(call->destination());
 }
 
-//-----------------------------------------------------------------------------
-// High-level access to an inline cache. Guaranteed to be MT-safe.
-
-CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
-  : _ic_call(call)
-{
-  address ic_call = call->instruction_address();
-
-  assert(ic_call != NULL, "ic_call address must be set");
-  assert(nm != NULL, "must pass nmethod");
-  assert(nm->contains(ic_call), "must be in nmethod");
-
-  // Search for the ic_call at the given address.
-  RelocIterator iter(nm, ic_call, ic_call+1);
-  bool ret = iter.next();
-  assert(ret == true, "relocInfo must exist at this address");
-  assert(iter.addr() == ic_call, "must find ic_call");
-  if (iter.type() == relocInfo::virtual_call_type) {
-    virtual_call_Relocation* r = iter.virtual_call_reloc();
-    _is_optimized = false;
-    _value = nativeMovConstReg_at(r->cached_value());
-  } else {
-    assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
-    _is_optimized = true;
-    _value = NULL;
-  }
-}
-
 // ----------------------------------------------------------------------------
 
 void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -40,6 +40,7 @@
 #include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/synchronizer.hpp"
--- a/src/os/aix/vm/os_aix.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/aix/vm/os_aix.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -55,6 +55,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -113,12 +114,6 @@
 }
 #endif
 
-// Excerpts from systemcfg.h definitions newer than AIX 5.3
-#ifndef PV_7
-# define PV_7 0x200000          // Power PC 7
-# define PV_7_Compat 0x208000   // Power PC 7
-#endif
-
 #define MAX_PATH (2 * K)
 
 // for timer info max values which include all bits
@@ -129,17 +124,40 @@
 #define ERROR_MP_VMGETINFO_FAILED                    102
 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
 
-// the semantics in this file are thus that codeptr_t is a *real code ptr*
+// The semantics in this file are thus that codeptr_t is a *real code ptr*.
 // This means that any function taking codeptr_t as arguments will assume
 // a real codeptr and won't handle function descriptors (eg getFuncName),
 // whereas functions taking address as args will deal with function
-// descriptors (eg os::dll_address_to_library_name)
+// descriptors (eg os::dll_address_to_library_name).
 typedef unsigned int* codeptr_t;
 
-// typedefs for stackslots, stack pointers, pointers to op codes
+// Typedefs for stackslots, stack pointers, pointers to op codes.
 typedef unsigned long stackslot_t;
 typedef stackslot_t* stackptr_t;
 
+// Excerpts from systemcfg.h definitions newer than AIX 5.3.
+#ifndef PV_7
+#define PV_7 0x200000          /* Power PC 7 */
+#define PV_7_Compat 0x208000   /* Power PC 7 */
+#endif
+#ifndef PV_8
+#define PV_8 0x300000          /* Power PC 8 */
+#define PV_8_Compat 0x308000   /* Power PC 8 */
+#endif
+
+#define trcVerbose(fmt, ...) { /* PPC port */  \
+  if (Verbose) { \
+    fprintf(stderr, fmt, ##__VA_ARGS__); \
+    fputc('\n', stderr); fflush(stderr); \
+  } \
+}
+#define trc(fmt, ...)        /* PPC port */
+
+#define ERRBYE(s) { \
+    trcVerbose(s); \
+    return -1; \
+}
+
 // query dimensions of the stack of the calling thread
 static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
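The trcVerbose/trc/ERRBYE helpers added above give the AIX port one place for the "print only when Verbose" pattern; the later hunks in this file convert the scattered "if (Verbose) fprintf(stderr, ...)" blocks to them. A minimal standalone sketch of the same pattern, with a plain bool standing in for the VM's Verbose flag (the ##__VA_ARGS__ form is the same GNU extension the macro above uses):

    #include <cstdio>

    static bool Verbose = true;   // stand-in for the VM's Verbose flag

    // Print only when verbose, always appending a newline and flushing,
    // mirroring the trcVerbose macro above.
    #define trcVerbose(fmt, ...) {               \
      if (Verbose) {                             \
        fprintf(stderr, fmt, ##__VA_ARGS__);     \
        fputc('\n', stderr); fflush(stderr);     \
      }                                          \
    }

    // Trace a problem and bail out of the current function with -1.
    #define ERRBYE(s) { trcVerbose(s); return -1; }

    static int open_config(const char* path) {
      if (path == nullptr) ERRBYE("no config path given");
      trcVerbose("opening config %s", path);
      return 0;
    }

    int main() {
      return open_config("example.cfg") == 0 ? 0 : 1;
    }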
 
@@ -171,12 +189,12 @@
   return true;
 }
 
-// macro to check a given stack pointer against given stack limits and to die if test fails
+// Macro to check a given stack pointer against given stack limits and to die if test fails.
 #define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
     guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
 }
 
-// macro to check the current stack pointer against given stacklimits
+// Macro to check the current stack pointer against given stacklimits.
 #define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
   address sp; \
   sp = os::current_stack_pointer(); \
@@ -210,7 +228,7 @@
 static pid_t    _initial_pid       = 0;
 static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
 static sigset_t SR_sigset;
-static pthread_mutex_t dl_mutex;           // Used to protect dlsym() calls */
+static pthread_mutex_t dl_mutex;              // Used to protect dlsym() calls.
 
 julong os::available_memory() {
   return Aix::available_memory();
@@ -242,7 +260,6 @@
   return false;
 }
 
-
 // Return true if user is running as root.
 
 bool os::have_special_privileges() {
@@ -273,8 +290,7 @@
 
   for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
     if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
-      //if (Verbose)
-      fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
+      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
       return false;
     }
     p += maxDisclaimSize;
@@ -282,8 +298,7 @@
 
   if (lastDisclaimSize > 0) {
     if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
-      //if (Verbose)
-        fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
+      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
       return false;
     }
   }
@@ -323,11 +338,11 @@
 
 void os::Aix::initialize_system_info() {
 
-  // get the number of online(logical) cpus instead of configured
+  // Get the number of online(logical) cpus instead of configured.
   os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
   assert(_processor_count > 0, "_processor_count must be > 0");
 
-  // retrieve total physical storage
+  // Retrieve total physical storage.
   os::Aix::meminfo_t mi;
   if (!os::Aix::get_meminfo(&mi)) {
     fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
@@ -502,7 +517,6 @@
 
 } // end os::Aix::query_multipage_support()
 
-// The code for this method was initially derived from the version in os_linux.cpp.
 void os::init_system_properties_values() {
 
 #define DEFAULT_LIBPATH "/usr/lib:/lib"
@@ -599,10 +613,11 @@
   sigaction(sig, (struct sigaction*)NULL, &oact);
   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
     : CAST_FROM_FN_PTR(void*, oact.sa_handler);
-  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
+  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
     return true;
-  else
+  } else {
     return false;
+  }
 }
 
 void os::Aix::signal_sets_init() {
@@ -776,6 +791,9 @@
 
   // get the processor version from _system_configuration
   switch (_system_configuration.version) {
+  case PV_8:
+    strcpy(pci->version, "Power PC 8");
+    break;
   case PV_7:
     strcpy(pci->version, "Power PC 7");
     break;
@@ -803,6 +821,9 @@
   case PV_7_Compat:
     strcpy(pci->version, "PV_7_Compat");
     break;
+  case PV_8_Compat:
+    strcpy(pci->version, "PV_8_Compat");
+    break;
   default:
     strcpy(pci->version, "unknown");
   }
@@ -938,7 +959,9 @@
 
   pthread_attr_destroy(&attr);
 
-  if (ret != 0) {
+  if (ret == 0) {
+    // PPC port traceOsMisc(("Created New Thread : pthread-id %u", tid));
+  } else {
     if (PrintMiscellaneous && (Verbose || WizardMode)) {
       perror("pthread_create()");
     }
@@ -1095,8 +1118,7 @@
   if (os::Aix::on_pase()) {
     Unimplemented();
     return 0;
-  }
-  else {
+  } else {
     // On AIX use the precision of processors real time clock
     // or time base registers.
     timebasestruct_t time;
@@ -1149,7 +1171,6 @@
   }
 }
 
-
 char * os::local_time_string(char *buf, size_t buflen) {
   struct tm t;
   time_t long_time;
@@ -1187,7 +1208,6 @@
   if (abort_hook != NULL) {
     abort_hook();
   }
-
 }
 
 // Note: os::abort() might be called very early during initialization, or
@@ -1219,8 +1239,7 @@
 // from src/solaris/hpi/src/system_md.c
 
 size_t os::lasterror(char *buf, size_t len) {
-
-  if (errno == 0)  return 0;
+  if (errno == 0) return 0;
 
   const char *s = ::strerror(errno);
   size_t n = ::strlen(s);
@@ -1233,6 +1252,7 @@
 }
 
 intx os::current_thread_id() { return (intx)pthread_self(); }
+
 int os::current_process_id() {
 
   // This implementation returns a unique pid, the pid of the
@@ -1369,9 +1389,9 @@
   if (offset) {
     *offset = -1;
   }
-  if (buf) {
-    buf[0] = '\0';
-  }
+  // Buf is not optional, but offset is optional.
+  assert(buf != NULL, "sanity check");
+  buf[0] = '\0';
 
   // Resolve function ptr literals first.
   addr = resolve_function_descriptor_to_code_pointer(addr);
@@ -1404,12 +1424,9 @@
     return 0;
   }
 
-  if (Verbose) {
-    fprintf(stderr, "pc outside any module");
-  }
+  trcVerbose("pc outside any module");
 
   return -1;
-
 }
 
 bool os::dll_address_to_library_name(address addr, char* buf,
@@ -1417,9 +1434,9 @@
   if (offset) {
     *offset = -1;
   }
-  if (buf) {
-      buf[0] = '\0';
-  }
+  // Buf is not optional, but offset is optional.
+  assert(buf != NULL, "sanity check");
+  buf[0] = '\0';
 
   // Resolve function ptr literals first.
   addr = resolve_function_descriptor_to_code_pointer(addr);
@@ -1434,7 +1451,7 @@
 }
 
 // Loads .dll/.so and in case of error it checks if .dll/.so was built
-// for the same architecture as Hotspot is running on
+// for the same architecture as Hotspot is running on.
 void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
 
   if (ebuf && ebuflen > 0) {
@@ -1597,7 +1614,6 @@
   st->cr();
 }
 
-
 static void print_signal_handler(outputStream* st, int sig,
                                  char* buf, size_t buflen);
 
@@ -1621,7 +1637,7 @@
 
 static char saved_jvm_path[MAXPATHLEN] = {0};
 
-// Find the full path to the current module, libjvm.so or libjvm_g.so
+// Find the full path to the current module, libjvm.so.
 void os::jvm_path(char *buf, jint buflen) {
   // Error checking.
   if (buflen < MAXPATHLEN) {
@@ -1691,7 +1707,7 @@
   // Do not block out synchronous signals in the signal handler.
   // Blocking synchronous signals only makes sense if you can really
   // be sure that those signals won't happen during signal handling,
-  // when the blocking applies.  Normal signal handlers are lean and
+  // when the blocking applies. Normal signal handlers are lean and
   // do not cause signals. But our signal handlers tend to be "risky"
   // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
   // On AIX, PASE there was a case where a SIGSEGV happened, followed
@@ -2966,13 +2982,9 @@
   param.sched_priority = newpri;
   int ret = pthread_setschedparam(thr, policy, &param);
 
-  if (Verbose) {
-    if (ret == 0) {
-      fprintf(stderr, "changed priority of thread %d to %d\n", (int)thr, newpri);
-    } else {
-      fprintf(stderr, "Could not changed priority for thread %d to %d (error %d, %s)\n",
-              (int)thr, newpri, ret, strerror(ret));
-    }
+  if (ret != 0) {
+    trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
+        (int)thr, newpri, ret, strerror(ret));
   }
   return (ret == 0) ? OS_OK : OS_ERR;
 }
@@ -3093,7 +3105,6 @@
   errno = old_errno;
 }
 
-
 static int SR_initialize() {
   struct sigaction act;
   char *s;
@@ -3336,7 +3347,6 @@
   JVM_handle_aix_signal(sig, info, uc, true);
 }
 
-
 // This boolean allows users to forward their own non-matching signals
 // to JVM_handle_aix_signal, harmlessly.
 bool os::Aix::signal_handlers_are_installed = false;
@@ -3530,7 +3540,7 @@
     set_signal_handler(SIGDANGER, true);
 
     if (libjsig_is_loaded) {
-      // Tell libjsig jvm finishes setting signal handlers
+      // Tell libjsig jvm finishes setting signal handlers.
       (*end_signal_setting)();
     }
 
@@ -3546,7 +3556,7 @@
         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
         check_signals = false;
       }
-      // need to initialize check_signal_done
+      // Need to initialize check_signal_done.
       ::sigemptyset(&check_signal_done);
     }
   }
@@ -3620,7 +3630,6 @@
   st->cr();
 }
 
-
 #define DO_SIGNAL_CHECK(sig) \
   if (!sigismember(&check_signal_done, sig)) \
     os::Aix::check_signal_handler(sig)
@@ -3681,7 +3690,6 @@
     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
     : CAST_FROM_FN_PTR(address, act.sa_handler);
 
-
   switch(sig) {
   case SIGSEGV:
   case SIGBUS:
@@ -3829,15 +3837,13 @@
   pthread_mutex_init(&dl_mutex, NULL);
 }
 
-// this is called _after_ the global arguments have been parsed
+// This is called _after_ the global arguments have been parsed.
 jint os::init_2(void) {
 
-  if (Verbose) {
-    fprintf(stderr, "processor count: %d\n", os::_processor_count);
-    fprintf(stderr, "physical memory: %lu\n", Aix::_physical_memory);
-  }
-
-  // initially build up the loaded dll map
+  trcVerbose("processor count: %d", os::_processor_count);
+  trcVerbose("physical memory: %lu", Aix::_physical_memory);
+
+  // Initially build up the loaded dll map.
   LoadedLibraries::reload();
 
   const int page_size = Aix::page_size();
@@ -3887,7 +3893,7 @@
       }
 
       if (map_address != (address) MAP_FAILED) {
-        // map succeeded, but polling_page is not at wished address, unmap and continue.
+        // Map succeeded, but polling_page is not at wished address, unmap and continue.
         ::munmap(map_address, map_size);
         map_address = (address) MAP_FAILED;
       }
@@ -3941,7 +3947,7 @@
 
   // Make the stack size a multiple of the page size so that
   // the yellow/red zones can be guarded.
-  // note that this can be 0, if no default stacksize was set
+  // Note that this can be 0, if no default stacksize was set.
   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
 
   Aix::libpthread_init();
@@ -4254,7 +4260,6 @@
   return fd;
 }
 
-
 // create binary file, rewriting existing file if required
 int os::create_binary_file(const char* path, bool rewrite_existing) {
   int oflags = O_WRONLY | O_CREAT;
@@ -4323,7 +4328,6 @@
   return NULL;
 }
 
-
 // Remap a block of memory.
 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                           char *addr, size_t bytes, bool read_only,
@@ -4371,14 +4375,14 @@
   jlong sys_time = 0;
   jlong user_time = 0;
 
-  // reimplemented using getthrds64().
+  // Reimplemented using getthrds64().
   //
-  // goes like this:
+  // Works like this:
   // For the thread in question, get the kernel thread id. Then get the
   // kernel thread statistics using that id.
   //
   // This only works of course when no pthread scheduling is used,
-  // ie there is a 1:1 relationship to kernel threads.
+  // i.e. there is a 1:1 relationship to kernel threads.
   // On AIX, see AIXTHREAD_SCOPE variable.
 
   pthread_t pthtid = thread->osthread()->pthread_id();
@@ -4525,14 +4529,12 @@
   memset(&uts, 0, sizeof(uts));
   strcpy(uts.sysname, "?");
   if (::uname(&uts) == -1) {
-    fprintf(stderr, "uname failed (%d)\n", errno);
+    trc("uname failed (%d)", errno);
     guarantee(0, "Could not determine whether we run on AIX or PASE");
   } else {
-    if (Verbose) {
-      fprintf(stderr,"uname says: sysname \"%s\" version \"%s\" release \"%s\" "
-              "node \"%s\" machine \"%s\"\n",
-              uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
-    }
+    trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
+               "node \"%s\" machine \"%s\"\n",
+               uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
     const int major = atoi(uts.version);
     assert(major > 0, "invalid OS version");
     const int minor = atoi(uts.release);
@@ -4544,12 +4546,10 @@
       // We run on AIX. We do not support versions older than AIX 5.3.
       _on_pase = 0;
       if (_os_version < 0x0503) {
-        fprintf(stderr, "AIX release older than AIX 5.3 not supported.\n");
+        trc("AIX release older than AIX 5.3 not supported.");
         assert(false, "AIX release too old.");
       } else {
-        if (Verbose) {
-          fprintf(stderr, "We run on AIX %d.%d\n", major, minor);
-        }
+        trcVerbose("We run on AIX %d.%d\n", major, minor);
       }
     } else {
       assert(false, "unknown OS");
@@ -4557,7 +4557,6 @@
   }
 
   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
-
 } // end: os::Aix::initialize_os_info()
 
 // Scan environment for important settings which might effect the VM.
@@ -4595,12 +4594,10 @@
   // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
   // exec() ? before loading the libjvm ? ....)
   p = ::getenv("XPG_SUS_ENV");
-  if (Verbose) {
-    fprintf(stderr, "XPG_SUS_ENV=%s.\n", p ? p : "<unset>");
-  }
+  trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
   if (p && strcmp(p, "ON") == 0) {
     _xpg_sus_mode = 1;
-    fprintf(stderr, "Unsupported setting: XPG_SUS_ENV=ON\n");
+    trc("Unsupported setting: XPG_SUS_ENV=ON");
     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
     // clobber address ranges. If we ever want to support that, we have to do some
     // testing first.
@@ -4612,10 +4609,7 @@
   // Switch off AIX internal (pthread) guard pages. This has
   // immediate effect for any pthread_create calls which follow.
   p = ::getenv("AIXTHREAD_GUARDPAGES");
-  if (Verbose) {
-    fprintf(stderr, "AIXTHREAD_GUARDPAGES=%s.\n", p ? p : "<unset>");
-    fprintf(stderr, "setting AIXTHREAD_GUARDPAGES=0.\n");
-  }
+  trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
   rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
   guarantee(rc == 0, "");
 
@@ -4633,7 +4627,7 @@
   assert(os::Aix::on_aix(), "AIX only");
 
   if (!libperfstat::init()) {
-    fprintf(stderr, "libperfstat initialization failed.\n");
+    trc("libperfstat initialization failed.");
     assert(false, "libperfstat initialization failed");
   } else {
     if (Verbose) {
@@ -4805,7 +4799,6 @@
   return abstime;
 }
 
-
 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
 // Conceptually TryPark() should be equivalent to park(0).
 
@@ -4888,7 +4881,7 @@
   while (_Event < 0) {
     status = pthread_cond_timedwait(_cond, _mutex, &abst);
     assert_status(status == 0 || status == ETIMEDOUT,
-          status, "cond_timedwait");
+                  status, "cond_timedwait");
     if (!FilterSpuriousWakeups) break;         // previous semantics
     if (status == ETIMEDOUT) break;
     // We consume and ignore EINTR and spurious wakeups.
@@ -5022,9 +5015,9 @@
   // Optional fast-path check:
   // Return immediately if a permit is available.
   if (_counter > 0) {
-      _counter = 0;
-      OrderAccess::fence();
-      return;
+    _counter = 0;
+    OrderAccess::fence();
+    return;
   }
 
   Thread* thread = Thread::current();
@@ -5046,7 +5039,6 @@
     unpackTime(&absTime, isAbsolute, time);
   }
 
-
   // Enter safepoint region
   // Beware of deadlocks such as 6317397.
   // The per-thread Parker:: mutex is a classic leaf-lock.
@@ -5134,7 +5126,6 @@
   }
 }
 
-
 extern char** environ;
 
 // Run the specified command in a separate process. Return its exit value,
@@ -5153,44 +5144,43 @@
   } else if (pid == 0) {
     // child process
 
-    // try to be consistent with system(), which uses "/usr/bin/sh" on AIX
+    // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
     execve("/usr/bin/sh", argv, environ);
 
     // execve failed
     _exit(-1);
 
-  } else  {
+  } else {
     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
     // care about the actual exit code, for now.
 
     int status;
 
-    // Wait for the child process to exit.  This returns immediately if
+    // Wait for the child process to exit. This returns immediately if
     // the child has already exited. */
     while (waitpid(pid, &status, 0) < 0) {
-        switch (errno) {
+      switch (errno) {
         case ECHILD: return 0;
         case EINTR: break;
         default: return -1;
-        }
+      }
     }
 
     if (WIFEXITED(status)) {
-       // The child exited normally; get its exit code.
-       return WEXITSTATUS(status);
+      // The child exited normally; get its exit code.
+      return WEXITSTATUS(status);
     } else if (WIFSIGNALED(status)) {
-       // The child exited because of a signal
-       // The best value to return is 0x80 + signal number,
-       // because that is what all Unix shells do, and because
-       // it allows callers to distinguish between process exit and
-       // process death by signal.
-       return 0x80 + WTERMSIG(status);
+      // The child exited because of a signal.
+      // The best value to return is 0x80 + signal number,
+      // because that is what all Unix shells do, and because
+      // it allows callers to distinguish between process exit and
+      // process death by signal.
+      return 0x80 + WTERMSIG(status);
     } else {
-       // Unknown exit code; pass it through
-       return status;
+      // Unknown exit code; pass it through.
+      return status;
     }
   }
-  // Remove warning.
   return -1;
 }
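The reformatted fork/exec path keeps the usual POSIX convention: retry waitpid() on EINTR, return WEXITSTATUS() after a normal exit, and 0x80 plus the signal number when the child was killed, so callers can tell the two cases apart the same way a shell's $? does. A standalone sketch of that decoding (illustrative, not part of the changeset):

    #include <cerrno>
    #include <cstdio>
    #include <sys/wait.h>
    #include <unistd.h>

    // Decode a child's exit status the way the run-command helper above does.
    static int wait_for_child(pid_t pid) {
      int status;
      while (waitpid(pid, &status, 0) < 0) {
        switch (errno) {
          case ECHILD: return 0;    // already reaped
          case EINTR:  break;       // interrupted, retry
          default:     return -1;
        }
      }
      if (WIFEXITED(status))   return WEXITSTATUS(status);
      if (WIFSIGNALED(status)) return 0x80 + WTERMSIG(status);  // shell-style "killed by signal"
      return status;                                            // unknown, pass through
    }

    int main() {
      pid_t pid = fork();
      if (pid < 0)  return 1;
      if (pid == 0) _exit(7);       // child: exit with a known code
      printf("child exit code: %d\n", wait_for_child(pid));  // prints 7
      return 0;
    }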
 
@@ -5205,7 +5195,7 @@
   struct stat statbuf;
   char buf[MAXPATHLEN];
   char libmawtpath[MAXPATHLEN];
-  const char *xawtstr  = "/xawt/libmawt.so";
+  const char *xawtstr = "/xawt/libmawt.so";
   const char *new_xawtstr = "/libawt_xawt.so";
 
   char *p;
--- a/src/os/aix/vm/os_aix.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/aix/vm/os_aix.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -209,7 +209,7 @@
     return _can_use_16M_pages == 1 ? true : false;
   }
 
-  static address   ucontext_get_pc(ucontext_t* uc);
+  static address   ucontext_get_pc(const ucontext_t* uc);
   static intptr_t* ucontext_get_sp(ucontext_t* uc);
   static intptr_t* ucontext_get_fp(ucontext_t* uc);
   // Set PC into context. Needed for continuation after signal.
--- a/src/os/aix/vm/os_aix.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/aix/vm/os_aix.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,12 +26,9 @@
 #ifndef OS_AIX_VM_OS_AIX_INLINE_HPP
 #define OS_AIX_VM_OS_AIX_INLINE_HPP
 
-#include "runtime/atomic.hpp"
+#include "runtime/atomic.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
-#ifdef TARGET_OS_ARCH_aix_ppc
-# include "atomic_aix_ppc.inline.hpp"
-# include "orderAccess_aix_ppc.inline.hpp"
-#endif
 
 // System includes
 
--- a/src/os/aix/vm/perfMemory_aix.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/aix/vm/perfMemory_aix.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -31,6 +31,7 @@
 #include "os_aix.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/perfMemory.hpp"
+#include "services/memTracker.hpp"
 #include "utilities/exceptions.hpp"
 
 // put OS-includes here
@@ -196,12 +197,37 @@
   return pid;
 }
 
+// Check if the given statbuf is considered a secure directory for
+// the backing store files. Returns true if the directory is considered
+// a secure location. Returns false if the statbuf is a symbolic link or
+// if an error occurred.
+static bool is_statbuf_secure(struct stat *statp) {
+  if (S_ISLNK(statp->st_mode) || !S_ISDIR(statp->st_mode)) {
+    // The path represents a link or some non-directory file type,
+    // which is not what we expected. Declare it insecure.
+    //
+    return false;
+  }
+  // We have an existing directory, check if the permissions are safe.
+  if ((statp->st_mode & (S_IWGRP|S_IWOTH)) != 0) {
+    // The directory is open for writing and could be subjected
+    // to a symlink or a hard link attack. Declare it insecure.
+    return false;
+  }
+  // See if the uid of the directory matches the effective uid of the process.
+  //
+  if (statp->st_uid != geteuid()) {
+    // The directory was not created by this user, declare it insecure.
+    return false;
+  }
+  return true;
+}
 
-// check if the given path is considered a secure directory for
+
+// Check if the given path is considered a secure directory for
 // the backing store files. Returns true if the directory exists
 // and is considered a secure location. Returns false if the path
 // is a symbolic link or if an error occurred.
-//
 static bool is_directory_secure(const char* path) {
   struct stat statbuf;
   int result = 0;
@@ -211,38 +237,276 @@
     return false;
   }
 
-  // the path exists, now check it's mode
-  if (S_ISLNK(statbuf.st_mode) || !S_ISDIR(statbuf.st_mode)) {
-    // the path represents a link or some non-directory file type,
-    // which is not what we expected. declare it insecure.
-    //
+  // The path exists, see if it is secure.
+  return is_statbuf_secure(&statbuf);
+}
+
+// (Taken over from Solaris to support the O_NOFOLLOW case on AIX.)
+// Check if the given directory file descriptor is considered a secure
+// directory for the backing store files. Returns true if the directory
+// exists and is considered a secure location. Returns false if the path
+// is a symbolic link or if an error occurred.
+static bool is_dirfd_secure(int dir_fd) {
+  struct stat statbuf;
+  int result = 0;
+
+  RESTARTABLE(::fstat(dir_fd, &statbuf), result);
+  if (result == OS_ERR) {
+    return false;
+  }
+
+  // The path exists, now check its mode.
+  return is_statbuf_secure(&statbuf);
+}
+
+
+// Check to make sure fd1 and fd2 are referencing the same file system object.
+static bool is_same_fsobject(int fd1, int fd2) {
+  struct stat statbuf1;
+  struct stat statbuf2;
+  int result = 0;
+
+  RESTARTABLE(::fstat(fd1, &statbuf1), result);
+  if (result == OS_ERR) {
+    return false;
+  }
+  RESTARTABLE(::fstat(fd2, &statbuf2), result);
+  if (result == OS_ERR) {
+    return false;
+  }
+
+  if ((statbuf1.st_ino == statbuf2.st_ino) &&
+      (statbuf1.st_dev == statbuf2.st_dev)) {
+    return true;
+  } else {
     return false;
   }
-  else {
-    // we have an existing directory, check if the permissions are safe.
-    //
-    if ((statbuf.st_mode & (S_IWGRP|S_IWOTH)) != 0) {
-      // the directory is open for writing and could be subjected
-      // to a symlnk attack. declare it insecure.
-      //
-      return false;
+}
+
+// Helper functions for open without O_NOFOLLOW which is not present on AIX 5.3/6.1.
+// We use the jdk6 implementation here.
+#ifndef O_NOFOLLOW
+// The O_NOFOLLOW oflag doesn't exist before Solaris 5.10; this simulates that behaviour
+// the same way the JDK 5/6 HotSpot code did.
+static int open_o_nofollow_impl(const char* path, int oflag, mode_t mode, bool use_mode) {
+  struct stat orig_st;
+  struct stat new_st;
+  bool create;
+  int error;
+  int fd;
+
+  create = false;
+
+  if (lstat(path, &orig_st) != 0) {
+    if (errno == ENOENT && (oflag & O_CREAT) != 0) {
+      // File doesn't exist, but we want to create it; add the O_EXCL flag
+      // to make sure no-one creates it (or a symlink) before us
+      // This works as we expect with symlinks, from posix man page:
+      // 'If O_EXCL  and  O_CREAT  are set, and path names a symbolic
+      // link, open() shall fail and set errno to [EEXIST]'.
+      oflag |= O_EXCL;
+      create = true;
+    } else {
+      // File doesn't exist, and we are not creating it.
+      return OS_ERR;
+    }
+  } else {
+    // Lstat success, check if existing file is a link.
+    if ((orig_st.st_mode & S_IFMT) == S_IFLNK)  {
+      // File is a symlink.
+      errno = ELOOP;
+      return OS_ERR;
+    }
+  }
+
+  if (use_mode == true) {
+    fd = open(path, oflag, mode);
+  } else {
+    fd = open(path, oflag);
+  }
+
+  if (fd == OS_ERR) {
+    return fd;
+  }
+
+  // Can't do inode checks on before/after if we created the file.
+  if (create == false) {
+    if (fstat(fd, &new_st) != 0) {
+      // Keep errno from fstat, in case close also fails.
+      error = errno;
+      ::close(fd);
+      errno = error;
+      return OS_ERR;
+    }
+
+    if (orig_st.st_dev != new_st.st_dev || orig_st.st_ino != new_st.st_ino) {
+      // File was tampered with during race window.
+      ::close(fd);
+      errno = EEXIST;
+      if (PrintMiscellaneous && Verbose) {
+        warning("possible file tampering attempt detected when opening %s", path);
+      }
+      return OS_ERR;
     }
   }
+
+  return fd;
+}
+
+static int open_o_nofollow(const char* path, int oflag, mode_t mode) {
+  return open_o_nofollow_impl(path, oflag, mode, true);
+}
+
+static int open_o_nofollow(const char* path, int oflag) {
+  return open_o_nofollow_impl(path, oflag, 0, false);
+}
+#endif
+
+// Open the directory of the given path and validate it.
+// Return a DIR * of the open directory.
+static DIR *open_directory_secure(const char* dirname) {
+  // Open the directory using open() so that it can be verified
+  // to be secure by calling is_dirfd_secure(), opendir() and then check
+  // to see if they are the same file system object.  This method does not
+  // introduce a window of opportunity for the directory to be attacked that
+  // calling opendir() and is_directory_secure() does.
+  int result;
+  DIR *dirp = NULL;
+
+  // No O_NOFOLLOW defined at buildtime, and it is not documented for open;
+  // so provide a workaround in this case.
+#ifdef O_NOFOLLOW
+  RESTARTABLE(::open(dirname, O_RDONLY|O_NOFOLLOW), result);
+#else
+  // workaround (jdk6 coding)
+  RESTARTABLE(::open_o_nofollow(dirname, O_RDONLY), result);
+#endif
+
+  if (result == OS_ERR) {
+    // Directory doesn't exist or is a symlink, so there is nothing to cleanup.
+    if (PrintMiscellaneous && Verbose) {
+      if (errno == ELOOP) {
+        warning("directory %s is a symlink and is not secure\n", dirname);
+      } else {
+        warning("could not open directory %s: %s\n", dirname, strerror(errno));
+      }
+    }
+    return dirp;
+  }
+  int fd = result;
+
+  // Determine if the open directory is secure.
+  if (!is_dirfd_secure(fd)) {
+    // The directory is not a secure directory.
+    os::close(fd);
+    return dirp;
+  }
+
+  // Open the directory.
+  dirp = ::opendir(dirname);
+  if (dirp == NULL) {
+    // The directory doesn't exist, close fd and return.
+    os::close(fd);
+    return dirp;
+  }
+
+  // Check to make sure fd and dirp are referencing the same file system object.
+  if (!is_same_fsobject(fd, dirp->dd_fd)) {
+    // The directory is not secure.
+    os::close(fd);
+    os::closedir(dirp);
+    dirp = NULL;
+    return dirp;
+  }
+
+  // Close initial open now that we know directory is secure
+  os::close(fd);
+
+  return dirp;
+}
+
+// NOTE: The code below uses fchdir(), open() and unlink() because
+// fdopendir(), openat() and unlinkat() are not supported on all
+// versions.  Once the support for fdopendir(), openat() and unlinkat()
+// is available on all supported versions the code can be changed
+// to use these functions.
+
+// Open the directory of the given path, validate it and set the
+// current working directory to it.
+// Return a DIR * of the open directory and the saved cwd fd.
+//
+static DIR *open_directory_secure_cwd(const char* dirname, int *saved_cwd_fd) {
+
+  // Open the directory.
+  DIR* dirp = open_directory_secure(dirname);
+  if (dirp == NULL) {
+    // Directory doesn't exist or is insecure, so there is nothing to cleanup.
+    return dirp;
+  }
+  int fd = dirp->dd_fd;
+
+  // Open a fd to the cwd and save it off.
+  int result;
+  RESTARTABLE(::open(".", O_RDONLY), result);
+  if (result == OS_ERR) {
+    *saved_cwd_fd = -1;
+  } else {
+    *saved_cwd_fd = result;
+  }
+
+  // Set the current directory to dirname by using the fd of the directory.
+  result = fchdir(fd);
+
+  return dirp;
+}
+
+// Close the directory and restore the current working directory.
+static void close_directory_secure_cwd(DIR* dirp, int saved_cwd_fd) {
+
+  int result;
+  // If we have a saved cwd change back to it and close the fd.
+  if (saved_cwd_fd != -1) {
+    result = fchdir(saved_cwd_fd);
+    ::close(saved_cwd_fd);
+  }
+
+  // Close the directory.
+  os::closedir(dirp);
+}
+
+// Check if the given file descriptor is considered secure.
+static bool is_file_secure(int fd, const char *filename) {
+
+  int result;
+  struct stat statbuf;
+
+  // Determine if the file is secure.
+  RESTARTABLE(::fstat(fd, &statbuf), result);
+  if (result == OS_ERR) {
+    if (PrintMiscellaneous && Verbose) {
+      warning("fstat failed on %s: %s\n", filename, strerror(errno));
+    }
+    return false;
+  }
+  if (statbuf.st_nlink > 1) {
+    // A file with multiple links is not expected.
+    if (PrintMiscellaneous && Verbose) {
+      warning("file %s has multiple links\n", filename);
+    }
+    return false;
+  }
   return true;
 }
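The helpers introduced above (is_dirfd_secure, is_same_fsobject, the open_o_nofollow emulation, open_directory_secure, open_directory_secure_cwd and is_file_secure) share one goal: validate the directory through an open file descriptor rather than by path name, so a symlink cannot be swapped in between the check and the use. A minimal standalone sketch of the check-then-compare idea follows; it assumes O_NOFOLLOW exists (so it skips the AIX 5.3/6.1 fallback) and uses the portable dirfd() where the code above reads dirp->dd_fd:

    #include <dirent.h>
    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>
    #include <cstdio>

    // Open dirname, verify it is a non-symlink directory that is not group/other
    // writable and is owned by the effective uid, then make sure opendir() resolved
    // to the same file system object as the descriptor we already validated.
    static DIR* open_directory_checked(const char* dirname) {
      int fd = open(dirname, O_RDONLY | O_NOFOLLOW);
      if (fd < 0) return nullptr;

      struct stat by_fd;
      if (fstat(fd, &by_fd) != 0 ||
          !S_ISDIR(by_fd.st_mode) ||
          (by_fd.st_mode & (S_IWGRP | S_IWOTH)) != 0 ||
          by_fd.st_uid != geteuid()) {
        close(fd);
        return nullptr;
      }

      DIR* dirp = opendir(dirname);
      if (dirp == nullptr) { close(fd); return nullptr; }

      struct stat by_dir;
      if (fstat(dirfd(dirp), &by_dir) != 0 ||
          by_dir.st_ino != by_fd.st_ino || by_dir.st_dev != by_fd.st_dev) {
        closedir(dirp);
        close(fd);
        return nullptr;
      }

      close(fd);   // only needed for the comparison
      return dirp;
    }

    int main() {
      DIR* d = open_directory_checked("/tmp");
      printf("/tmp %s the checks\n", d != nullptr ? "passes" : "fails");
      if (d != nullptr) closedir(d);
      return 0;
    }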
 
-
-// return the user name for the given user id
+// Return the user name for the given user id.
 //
-// the caller is expected to free the allocated memory.
-//
+// The caller is expected to free the allocated memory.
 static char* get_user_name(uid_t uid) {
 
   struct passwd pwent;
 
-  // determine the max pwbuf size from sysconf, and hardcode
+  // Determine the max pwbuf size from sysconf, and hardcode
   // a default if this not available through sysconf.
-  //
   long bufsize = sysconf(_SC_GETPW_R_SIZE_MAX);
   if (bufsize == -1)
     bufsize = 1024;
@@ -344,7 +608,8 @@
     strcat(usrdir_name, "/");
     strcat(usrdir_name, dentry->d_name);
 
-    DIR* subdirp = os::opendir(usrdir_name);
+    // Open the user directory.
+    DIR* subdirp = open_directory_secure(usrdir_name);
 
     if (subdirp == NULL) {
       FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
@@ -464,28 +729,7 @@
   }
 }
 
-
-// remove file
-//
-// this method removes the file with the given file name in the
-// named directory.
-//
-static void remove_file(const char* dirname, const char* filename) {
-
-  size_t nbytes = strlen(dirname) + strlen(filename) + 2;
-  char* path = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
-
-  strcpy(path, dirname);
-  strcat(path, "/");
-  strcat(path, filename);
-
-  remove_file(path);
-
-  FREE_C_HEAP_ARRAY(char, path, mtInternal);
-}
-
-
-// cleanup stale shared memory resources
+// Cleanup stale shared memory resources
 //
 // This method attempts to remove all stale shared memory files in
 // the named user temporary directory. It scans the named directory
@@ -493,32 +737,26 @@
 // process id is extracted from the file name and a test is run to
 // determine if the process is alive. If the process is not alive,
 // any stale file resources are removed.
-//
 static void cleanup_sharedmem_resources(const char* dirname) {
 
-  // open the user temp directory
-  DIR* dirp = os::opendir(dirname);
-
+  int saved_cwd_fd;
+  // Open the directory.
+  DIR* dirp = open_directory_secure_cwd(dirname, &saved_cwd_fd);
   if (dirp == NULL) {
-    // directory doesn't exist, so there is nothing to cleanup
+     // Directory doesn't exist or is insecure, so there is nothing to cleanup.
     return;
   }
 
-  if (!is_directory_secure(dirname)) {
-    // the directory is not a secure directory
-    return;
-  }
-
-  // for each entry in the directory that matches the expected file
+  // For each entry in the directory that matches the expected file
   // name pattern, determine if the file resources are stale and if
   // so, remove the file resources. Note, instrumented HotSpot processes
   // for this user may start and/or terminate during this search and
   // remove or create new files in this directory. The behavior of this
   // loop under these conditions is dependent upon the implementation of
   // opendir/readdir.
-  //
   struct dirent* entry;
   char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal);
+
   errno = 0;
   while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) {
 
@@ -528,56 +766,55 @@
 
       if (strcmp(entry->d_name, ".") != 0 && strcmp(entry->d_name, "..") != 0) {
 
-        // attempt to remove all unexpected files, except "." and ".."
-        remove_file(dirname, entry->d_name);
+        // Attempt to remove all unexpected files, except "." and "..".
+        unlink(entry->d_name);
       }
 
       errno = 0;
       continue;
     }
 
-    // we now have a file name that converts to a valid integer
+    // We now have a file name that converts to a valid integer
     // that could represent a process id . if this process id
     // matches the current process id or the process is not running,
     // then remove the stale file resources.
     //
-    // process liveness is detected by sending signal number 0 to
+    // Process liveness is detected by sending signal number 0 to
     // the process id (see kill(2)). if kill determines that the
     // process does not exist, then the file resources are removed.
     // if kill determines that that we don't have permission to
     // signal the process, then the file resources are assumed to
     // be stale and are removed because the resources for such a
     // process should be in a different user specific directory.
-    //
     if ((pid == os::current_process_id()) ||
         (kill(pid, 0) == OS_ERR && (errno == ESRCH || errno == EPERM))) {
 
-        remove_file(dirname, entry->d_name);
+        unlink(entry->d_name);
     }
     errno = 0;
   }
-  os::closedir(dirp);
+
+  // Close the directory and reset the current working directory.
+  close_directory_secure_cwd(dirp, saved_cwd_fd);
+
   FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
 }
 
-// make the user specific temporary directory. Returns true if
+// Make the user specific temporary directory. Returns true if
 // the directory exists and is secure upon return. Returns false
 // if the directory exists but is either a symlink, is otherwise
 // insecure, or if an error occurred.
-//
 static bool make_user_tmp_dir(const char* dirname) {
 
-  // create the directory with 0755 permissions. note that the directory
+  // Create the directory with 0755 permissions. Note that the directory
   // will be owned by euid::egid, which may not be the same as uid::gid.
-  //
   if (mkdir(dirname, S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH) == OS_ERR) {
     if (errno == EEXIST) {
       // The directory already exists and was probably created by another
       // JVM instance. However, this could also be the result of a
       // deliberate symlink. Verify that the existing directory is safe.
-      //
       if (!is_directory_secure(dirname)) {
-        // directory is not secure
+        // Directory is not secure.
         if (PrintMiscellaneous && Verbose) {
           warning("%s directory is insecure\n", dirname);
         }
@@ -613,19 +850,63 @@
     return -1;
   }
 
+  int saved_cwd_fd;
+  // Open the directory and set the current working directory to it.
+  DIR* dirp = open_directory_secure_cwd(dirname, &saved_cwd_fd);
+  if (dirp == NULL) {
+    // Directory doesn't exist or is insecure, so cannot create shared
+    // memory file.
+    return -1;
+  }
+
+  // Open the filename in the current directory.
+  // Cannot use O_TRUNC here; truncation of an existing file has to happen
+  // after the is_file_secure() check below.
   int result;
 
-  RESTARTABLE(::open(filename, O_RDWR|O_CREAT|O_TRUNC, S_IREAD|S_IWRITE), result);
+  // No O_NOFOLLOW defined at buildtime, and it is not documented for open;
+  // so provide a workaround in this case.
+#ifdef O_NOFOLLOW
+  RESTARTABLE(::open(filename, O_RDWR|O_CREAT|O_NOFOLLOW, S_IREAD|S_IWRITE), result);
+#else
+  // workaround function (jdk6 code)
+  RESTARTABLE(::open_o_nofollow(filename, O_RDWR|O_CREAT, S_IREAD|S_IWRITE), result);
+#endif
+
   if (result == OS_ERR) {
     if (PrintMiscellaneous && Verbose) {
-      warning("could not create file %s: %s\n", filename, strerror(errno));
+      if (errno == ELOOP) {
+        warning("file %s is a symlink and is not secure\n", filename);
+      } else {
+        warning("could not create file %s: %s\n", filename, strerror(errno));
+      }
     }
+    // Close the directory and reset the current working directory.
+    close_directory_secure_cwd(dirp, saved_cwd_fd);
+
     return -1;
   }
+  // Close the directory and reset the current working directory.
+  close_directory_secure_cwd(dirp, saved_cwd_fd);
 
   // save the file descriptor
   int fd = result;
 
+  // Check to see if the file is secure.
+  if (!is_file_secure(fd, filename)) {
+    ::close(fd);
+    return -1;
+  }
+
+  // Truncate the file to get rid of any existing data.
+  RESTARTABLE(::ftruncate(fd, (off_t)0), result);
+  if (result == OS_ERR) {
+    if (PrintMiscellaneous && Verbose) {
+      warning("could not truncate shared memory file: %s\n", strerror(errno));
+    }
+    ::close(fd);
+    return -1;
+  }
   // set the file size
   RESTARTABLE(::ftruncate(fd, (off_t)size), result);
   if (result == OS_ERR) {
@@ -647,7 +928,14 @@
 
   // open the file
   int result;
+  // No O_NOFOLLOW defined at buildtime, and it is not documented for open;
+  // so provide a workaround in this case
+#ifdef O_NOFOLLOW
   RESTARTABLE(::open(filename, oflags), result);
+#else
+  RESTARTABLE(::open_o_nofollow(filename, oflags), result);
+#endif
+
   if (result == OS_ERR) {
     if (errno == ENOENT) {
       THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
@@ -661,8 +949,15 @@
       THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno));
     }
   }
+  int fd = result;
 
-  return result;
+  // Check to see if the file is secure.
+  if (!is_file_secure(fd, filename)) {
+    ::close(fd);
+    return -1;
+  }
+
+  return fd;
 }
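is_file_secure() above adds one more defence once the file is open: a perf data file that suddenly has more than one hard link is treated as tampered with and rejected. The same check in isolation (illustrative sketch, not from this changeset):

    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>
    #include <cstdio>

    // Reject an already-open file that has extra hard links; the backing store
    // file is expected to exist under exactly one name.
    static bool file_looks_secure(int fd) {
      struct stat sb;
      if (fstat(fd, &sb) != 0) return false;
      return sb.st_nlink == 1;
    }

    int main() {
      int fd = open("/etc/hosts", O_RDONLY);
      if (fd >= 0) {
        printf("nlink check: %s\n", file_looks_secure(fd) ? "ok" : "suspicious");
        close(fd);
      }
      return 0;
    }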
 
 // create a named shared memory region. returns the address of the
@@ -694,13 +989,21 @@
   char* dirname = get_user_tmp_dir(user_name);
   char* filename = get_sharedmem_filename(dirname, vmid);
 
+  // Get the short filename.
+  char* short_filename = strrchr(filename, '/');
+  if (short_filename == NULL) {
+    short_filename = filename;
+  } else {
+    short_filename++;
+  }
+
   // cleanup any stale shared memory files
   cleanup_sharedmem_resources(dirname);
 
   assert(((size > 0) && (size % os::vm_page_size() == 0)),
          "unexpected PerfMemory region size");
 
-  fd = create_sharedmem_resources(dirname, filename, size);
+  fd = create_sharedmem_resources(dirname, short_filename, size);
 
   FREE_C_HEAP_ARRAY(char, user_name, mtInternal);
   FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
@@ -732,6 +1035,9 @@
   // clear the shared memory region
   (void)::memset((void*) mapAddress, 0, size);
 
+  // This mapping does not go through the os:: API, so the reservation has to be recorded here.
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC, mtInternal);
+
   return mapAddress;
 }
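The new MemTracker::record_virtual_memory_reserve call is there because this mapping is created with a raw ::mmap rather than through the os:: reservation API that normally reports to Native Memory Tracking, so the reservation has to be reported explicitly to keep NMT's accounting consistent. A standalone sketch of the pattern with a trivial stand-in tracker; the tracker below is illustrative, not HotSpot's NMT, and an anonymous mapping is used only to keep the sketch self-contained:

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdio>

    // Stand-in for NMT; in HotSpot this is MemTracker::record_virtual_memory_reserve().
    static size_t g_tracked_bytes = 0;
    static void record_virtual_memory_reserve(void* addr, size_t size) {
      g_tracked_bytes += size;
      printf("tracked reserve of %zu bytes at %p\n", size, addr);
    }

    static void* reserve_region(size_t size) {
      void* p = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) return nullptr;
      // The mapping bypassed the usual os:: layer, so report it explicitly,
      // just as the perfMemory code above does after its ::mmap succeeds.
      record_virtual_memory_reserve(p, size);
      return p;
    }

    int main() {
      void* p = reserve_region(64 * 1024);
      if (p != nullptr) munmap(p, 64 * 1024);
      printf("total tracked: %zu bytes\n", g_tracked_bytes);
      return 0;
    }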
 
@@ -806,7 +1112,7 @@
   char* mapAddress;
   int result;
   int fd;
-  size_t size;
+  size_t size = 0;
   const char* luser = NULL;
 
   int mmap_prot;
@@ -818,12 +1124,18 @@
   // constructs for the file and the shared memory mapping.
   if (mode == PerfMemory::PERF_MODE_RO) {
     mmap_prot = PROT_READ;
+
+  // No O_NOFOLLOW defined at buildtime, and it is not documented for open.
+#ifdef O_NOFOLLOW
+    file_flags = O_RDONLY | O_NOFOLLOW;
+#else
     file_flags = O_RDONLY;
+#endif
   }
   else if (mode == PerfMemory::PERF_MODE_RW) {
 #ifdef LATER
     mmap_prot = PROT_READ | PROT_WRITE;
-    file_flags = O_RDWR;
+    file_flags = O_RDWR | O_NOFOLLOW;
 #else
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
               "Unsupported access mode");
@@ -853,6 +1165,9 @@
   //
   if (!is_directory_secure(dirname)) {
     FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
+    if (luser != user) {
+      FREE_C_HEAP_ARRAY(char, luser, mtInternal);
+    }
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
               "Process not found");
   }
@@ -897,6 +1212,9 @@
               "Could not map PerfMemory");
   }
 
+  // This mapping does not go through the os:: API, so the reservation has to be recorded here.
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC, mtInternal);
+
   *addr = mapAddress;
   *sizep = size;
 
--- a/src/os/aix/vm/thread_aix.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/aix/vm/thread_aix.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,15 +26,9 @@
 #ifndef OS_AIX_VM_THREAD_AIX_INLINE_HPP
 #define OS_AIX_VM_THREAD_AIX_INLINE_HPP
 
-#include "runtime/atomic.hpp"
-#include "runtime/prefetch.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/threadLocalStorage.hpp"
 
-#include "atomic_aix_ppc.inline.hpp"
-#include "orderAccess_aix_ppc.inline.hpp"
-#include "prefetch_aix_ppc.inline.hpp"
-
 // Contains inlined functions for class Thread and ThreadLocalStorage
 
 inline void ThreadLocalStorage::pd_invalidate_all() {} // nothing to do
--- a/src/os/bsd/dtrace/libjvm_db.c	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/bsd/dtrace/libjvm_db.c	Fri Apr 10 16:58:26 2015 -0700
@@ -260,6 +260,9 @@
   uint64_t base;
   int err;
 
+  /* Clear *vmp now in case we jump to fail: */
+  memset(vmp, 0, sizeof(VMStructEntry));
+
   err = ps_pglobal_lookup(J->P, LIBJVM_SO, "gHotSpotVMStructs", &sym_addr);
   CHECK_FAIL(err);
   err = read_pointer(J, sym_addr, &gHotSpotVMStructs);
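Zeroing *vmp up front matters because every later step can jump to fail: through CHECK_FAIL, and without the memset a caller could read uninitialized fields out of a partially filled entry. A standalone sketch of that goto-fail shape; the struct and macro below are illustrative stand-ins, not the libjvm_db.c definitions:

    #include <cstring>
    #include <cstdio>

    struct EntrySketch {        // stand-in for VMStructEntry
      const char*        typeName;
      const char*        fieldName;
      unsigned long long address;
    };

    #define CHECK_FAIL(err) if ((err) != 0) goto fail

    // Any failed step jumps to fail:, so the output is cleared first and the
    // caller never sees stack garbage, which is exactly what the hunk above adds.
    static int lookup_entry(int simulated_error, EntrySketch* vmp) {
      int err;
      memset(vmp, 0, sizeof(*vmp));

      err = simulated_error;    // stands in for ps_pglobal_lookup()/read_pointer()
      CHECK_FAIL(err);

      vmp->typeName = "ExampleType";
      vmp->address  = 0x1000;
      return 0;

    fail:
      return -1;
    }

    int main() {
      EntrySketch e;
      lookup_entry(1, &e);      // fails early ...
      printf("typeName is %s, address=%llu\n",
             e.typeName == nullptr ? "null" : e.typeName, e.address);  // ... but e is zeroed
      return 0;
    }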
--- a/src/os/bsd/vm/os_bsd.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/bsd/vm/os_bsd.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -48,6 +48,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -2433,23 +2434,25 @@
   }
 
   // The memory is committed
-  MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
+  MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
 
   return addr;
 }
 
 bool os::release_memory_special(char* base, size_t bytes) {
-  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
-  // detaching the SHM segment will also delete it, see reserve_memory_special()
-  int rslt = shmdt(base);
-  if (rslt == 0) {
-    tkr.record((address)base, bytes);
-    return true;
+  if (MemTracker::tracking_level() > NMT_minimal) {
+    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
+    // detaching the SHM segment will also delete it, see reserve_memory_special()
+    int rslt = shmdt(base);
+    if (rslt == 0) {
+      tkr.record((address)base, bytes);
+      return true;
+    } else {
+      return false;
+    }
   } else {
-    tkr.discard();
-    return false;
+    return shmdt(base) == 0;
   }
-
 }
 
 size_t os::large_page_size() {
--- a/src/os/bsd/vm/os_bsd.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/bsd/vm/os_bsd.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,15 +26,9 @@
 #define OS_BSD_VM_OS_BSD_INLINE_HPP
 
 #include "runtime/atomic.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 
-#ifdef TARGET_OS_ARCH_bsd_x86
-# include "orderAccess_bsd_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_zero
-# include "orderAccess_bsd_zero.inline.hpp"
-#endif
-
 // System includes
 
 #include <unistd.h>
--- a/src/os/bsd/vm/perfMemory_bsd.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/bsd/vm/perfMemory_bsd.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -197,7 +197,38 @@
 }
 
 
-// check if the given path is considered a secure directory for
+// Check if the given statbuf is considered a secure directory for
+// the backing store files. Returns true if the directory is considered
+// a secure location. Returns false if the statbuf is a symbolic link or
+// if an error occurred.
+//
+static bool is_statbuf_secure(struct stat *statp) {
+  if (S_ISLNK(statp->st_mode) || !S_ISDIR(statp->st_mode)) {
+    // The path represents a link or some non-directory file type,
+    // which is not what we expected. Declare it insecure.
+    //
+    return false;
+  }
+  // We have an existing directory, check if the permissions are safe.
+  //
+  if ((statp->st_mode & (S_IWGRP|S_IWOTH)) != 0) {
+    // The directory is open for writing and could be subjected
+    // to a symlink or a hard link attack. Declare it insecure.
+    //
+    return false;
+  }
+  // See if the uid of the directory matches the effective uid of the process.
+  //
+  if (statp->st_uid != geteuid()) {
+    // The directory was not created by this user, declare it insecure.
+    //
+    return false;
+  }
+  return true;
+}
+
+
+// Check if the given path is considered a secure directory for
 // the backing store files. Returns true if the directory exists
 // and is considered a secure location. Returns false if the path
 // is a symbolic link or if an error occurred.
@@ -211,27 +242,185 @@
     return false;
   }
 
-  // the path exists, now check it's mode
-  if (S_ISLNK(statbuf.st_mode) || !S_ISDIR(statbuf.st_mode)) {
-    // the path represents a link or some non-directory file type,
-    // which is not what we expected. declare it insecure.
-    //
+  // The path exists, see if it is secure.
+  return is_statbuf_secure(&statbuf);
+}
+
+
+// Check if the given directory file descriptor is considered a secure
+// directory for the backing store files. Returns true if the directory
+// exists and is considered a secure location. Returns false if the path
+// is a symbolic link or if an error occurred.
+//
+static bool is_dirfd_secure(int dir_fd) {
+  struct stat statbuf;
+  int result = 0;
+
+  RESTARTABLE(::fstat(dir_fd, &statbuf), result);
+  if (result == OS_ERR) {
+    return false;
+  }
+
+  // The path exists, now check its mode.
+  return is_statbuf_secure(&statbuf);
+}
+
+
+// Check to make sure fd1 and fd2 are referencing the same file system object.
+//
+static bool is_same_fsobject(int fd1, int fd2) {
+  struct stat statbuf1;
+  struct stat statbuf2;
+  int result = 0;
+
+  RESTARTABLE(::fstat(fd1, &statbuf1), result);
+  if (result == OS_ERR) {
+    return false;
+  }
+  RESTARTABLE(::fstat(fd2, &statbuf2), result);
+  if (result == OS_ERR) {
+    return false;
+  }
+
+  if ((statbuf1.st_ino == statbuf2.st_ino) &&
+      (statbuf1.st_dev == statbuf2.st_dev)) {
+    return true;
+  } else {
     return false;
   }
-  else {
-    // we have an existing directory, check if the permissions are safe.
-    //
-    if ((statbuf.st_mode & (S_IWGRP|S_IWOTH)) != 0) {
-      // the directory is open for writing and could be subjected
-      // to a symlnk attack. declare it insecure.
-      //
-      return false;
+}
+
+
+// Open the directory of the given path and validate it.
+// Return a DIR * of the open directory.
+//
+static DIR *open_directory_secure(const char* dirname) {
+  // Open the directory with open() so that its file descriptor can be
+  // verified with is_dirfd_secure(), then call opendir() and confirm that
+  // both refer to the same file system object.  Unlike calling opendir()
+  // followed by is_directory_secure(), this approach does not leave a
+  // window in which the directory can be attacked.
+  int result;
+  DIR *dirp = NULL;
+  RESTARTABLE(::open(dirname, O_RDONLY|O_NOFOLLOW), result);
+  if (result == OS_ERR) {
+    // Directory doesn't exist or is a symlink, so there is nothing to cleanup.
+    if (PrintMiscellaneous && Verbose) {
+      if (errno == ELOOP) {
+        warning("directory %s is a symlink and is not secure\n", dirname);
+      } else {
+        warning("could not open directory %s: %s\n", dirname, strerror(errno));
+      }
     }
+    return dirp;
+  }
+  int fd = result;
+
+  // Determine if the open directory is secure.
+  if (!is_dirfd_secure(fd)) {
+    // The directory is not a secure directory.
+    os::close(fd);
+    return dirp;
+  }
+
+  // Open the directory.
+  dirp = ::opendir(dirname);
+  if (dirp == NULL) {
+    // The directory doesn't exist, close fd and return.
+    os::close(fd);
+    return dirp;
+  }
+
+  // Check to make sure fd and dirp are referencing the same file system object.
+  if (!is_same_fsobject(fd, dirfd(dirp))) {
+    // The directory is not secure.
+    os::close(fd);
+    os::closedir(dirp);
+    dirp = NULL;
+    return dirp;
+  }
+
+  // Close initial open now that we know directory is secure
+  os::close(fd);
+
+  return dirp;
+}
+
+// NOTE: The code below uses fchdir(), open() and unlink() because
+// fdopendir(), openat() and unlinkat() are not supported on all
+// versions.  Once the support for fdopendir(), openat() and unlinkat()
+// is available on all supported versions the code can be changed
+// to use these functions.
+
+// Open the directory of the given path, validate it and set the
+// current working directory to it.
+// Return a DIR * of the open directory and the saved cwd fd.
+//
+static DIR *open_directory_secure_cwd(const char* dirname, int *saved_cwd_fd) {
+
+  // Open the directory.
+  DIR* dirp = open_directory_secure(dirname);
+  if (dirp == NULL) {
+    // Directory doesn't exist or is insecure, so there is nothing to cleanup.
+    return dirp;
+  }
+  int fd = dirfd(dirp);
+
+  // Open a fd to the cwd and save it off.
+  int result;
+  RESTARTABLE(::open(".", O_RDONLY), result);
+  if (result == OS_ERR) {
+    *saved_cwd_fd = -1;
+  } else {
+    *saved_cwd_fd = result;
+  }
+
+  // Set the current directory to dirname by using the fd of the directory.
+  result = fchdir(fd);
+
+  return dirp;
+}
+
+// Close the directory and restore the current working directory.
+//
+static void close_directory_secure_cwd(DIR* dirp, int saved_cwd_fd) {
+
+  int result;
+  // If we have a saved cwd change back to it and close the fd.
+  if (saved_cwd_fd != -1) {
+    result = fchdir(saved_cwd_fd);
+    ::close(saved_cwd_fd);
+  }
+
+  // Close the directory.
+  os::closedir(dirp);
+}
+
+// Check if the given file descriptor is considered secure.
+//
+static bool is_file_secure(int fd, const char *filename) {
+
+  int result;
+  struct stat statbuf;
+
+  // Determine if the file is secure.
+  RESTARTABLE(::fstat(fd, &statbuf), result);
+  if (result == OS_ERR) {
+    if (PrintMiscellaneous && Verbose) {
+      warning("fstat failed on %s: %s\n", filename, strerror(errno));
+    }
+    return false;
+  }
+  if (statbuf.st_nlink > 1) {
+    // A file with multiple links is not expected.
+    if (PrintMiscellaneous && Verbose) {
+      warning("file %s has multiple links\n", filename);
+    }
+    return false;
   }
   return true;
 }
 
-
 // return the user name for the given user id
 //
 // the caller is expected to free the allocated memory.
@@ -317,9 +506,11 @@
 
   const char* tmpdirname = os::get_temp_directory();
 
+  // open the temp directory
   DIR* tmpdirp = os::opendir(tmpdirname);
 
   if (tmpdirp == NULL) {
+    // Cannot open the directory to get the user name, return.
     return NULL;
   }
 
@@ -344,25 +535,14 @@
     strcat(usrdir_name, "/");
     strcat(usrdir_name, dentry->d_name);
 
-    DIR* subdirp = os::opendir(usrdir_name);
+    // open the user directory
+    DIR* subdirp = open_directory_secure(usrdir_name);
 
     if (subdirp == NULL) {
       FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
       continue;
     }
 
-    // Since we don't create the backing store files in directories
-    // pointed to by symbolic links, we also don't follow them when
-    // looking for the files. We check for a symbolic link after the
-    // call to opendir in order to eliminate a small window where the
-    // symlink can be exploited.
-    //
-    if (!is_directory_secure(usrdir_name)) {
-      FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
-      os::closedir(subdirp);
-      continue;
-    }
-
     struct dirent* udentry;
     char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name), mtInternal);
     errno = 0;
@@ -465,26 +645,6 @@
 }
 
 
-// remove file
-//
-// this method removes the file with the given file name in the
-// named directory.
-//
-static void remove_file(const char* dirname, const char* filename) {
-
-  size_t nbytes = strlen(dirname) + strlen(filename) + 2;
-  char* path = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
-
-  strcpy(path, dirname);
-  strcat(path, "/");
-  strcat(path, filename);
-
-  remove_file(path);
-
-  FREE_C_HEAP_ARRAY(char, path, mtInternal);
-}
-
-
 // cleanup stale shared memory resources
 //
 // This method attempts to remove all stale shared memory files in
@@ -496,16 +656,11 @@
 //
 static void cleanup_sharedmem_resources(const char* dirname) {
 
-  // open the user temp directory
-  DIR* dirp = os::opendir(dirname);
-
+  int saved_cwd_fd;
+  // open the directory and set the current working directory to it
+  DIR* dirp = open_directory_secure_cwd(dirname, &saved_cwd_fd);
   if (dirp == NULL) {
-    // directory doesn't exist, so there is nothing to cleanup
-    return;
-  }
-
-  if (!is_directory_secure(dirname)) {
-    // the directory is not a secure directory
+    // directory doesn't exist or is insecure, so there is nothing to cleanup
     return;
   }
 
@@ -519,6 +674,7 @@
   //
   struct dirent* entry;
   char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal);
+
   errno = 0;
   while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) {
 
@@ -529,7 +685,7 @@
       if (strcmp(entry->d_name, ".") != 0 && strcmp(entry->d_name, "..") != 0) {
 
         // attempt to remove all unexpected files, except "." and ".."
-        remove_file(dirname, entry->d_name);
+        unlink(entry->d_name);
       }
 
       errno = 0;
@@ -552,11 +708,14 @@
     if ((pid == os::current_process_id()) ||
         (kill(pid, 0) == OS_ERR && (errno == ESRCH || errno == EPERM))) {
 
-        remove_file(dirname, entry->d_name);
+        unlink(entry->d_name);
     }
     errno = 0;
   }
-  os::closedir(dirp);
+
+  // close the directory and reset the current working directory
+  close_directory_secure_cwd(dirp, saved_cwd_fd);
+
   FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
 }
 
@@ -613,19 +772,54 @@
     return -1;
   }
 
-  int result;
+  int saved_cwd_fd;
+  // open the directory and set the current working directory to it
+  DIR* dirp = open_directory_secure_cwd(dirname, &saved_cwd_fd);
+  if (dirp == NULL) {
+    // Directory doesn't exist or is insecure, so cannot create shared
+    // memory file.
+    return -1;
+  }
 
-  RESTARTABLE(::open(filename, O_RDWR|O_CREAT|O_TRUNC, S_IREAD|S_IWRITE), result);
+  // Open the filename in the current directory.
+  // Cannot use O_TRUNC here; truncation of an existing file has to happen
+  // after the is_file_secure() check below.
+  int result;
+  RESTARTABLE(::open(filename, O_RDWR|O_CREAT|O_NOFOLLOW, S_IREAD|S_IWRITE), result);
   if (result == OS_ERR) {
     if (PrintMiscellaneous && Verbose) {
-      warning("could not create file %s: %s\n", filename, strerror(errno));
+      if (errno == ELOOP) {
+        warning("file %s is a symlink and is not secure\n", filename);
+      } else {
+        warning("could not create file %s: %s\n", filename, strerror(errno));
+      }
     }
+    // close the directory and reset the current working directory
+    close_directory_secure_cwd(dirp, saved_cwd_fd);
+
     return -1;
   }
+  // close the directory and reset the current working directory
+  close_directory_secure_cwd(dirp, saved_cwd_fd);
 
   // save the file descriptor
   int fd = result;
 
+  // check to see if the file is secure
+  if (!is_file_secure(fd, filename)) {
+    ::close(fd);
+    return -1;
+  }
+
+  // truncate the file to get rid of any existing data
+  RESTARTABLE(::ftruncate(fd, (off_t)0), result);
+  if (result == OS_ERR) {
+    if (PrintMiscellaneous && Verbose) {
+      warning("could not truncate shared memory file: %s\n", strerror(errno));
+    }
+    ::close(fd);
+    return -1;
+  }
   // set the file size
   RESTARTABLE(::ftruncate(fd, (off_t)size), result);
   if (result == OS_ERR) {
@@ -683,8 +877,15 @@
       THROW_MSG_(vmSymbols::java_io_IOException(), strerror(errno), OS_ERR);
     }
   }
+  int fd = result;
 
-  return result;
+  // check to see if the file is secure
+  if (!is_file_secure(fd, filename)) {
+    ::close(fd);
+    return -1;
+  }
+
+  return fd;
 }
 
 // create a named shared memory region. returns the address of the
@@ -716,13 +917,21 @@
   char* dirname = get_user_tmp_dir(user_name);
   char* filename = get_sharedmem_filename(dirname, vmid);
 
+  // get the short filename
+  char* short_filename = strrchr(filename, '/');
+  if (short_filename == NULL) {
+    short_filename = filename;
+  } else {
+    short_filename++;
+  }
+
   // cleanup any stale shared memory files
   cleanup_sharedmem_resources(dirname);
 
   assert(((size > 0) && (size % os::vm_page_size() == 0)),
          "unexpected PerfMemory region size");
 
-  fd = create_sharedmem_resources(dirname, filename, size);
+  fd = create_sharedmem_resources(dirname, short_filename, size);
 
   FREE_C_HEAP_ARRAY(char, user_name, mtInternal);
   FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
@@ -753,7 +962,7 @@
   (void)::memset((void*) mapAddress, 0, size);
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
+  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
 
   return mapAddress;
 }
@@ -837,12 +1046,12 @@
   // constructs for the file and the shared memory mapping.
   if (mode == PerfMemory::PERF_MODE_RO) {
     mmap_prot = PROT_READ;
-    file_flags = O_RDONLY;
+    file_flags = O_RDONLY | O_NOFOLLOW;
   }
   else if (mode == PerfMemory::PERF_MODE_RW) {
 #ifdef LATER
     mmap_prot = PROT_READ | PROT_WRITE;
-    file_flags = O_RDWR;
+    file_flags = O_RDWR | O_NOFOLLOW;
 #else
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
               "Unsupported access mode");
@@ -918,7 +1127,7 @@
   }
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
+  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
 
   *addr = mapAddress;
   *sizep = size;
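
The new perfMemory code pairs an O_NOFOLLOW open() with opendir() and accepts the directory only when both handles name the same device and inode, then keeps working relative to that validated directory (fchdir() plus plain open()/unlink(), with truncation deferred until after is_file_secure()), so a racing symlink swap cannot redirect the backing store. A condensed, standalone sketch of the same-object check follows; open_dir_checked and same_fs_object are illustrative names, not the HotSpot helpers.

    #include <dirent.h>
    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>

    // True when both descriptors refer to the same file system object
    // (same device and inode).
    static bool same_fs_object(int fd1, int fd2) {
      struct stat s1, s2;
      if (fstat(fd1, &s1) != 0 || fstat(fd2, &s2) != 0) return false;
      return s1.st_ino == s2.st_ino && s1.st_dev == s2.st_dev;
    }

    // Open a directory, refusing a symlink at the last component, then verify
    // that opendir() resolved to the very same object before using it.
    static DIR* open_dir_checked(const char* path) {
      int fd = open(path, O_RDONLY | O_NOFOLLOW);
      if (fd < 0) return nullptr;
      DIR* dirp = opendir(path);
      if (dirp == nullptr || !same_fs_object(fd, dirfd(dirp))) {
        if (dirp != nullptr) closedir(dirp);
        close(fd);
        return nullptr;
      }
      close(fd);                 // the DIR* keeps its own descriptor
      return dirp;
    }
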
--- a/src/os/bsd/vm/thread_bsd.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/bsd/vm/thread_bsd.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -29,20 +29,8 @@
 #error "This file should only be included from thread.inline.hpp"
 #endif
 
-#include "runtime/atomic.hpp"
-#include "runtime/prefetch.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/threadLocalStorage.hpp"
-#ifdef TARGET_OS_ARCH_bsd_x86
-# include "atomic_bsd_x86.inline.hpp"
-# include "orderAccess_bsd_x86.inline.hpp"
-# include "prefetch_bsd_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_zero
-# include "atomic_bsd_zero.inline.hpp"
-# include "orderAccess_bsd_zero.inline.hpp"
-# include "prefetch_bsd_zero.inline.hpp"
-#endif
 
 // Contains inlined functions for class Thread and ThreadLocalStorage
 
--- a/src/os/linux/vm/os_linux.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/linux/vm/os_linux.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -49,6 +49,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -2243,7 +2244,7 @@
   const siginfo_t* si = (const siginfo_t*)siginfo;
 
   os::Posix::print_siginfo_brief(st, si);
-
+#if INCLUDE_CDS
   if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
       UseSharedSpaces) {
     FileMapInfo* mapinfo = FileMapInfo::current_info();
@@ -2253,6 +2254,7 @@
                 " possible disk/network problem.");
     }
   }
+#endif
   st->cr();
 }
 
@@ -3500,9 +3502,12 @@
 
   assert(is_ptr_aligned(start, alignment), "Must be");
 
-  // os::reserve_memory_special will record this memory area.
-  // Need to release it here to prevent overlapping reservations.
-  MemTracker::record_virtual_memory_release((address)start, bytes);
+  if (MemTracker::tracking_level() > NMT_minimal) {
+    // os::reserve_memory_special will record this memory area.
+    // Need to release it here to prevent overlapping reservations.
+    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
+    tkr.record((address)start, bytes);
+  }
 
   char* end = start + bytes;
 
@@ -3597,7 +3602,7 @@
     }
 
     // The memory is committed
-    MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
+    MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
   }
 
   return addr;
@@ -3613,24 +3618,30 @@
 }
 
 bool os::release_memory_special(char* base, size_t bytes) {
+  bool res;
+  if (MemTracker::tracking_level() > NMT_minimal) {
+    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
+    res = os::Linux::release_memory_special_impl(base, bytes);
+    if (res) {
+      tkr.record((address)base, bytes);
+    }
+
+  } else {
+    res = os::Linux::release_memory_special_impl(base, bytes);
+  }
+  return res;
+}
+
+bool os::Linux::release_memory_special_impl(char* base, size_t bytes) {
   assert(UseLargePages, "only for large pages");
-
-  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
-
   bool res;
+
   if (UseSHM) {
     res = os::Linux::release_memory_special_shm(base, bytes);
   } else {
     assert(UseHugeTLBFS, "must be");
     res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
   }
-
-  if (res) {
-    tkr.record((address)base, bytes);
-  } else {
-    tkr.discard();
-  }
-
   return res;
 }
 
--- a/src/os/linux/vm/os_linux.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/linux/vm/os_linux.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -108,6 +108,7 @@
   static char* reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec);
   static char* reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec);
 
+  static bool release_memory_special_impl(char* base, size_t bytes);
   static bool release_memory_special_shm(char* base, size_t bytes);
   static bool release_memory_special_huge_tlbfs(char* base, size_t bytes);
 
--- a/src/os/linux/vm/os_linux.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/linux/vm/os_linux.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,24 +26,9 @@
 #define OS_LINUX_VM_OS_LINUX_INLINE_HPP
 
 #include "runtime/atomic.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 
-#ifdef TARGET_OS_ARCH_linux_x86
-# include "orderAccess_linux_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_sparc
-# include "orderAccess_linux_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_zero
-# include "orderAccess_linux_zero.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_arm
-# include "orderAccess_linux_arm.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_ppc
-# include "orderAccess_linux_ppc.inline.hpp"
-#endif
-
 // System includes
 
 #include <unistd.h>
--- a/src/os/linux/vm/perfMemory_linux.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/linux/vm/perfMemory_linux.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -197,7 +197,38 @@
 }
 
 
-// check if the given path is considered a secure directory for
+// Check if the given statbuf describes a secure directory for
+// the backing store files. Returns true if the directory is considered
+// a secure location. Returns false if the statbuf describes a symbolic
+// link or a non-directory, or if its permissions or ownership are unsafe.
+//
+static bool is_statbuf_secure(struct stat *statp) {
+  if (S_ISLNK(statp->st_mode) || !S_ISDIR(statp->st_mode)) {
+    // The path represents a link or some non-directory file type,
+    // which is not what we expected. Declare it insecure.
+    //
+    return false;
+  }
+  // We have an existing directory, check if the permissions are safe.
+  //
+  if ((statp->st_mode & (S_IWGRP|S_IWOTH)) != 0) {
+    // The directory is open for writing and could be subjected
+    // to a symlink or a hard link attack. Declare it insecure.
+    //
+    return false;
+  }
+  // See if the uid of the directory matches the effective uid of the process.
+  //
+  if (statp->st_uid != geteuid()) {
+    // The directory was not created by this user, declare it insecure.
+    //
+    return false;
+  }
+  return true;
+}
+
+
+// Check if the given path is considered a secure directory for
 // the backing store files. Returns true if the directory exists
 // and is considered a secure location. Returns false if the path
 // is a symbolic link or if an error occurred.
@@ -211,22 +242,180 @@
     return false;
   }
 
-  // the path exists, now check it's mode
-  if (S_ISLNK(statbuf.st_mode) || !S_ISDIR(statbuf.st_mode)) {
-    // the path represents a link or some non-directory file type,
-    // which is not what we expected. declare it insecure.
-    //
+  // The path exists, see if it is secure.
+  return is_statbuf_secure(&statbuf);
+}
+
+
+// Check if the given directory file descriptor is considered a secure
+// directory for the backing store files. Returns true if the directory
+// exists and is considered a secure location. Returns false if the path
+// is a symbolic link or if an error occurred.
+//
+static bool is_dirfd_secure(int dir_fd) {
+  struct stat statbuf;
+  int result = 0;
+
+  RESTARTABLE(::fstat(dir_fd, &statbuf), result);
+  if (result == OS_ERR) {
+    return false;
+  }
+
+  // The path exists, now check its mode.
+  return is_statbuf_secure(&statbuf);
+}
+
+
+// Check to make sure fd1 and fd2 are referencing the same file system object.
+//
+static bool is_same_fsobject(int fd1, int fd2) {
+  struct stat statbuf1;
+  struct stat statbuf2;
+  int result = 0;
+
+  RESTARTABLE(::fstat(fd1, &statbuf1), result);
+  if (result == OS_ERR) {
+    return false;
+  }
+  RESTARTABLE(::fstat(fd2, &statbuf2), result);
+  if (result == OS_ERR) {
+    return false;
+  }
+
+  if ((statbuf1.st_ino == statbuf2.st_ino) &&
+      (statbuf1.st_dev == statbuf2.st_dev)) {
+    return true;
+  } else {
     return false;
   }
-  else {
-    // we have an existing directory, check if the permissions are safe.
-    //
-    if ((statbuf.st_mode & (S_IWGRP|S_IWOTH)) != 0) {
-      // the directory is open for writing and could be subjected
-      // to a symlnk attack. declare it insecure.
-      //
-      return false;
+}
+
+
+// Open the directory of the given path and validate it.
+// Return a DIR * of the open directory.
+//
+static DIR *open_directory_secure(const char* dirname) {
+  // Open the directory with open() so that its file descriptor can be
+  // verified with is_dirfd_secure(), then call opendir() and confirm that
+  // both refer to the same file system object.  Unlike calling opendir()
+  // followed by is_directory_secure(), this approach does not leave a
+  // window in which the directory can be attacked.
+  int result;
+  DIR *dirp = NULL;
+  RESTARTABLE(::open(dirname, O_RDONLY|O_NOFOLLOW), result);
+  if (result == OS_ERR) {
+    if (PrintMiscellaneous && Verbose) {
+      if (errno == ELOOP) {
+        warning("directory %s is a symlink and is not secure\n", dirname);
+      } else {
+        warning("could not open directory %s: %s\n", dirname, strerror(errno));
+      }
     }
+    return dirp;
+  }
+  int fd = result;
+
+  // Determine if the open directory is secure.
+  if (!is_dirfd_secure(fd)) {
+    // The directory is not a secure directory.
+    os::close(fd);
+    return dirp;
+  }
+
+  // Open the directory.
+  dirp = ::opendir(dirname);
+  if (dirp == NULL) {
+    // The directory doesn't exist, close fd and return.
+    os::close(fd);
+    return dirp;
+  }
+
+  // Check to make sure fd and dirp are referencing the same file system object.
+  if (!is_same_fsobject(fd, dirfd(dirp))) {
+    // The directory is not secure.
+    os::close(fd);
+    os::closedir(dirp);
+    dirp = NULL;
+    return dirp;
+  }
+
+  // Close initial open now that we know directory is secure
+  os::close(fd);
+
+  return dirp;
+}
+
+// NOTE: The code below uses fchdir(), open() and unlink() because
+// fdopendir(), openat() and unlinkat() are not supported on all
+// versions.  Once the support for fdopendir(), openat() and unlinkat()
+// is available on all supported versions the code can be changed
+// to use these functions.
+
+// Open the directory of the given path, validate it and set the
+// current working directory to it.
+// Return a DIR * of the open directory and the saved cwd fd.
+//
+static DIR *open_directory_secure_cwd(const char* dirname, int *saved_cwd_fd) {
+
+  // Open the directory.
+  DIR* dirp = open_directory_secure(dirname);
+  if (dirp == NULL) {
+    // Directory doesn't exist or is insecure, so there is nothing to cleanup.
+    return dirp;
+  }
+  int fd = dirfd(dirp);
+
+  // Open a fd to the cwd and save it off.
+  int result;
+  RESTARTABLE(::open(".", O_RDONLY), result);
+  if (result == OS_ERR) {
+    *saved_cwd_fd = -1;
+  } else {
+    *saved_cwd_fd = result;
+  }
+
+  // Set the current directory to dirname by using the fd of the directory.
+  result = fchdir(fd);
+
+  return dirp;
+}
+
+// Close the directory and restore the current working directory.
+//
+static void close_directory_secure_cwd(DIR* dirp, int saved_cwd_fd) {
+
+  int result;
+  // If we have a saved cwd change back to it and close the fd.
+  if (saved_cwd_fd != -1) {
+    result = fchdir(saved_cwd_fd);
+    ::close(saved_cwd_fd);
+  }
+
+  // Close the directory.
+  os::closedir(dirp);
+}
+
+// Check if the given file descriptor is considered secure.
+//
+static bool is_file_secure(int fd, const char *filename) {
+
+  int result;
+  struct stat statbuf;
+
+  // Determine if the file is secure.
+  RESTARTABLE(::fstat(fd, &statbuf), result);
+  if (result == OS_ERR) {
+    if (PrintMiscellaneous && Verbose) {
+      warning("fstat failed on %s: %s\n", filename, strerror(errno));
+    }
+    return false;
+  }
+  if (statbuf.st_nlink > 1) {
+    // A file with multiple links is not expected.
+    if (PrintMiscellaneous && Verbose) {
+      warning("file %s has multiple links\n", filename);
+    }
+    return false;
   }
   return true;
 }
@@ -317,9 +506,11 @@
 
   const char* tmpdirname = os::get_temp_directory();
 
+  // open the temp directory
   DIR* tmpdirp = os::opendir(tmpdirname);
 
   if (tmpdirp == NULL) {
+    // Cannot open the directory to get the user name, return.
     return NULL;
   }
 
@@ -344,7 +535,8 @@
     strcat(usrdir_name, "/");
     strcat(usrdir_name, dentry->d_name);
 
-    DIR* subdirp = os::opendir(usrdir_name);
+    // open the user directory
+    DIR* subdirp = open_directory_secure(usrdir_name);
 
     if (subdirp == NULL) {
       FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
@@ -465,26 +657,6 @@
 }
 
 
-// remove file
-//
-// this method removes the file with the given file name in the
-// named directory.
-//
-static void remove_file(const char* dirname, const char* filename) {
-
-  size_t nbytes = strlen(dirname) + strlen(filename) + 2;
-  char* path = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
-
-  strcpy(path, dirname);
-  strcat(path, "/");
-  strcat(path, filename);
-
-  remove_file(path);
-
-  FREE_C_HEAP_ARRAY(char, path, mtInternal);
-}
-
-
 // cleanup stale shared memory resources
 //
 // This method attempts to remove all stale shared memory files in
@@ -496,16 +668,11 @@
 //
 static void cleanup_sharedmem_resources(const char* dirname) {
 
-  // open the user temp directory
-  DIR* dirp = os::opendir(dirname);
-
+  int saved_cwd_fd;
+  // open the directory
+  DIR* dirp = open_directory_secure_cwd(dirname, &saved_cwd_fd);
   if (dirp == NULL) {
-    // directory doesn't exist, so there is nothing to cleanup
-    return;
-  }
-
-  if (!is_directory_secure(dirname)) {
-    // the directory is not a secure directory
+    // directory doesn't exist or is insecure, so there is nothing to cleanup
     return;
   }
 
@@ -519,6 +686,7 @@
   //
   struct dirent* entry;
   char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal);
+
   errno = 0;
   while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) {
 
@@ -527,9 +695,8 @@
     if (pid == 0) {
 
       if (strcmp(entry->d_name, ".") != 0 && strcmp(entry->d_name, "..") != 0) {
-
         // attempt to remove all unexpected files, except "." and ".."
-        remove_file(dirname, entry->d_name);
+        unlink(entry->d_name);
       }
 
       errno = 0;
@@ -551,12 +718,14 @@
     //
     if ((pid == os::current_process_id()) ||
         (kill(pid, 0) == OS_ERR && (errno == ESRCH || errno == EPERM))) {
-
-        remove_file(dirname, entry->d_name);
+        unlink(entry->d_name);
     }
     errno = 0;
   }
-  os::closedir(dirp);
+
+  // close the directory and reset the current working directory
+  close_directory_secure_cwd(dirp, saved_cwd_fd);
+
   FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
 }
 
@@ -613,19 +782,54 @@
     return -1;
   }
 
-  int result;
+  int saved_cwd_fd;
+  // open the directory and set the current working directory to it
+  DIR* dirp = open_directory_secure_cwd(dirname, &saved_cwd_fd);
+  if (dirp == NULL) {
+    // Directory doesn't exist or is insecure, so cannot create shared
+    // memory file.
+    return -1;
+  }
 
-  RESTARTABLE(::open(filename, O_RDWR|O_CREAT|O_TRUNC, S_IREAD|S_IWRITE), result);
+  // Open the filename in the current directory.
+  // Cannot use O_TRUNC here; truncation of an existing file has to happen
+  // after the is_file_secure() check below.
+  int result;
+  RESTARTABLE(::open(filename, O_RDWR|O_CREAT|O_NOFOLLOW, S_IREAD|S_IWRITE), result);
   if (result == OS_ERR) {
     if (PrintMiscellaneous && Verbose) {
-      warning("could not create file %s: %s\n", filename, strerror(errno));
+      if (errno == ELOOP) {
+        warning("file %s is a symlink and is not secure\n", filename);
+      } else {
+        warning("could not create file %s: %s\n", filename, strerror(errno));
+      }
     }
+    // close the directory and reset the current working directory
+    close_directory_secure_cwd(dirp, saved_cwd_fd);
+
     return -1;
   }
+  // close the directory and reset the current working directory
+  close_directory_secure_cwd(dirp, saved_cwd_fd);
 
   // save the file descriptor
   int fd = result;
 
+  // check to see if the file is secure
+  if (!is_file_secure(fd, filename)) {
+    ::close(fd);
+    return -1;
+  }
+
+  // truncate the file to get rid of any existing data
+  RESTARTABLE(::ftruncate(fd, (off_t)0), result);
+  if (result == OS_ERR) {
+    if (PrintMiscellaneous && Verbose) {
+      warning("could not truncate shared memory file: %s\n", strerror(errno));
+    }
+    ::close(fd);
+    return -1;
+  }
   // set the file size
   RESTARTABLE(::ftruncate(fd, (off_t)size), result);
   if (result == OS_ERR) {
@@ -683,8 +887,15 @@
       THROW_MSG_(vmSymbols::java_io_IOException(), strerror(errno), OS_ERR);
     }
   }
+  int fd = result;
 
-  return result;
+  // check to see if the file is secure
+  if (!is_file_secure(fd, filename)) {
+    ::close(fd);
+    return -1;
+  }
+
+  return fd;
 }
 
 // create a named shared memory region. returns the address of the
@@ -715,6 +926,13 @@
 
   char* dirname = get_user_tmp_dir(user_name);
   char* filename = get_sharedmem_filename(dirname, vmid);
+  // get the short filename
+  char* short_filename = strrchr(filename, '/');
+  if (short_filename == NULL) {
+    short_filename = filename;
+  } else {
+    short_filename++;
+  }
 
   // cleanup any stale shared memory files
   cleanup_sharedmem_resources(dirname);
@@ -722,7 +940,7 @@
   assert(((size > 0) && (size % os::vm_page_size() == 0)),
          "unexpected PerfMemory region size");
 
-  fd = create_sharedmem_resources(dirname, filename, size);
+  fd = create_sharedmem_resources(dirname, short_filename, size);
 
   FREE_C_HEAP_ARRAY(char, user_name, mtInternal);
   FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
@@ -753,7 +971,7 @@
   (void)::memset((void*) mapAddress, 0, size);
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
+  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
 
   return mapAddress;
 }
@@ -837,12 +1055,12 @@
   // constructs for the file and the shared memory mapping.
   if (mode == PerfMemory::PERF_MODE_RO) {
     mmap_prot = PROT_READ;
-    file_flags = O_RDONLY;
+    file_flags = O_RDONLY | O_NOFOLLOW;
   }
   else if (mode == PerfMemory::PERF_MODE_RW) {
 #ifdef LATER
     mmap_prot = PROT_READ | PROT_WRITE;
-    file_flags = O_RDWR;
+    file_flags = O_RDWR | O_NOFOLLOW;
 #else
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
               "Unsupported access mode");
@@ -924,7 +1142,7 @@
   }
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
+  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
 
   *addr = mapAddress;
   *sizep = size;
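
Because create_sharedmem_resources() now opens the file relative to the validated directory (after fchdir()), mmap_create_shared() passes only the final path component; the strrchr() logic in the hunk is the usual basename idiom, sketched standalone below.

    #include <cstring>

    // Return the final component of a '/'-separated path; when there is no
    // separator, the whole string is already the file name.
    static const char* basename_of(const char* path) {
      const char* slash = std::strrchr(path, '/');
      return (slash == nullptr) ? path : slash + 1;
    }
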
--- a/src/os/linux/vm/thread_linux.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/linux/vm/thread_linux.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -29,35 +29,8 @@
 #error "This file should only be included from thread.inline.hpp"
 #endif
 
-#include "runtime/atomic.hpp"
-#include "runtime/prefetch.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/threadLocalStorage.hpp"
-#ifdef TARGET_OS_ARCH_linux_x86
-# include "atomic_linux_x86.inline.hpp"
-# include "orderAccess_linux_x86.inline.hpp"
-# include "prefetch_linux_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_sparc
-# include "atomic_linux_sparc.inline.hpp"
-# include "orderAccess_linux_sparc.inline.hpp"
-# include "prefetch_linux_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_zero
-# include "atomic_linux_zero.inline.hpp"
-# include "orderAccess_linux_zero.inline.hpp"
-# include "prefetch_linux_zero.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_arm
-# include "atomic_linux_arm.inline.hpp"
-# include "orderAccess_linux_arm.inline.hpp"
-# include "prefetch_linux_arm.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_ppc
-# include "atomic_linux_ppc.inline.hpp"
-# include "orderAccess_linux_ppc.inline.hpp"
-# include "prefetch_linux_ppc.inline.hpp"
-#endif
 
 // Contains inlined functions for class Thread and ThreadLocalStorage
 
--- a/src/os/posix/vm/os_posix.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/posix/vm/os_posix.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -74,21 +74,41 @@
   VMError::report_coredump_status(buffer, success);
 }
 
-address os::get_caller_pc(int n) {
+int os::get_native_stack(address* stack, int frames, int toSkip) {
 #ifdef _NMT_NOINLINE_
-  n ++;
+  toSkip++;
 #endif
+
+  int frame_idx = 0;
+  int num_of_frames;  // number of frames captured
   frame fr = os::current_frame();
-  while (n > 0 && fr.pc() &&
-    !os::is_first_C_frame(&fr) && fr.sender_pc()) {
-    fr = os::get_sender_for_C_frame(&fr);
-    n --;
+  while (fr.pc() && frame_idx < frames) {
+    if (toSkip > 0) {
+      toSkip --;
+    } else {
+      stack[frame_idx ++] = fr.pc();
+    }
+    if (fr.fp() == NULL || os::is_first_C_frame(&fr)
+        || fr.sender_pc() == NULL || fr.cb() != NULL) break;
+
+    if (fr.sender_pc() && !os::is_first_C_frame(&fr)) {
+      fr = os::get_sender_for_C_frame(&fr);
+    } else {
+      break;
+    }
   }
-  if (n == 0) {
-    return fr.pc();
-  } else {
-    return NULL;
+  num_of_frames = frame_idx;
+  for (; frame_idx < frames; frame_idx ++) {
+    stack[frame_idx] = NULL;
   }
+
+  return num_of_frames;
+}
+
+
+bool os::unsetenv(const char* name) {
+  assert(name != NULL, "Null pointer");
+  return (::unsetenv(name) == 0);
 }
 
 int os::get_last_error() {
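
os::get_native_stack() above captures up to `frames` program counters, skipping `toSkip` frames on behalf of the caller and NULL-padding the unused tail so consumers (NMT detail tracking) can tell where the stack ends. The toy loop below reproduces that skip/capture/pad behaviour over a plain array instead of real frames; all names are illustrative only.

    #include <cstddef>

    // Walk a pre-built array of frame PCs: skip 'to_skip' entries, copy at
    // most 'max' into 'out', NULL-pad the rest, and return the count captured.
    static int capture_stack(void* const* walk, int walk_len,
                             void** out, int max, int to_skip) {
      int idx = 0;
      for (int i = 0; i < walk_len && idx < max; i++) {
        if (to_skip > 0) { to_skip--; continue; }
        out[idx++] = walk[i];
      }
      int captured = idx;
      for (; idx < max; idx++) out[idx] = nullptr;   // mark the unused tail
      return captured;
    }
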
--- a/src/os/solaris/add_gnu_debuglink/add_gnu_debuglink.c	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,285 +0,0 @@
-/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-/*
- * Name:        add_gnu_debuglink.c
- *
- * Description: Add a ".gnu_debuglink" section that refers to the specified
- *     debug_info_path to the specified ELF object.
- *
- *     This program is adapted from the example program shown on the
- *     elf(3elf) man page and from code from the Solaris compiler
- *     driver.
- */
-
-/*
- * needed to define SHF_EXCLUDE
- */
-#define ELF_TARGET_ALL
-
-#include <fcntl.h>
-#include <stdio.h>
-#include <libelf.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-static void failure(void);
-static unsigned int gnu_debuglink_crc32(unsigned int crc, unsigned char *buf,
-                                        size_t len);
-
-void
-main(int argc, char ** argv) {
-                                 /* new ELF section name */
-    static char SEC_NAME[] = ".gnu_debuglink";
-
-    unsigned char buffer[8 * 1024];  /* I/O buffer */
-    int           buffer_len;        /* buffer length */
-    char *        debug_info_path;   /* debug info path */
-    void *        ehdr;              /* ELF header */
-    Elf *         elf;               /* ELF descriptor */
-    char *        elf_ident;         /* ELF identity string */
-    char *        elf_obj;           /* elf_obj file */
-    int           fd;                /* descriptor for files */
-    unsigned int  file_crc = 0;      /* CRC for debug info file */
-    int           is_elfclass64;     /* is an ELFCLASS64 file? */
-    Elf_Data *    link_dat;          /* ELF data for new debug info link */
-    Elf_Data *    name_dat;          /* ELF data for new section name */
-    Elf_Scn *     new_scn;           /* new ELF section descriptor */
-    void *        new_shdr;          /* new ELF section header */
-    Elf_Scn *     scn;               /* ELF section descriptor */
-    void *        shdr;              /* ELF section header */
-
-    if (argc != 3) {
-        (void) fprintf(stderr, "Usage: %s debug_info_path elf_obj\n", argv[0]);
-        exit(2);
-    }
-
-    debug_info_path = argv[1];  /* save for later */
-    if ((fd = open(debug_info_path, O_RDONLY)) == -1) {
-        (void) fprintf(stderr, "%s: cannot open file.\n", debug_info_path);
-        exit(3);
-    }
-
-    (void) printf("Computing CRC for '%s'\n", debug_info_path);
-    (void) fflush(stdout);
-    /* compute CRC for the debug info file */
-    for (;;) {
-        int len = read(fd, buffer, sizeof buffer);
-        if (len <= 0) {
-            break;
-        }
-        file_crc = gnu_debuglink_crc32(file_crc, buffer, len);
-    }
-    (void) close(fd);
-
-    /* open the elf_obj */
-    elf_obj = argv[2];
-    if ((fd = open(elf_obj, O_RDWR)) == -1) {
-        (void) fprintf(stderr, "%s: cannot open file.\n", elf_obj);
-        exit(4);
-    }
-
-    (void) printf("Opening '%s' for update\n", elf_obj);
-    (void) fflush(stdout);
-    (void) elf_version(EV_CURRENT);  /* coordinate ELF versions */
-
-    /* obtain the ELF descriptors from the input file */
-    if ((elf = elf_begin(fd, ELF_C_RDWR, NULL)) == NULL) {
-        failure();
-    }
-
-    /* determine if ELFCLASS64 or not? */
-    elf_ident = elf_getident(elf, NULL);
-    is_elfclass64 = (elf_ident[EI_CLASS] == ELFCLASS64);
-
-    /* get the ELF header */
-    if (is_elfclass64) {
-        ehdr = elf64_getehdr(elf);
-    } else {
-        ehdr = elf32_getehdr(elf);
-    }
-    if (ehdr == NULL) {
-        failure();
-    }
-
-    /* get the ELF section descriptor */
-    if (is_elfclass64) {
-        scn = elf_getscn(elf, ((Elf64_Ehdr *) ehdr)->e_shstrndx);
-    } else {
-        scn = elf_getscn(elf, ((Elf32_Ehdr *) ehdr)->e_shstrndx);
-    }
-    if (scn == NULL) {
-        failure();
-    }
-
-    /* get the section header */
-    if (is_elfclass64) {
-        shdr = elf64_getshdr(scn);
-    } else {
-        shdr = elf32_getshdr(scn);
-    }
-    if (shdr == NULL) {
-        failure();
-    }
-
-    (void) printf("Adding ELF data for new section name\n");
-    (void) fflush(stdout);
-    name_dat = elf_newdata(scn);
-    name_dat->d_buf = (void *) SEC_NAME;
-    if (is_elfclass64) {
-        name_dat->d_off = ((Elf64_Shdr *) shdr)->sh_size + 1;
-    } else {
-        name_dat->d_off = ((Elf32_Shdr *) shdr)->sh_size + 1;
-    }
-    name_dat->d_align = 1;
-    name_dat->d_size = strlen(SEC_NAME) + 1;
-
-    new_scn = elf_newscn(elf);
-
-    if (is_elfclass64) {
-        new_shdr = elf64_getshdr(new_scn);
-        ((Elf64_Shdr *) new_shdr)->sh_flags = SHF_EXCLUDE;
-        ((Elf64_Shdr *) new_shdr)->sh_type = SHT_PROGBITS;
-        ((Elf64_Shdr *) new_shdr)->sh_name = ((Elf64_Shdr *) shdr)->sh_size;
-        ((Elf64_Shdr *) new_shdr)->sh_addralign = 1;
-        ((Elf64_Shdr *) shdr)->sh_size += (strlen(SEC_NAME) + 1);
-    } else {
-        new_shdr = elf32_getshdr(new_scn);
-        ((Elf32_Shdr *) new_shdr)->sh_flags = SHF_EXCLUDE;
-        ((Elf32_Shdr *) new_shdr)->sh_type = SHT_PROGBITS;
-        ((Elf32_Shdr *) new_shdr)->sh_name = ((Elf32_Shdr *) shdr)->sh_size;
-        ((Elf32_Shdr *) new_shdr)->sh_addralign = 1;
-        ((Elf32_Shdr *) shdr)->sh_size += (strlen(SEC_NAME) + 1);
-    }
-
-    (void) printf("Adding ELF data for debug_info_path value\n");
-    (void) fflush(stdout);
-    (void) memset(buffer, 0, sizeof buffer);
-    buffer_len = strlen(debug_info_path) + 1;  /* +1 for NUL */
-    (void) strncpy((char *) buffer, debug_info_path, buffer_len);
-    if (buffer_len % 4 != 0) {
-        /* not on a 4 byte boundary so pad to the next one */
-        buffer_len += (4 - buffer_len % 4);
-    }
-    /* save the CRC */
-    (void) memcpy(&buffer[buffer_len], &file_crc, sizeof file_crc);
-    buffer_len += sizeof file_crc;
-
-    link_dat = elf_newdata(new_scn);
-    link_dat->d_type = ELF_T_BYTE;
-    link_dat->d_size = buffer_len;
-    link_dat->d_buf = buffer;
-    link_dat->d_align = 1;
-
-    (void) printf("Saving updates to '%s'\n", elf_obj);
-    (void) fflush(stdout);
-    (void) elf_update(elf, ELF_C_NULL);   /* recalc ELF memory structures */
-    (void) elf_update(elf, ELF_C_WRITE);  /* write out changes to ELF obj */
-    (void) elf_end(elf);                  /* done with ELF obj */
-    (void) close(fd);
-
-    (void) printf("Done updating '%s'\n", elf_obj);
-    (void) fflush(stdout);
-    exit(0);
-}  /* end main */
-
-
-static void
-failure() {
-    (void) fprintf(stderr, "%s\n", elf_errmsg(elf_errno()));
-    exit(5);
-}
-
-
-/*
- * The CRC used in gnu_debuglink, retrieved from
- * http://sourceware.org/gdb/current/onlinedocs/gdb/Separate-Debug-Files.html#Separate-Debug-Files.
- */
-
-static unsigned int
-gnu_debuglink_crc32(unsigned int crc, unsigned char *buf, size_t len) {
-    static const unsigned int crc32_table[256] = {
-        0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
-        0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
-        0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
-        0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
-        0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
-        0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
-        0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
-        0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
-        0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
-        0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
-        0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
-        0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
-        0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
-        0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
-        0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
-        0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
-        0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
-        0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
-        0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
-        0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
-        0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
-        0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
-        0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
-        0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
-        0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
-        0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
-        0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
-        0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
-        0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
-        0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
-        0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
-        0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
-        0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
-        0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
-        0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
-        0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
-        0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
-        0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
-        0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
-        0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
-        0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
-        0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
-        0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
-        0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
-        0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
-        0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
-        0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
-        0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
-        0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
-        0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
-        0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
-        0x2d02ef8d
-    };
-
-    unsigned char *end;
-
-    crc = ~crc & 0xffffffff;
-    for (end = buf + len; buf < end; ++buf) {
-        crc = crc32_table[(crc ^ *buf) & 0xff] ^ (crc >> 8);
-    }
-    return ~crc & 0xffffffff;
-}
--- a/src/os/solaris/dtrace/libjvm_db.c	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/solaris/dtrace/libjvm_db.c	Fri Apr 10 16:58:26 2015 -0700
@@ -260,6 +260,9 @@
   uint64_t base;
   int err;
 
+  /* Clear *vmp now in case we jump to fail: */
+  memset(vmp, 0, sizeof(VMStructEntry));
+
   err = ps_pglobal_lookup(J->P, LIBJVM_SO, "gHotSpotVMStructs", &sym_addr);
   CHECK_FAIL(err);
   err = read_pointer(J, sym_addr, &gHotSpotVMStructs);
--- a/src/os/solaris/fix_empty_sec_hdr_flags/fix_empty_sec_hdr_flags.c	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,181 +0,0 @@
-/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-/*
- * Name:        fix_empty_sec_hdr_flags.c
- *
- * Description: Remove the SHF_ALLOC flag from "empty" section headers.
- *     An "empty" section header has sh_addr == 0 and sh_size == 0.
- *
- *     This program is adapted from the example program shown on the
- *     elf(3elf) man page and from code from the Solaris compiler
- *     driver.
- */
-
-#include <fcntl.h>
-#include <stdio.h>
-#include <libelf.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-static void failure(void);
-
-void
-main(int argc, char ** argv) {
-    void *        ehdr;           /* ELF header */
-    unsigned int  i;              /* section counter */
-    int           fd;             /* descriptor for file */
-    Elf *         elf;            /* ELF descriptor */
-    char *        elf_ident;      /* ELF identity string */
-    char *        elf_obj;        /* elf_obj file */
-    int           fix_count;      /* number of flags fixed */
-    int           is_elfclass64;  /* is an ELFCLASS64 file? */
-    Elf_Scn *     scn;            /* ELF section descriptor */
-    void *        shdr;           /* ELF section header */
-    Elf_Data *    shstrtab;       /* ELF section header string table */
-
-    if (argc != 2) {
-        (void) fprintf(stderr, "Usage: %s elf_obj\n", argv[0]);
-        exit(2);
-    }
-
-    /* open the elf_obj */
-    elf_obj = argv[1];
-    if ((fd = open(elf_obj, O_RDWR)) == -1) {
-        (void) fprintf(stderr, "%s: cannot open file.\n", elf_obj);
-        exit(3);
-    }
-
-    (void) printf("Opening '%s' for update\n", elf_obj);
-    (void) fflush(stdout);
-    (void) elf_version(EV_CURRENT);  /* coordinate ELF versions */
-
-    /* obtain the ELF descriptors from the input file */
-    if ((elf = elf_begin(fd, ELF_C_RDWR, NULL)) == NULL) {
-        failure();
-    }
-
-    /* determine if ELFCLASS64 or not? */
-    elf_ident = elf_getident(elf, NULL);
-    is_elfclass64 = (elf_ident[EI_CLASS] == ELFCLASS64);
-
-    /* get the ELF header */
-    if (is_elfclass64) {
-        ehdr = elf64_getehdr(elf);
-    } else {
-        ehdr = elf32_getehdr(elf);
-    }
-    if (ehdr == NULL) {
-        failure();
-    }
-
-    /* get the ELF section descriptor */
-    if (is_elfclass64) {
-        scn = elf_getscn(elf, ((Elf64_Ehdr *) ehdr)->e_shstrndx);
-    } else {
-        scn = elf_getscn(elf, ((Elf32_Ehdr *) ehdr)->e_shstrndx);
-    }
-    if (scn == NULL) {
-        failure();
-    }
-
-    /* get the section header string table */
-    shstrtab = elf_getdata(scn, NULL);
-    if (shstrtab == NULL) {
-        failure();
-    }
-
-    fix_count = 0;
-
-    /* traverse the sections of the input file */
-    for (i = 1, scn = NULL; scn = elf_nextscn(elf, scn); i++) {
-        int    has_flag_set;  /* is SHF_ALLOC flag set? */
-        int    is_empty;      /* is section empty? */
-        char * name;          /* short hand pointer */
-
-        /* get the section header */
-        if (is_elfclass64) {
-            shdr = elf64_getshdr(scn);
-        } else {
-            shdr = elf32_getshdr(scn);
-        }
-        if (shdr == NULL) {
-            failure();
-        }
-
-        if (is_elfclass64) {
-            name = (char *)shstrtab->d_buf + ((Elf64_Shdr *) shdr)->sh_name;
-        } else {
-            name = (char *)shstrtab->d_buf + ((Elf32_Shdr *) shdr)->sh_name;
-        }
-
-        if (is_elfclass64) {
-            has_flag_set = ((Elf64_Shdr *) shdr)->sh_flags & SHF_ALLOC;
-            is_empty = ((Elf64_Shdr *) shdr)->sh_addr == 0 &&
-                ((Elf64_Shdr *) shdr)->sh_size == 0;
-        } else {
-            has_flag_set = ((Elf32_Shdr *) shdr)->sh_flags & SHF_ALLOC;
-            is_empty = ((Elf32_Shdr *) shdr)->sh_addr == 0 &&
-                ((Elf32_Shdr *) shdr)->sh_size == 0;
-        }
-
-        if (is_empty && has_flag_set) {
-            (void) printf("section[%u] '%s' is empty, "
-                "but SHF_ALLOC flag is set.\n", i, name);
-            (void) printf("Clearing the SHF_ALLOC flag.\n");
-
-            if (is_elfclass64) {
-                ((Elf64_Shdr *) shdr)->sh_flags &= ~SHF_ALLOC;
-            } else {
-                ((Elf32_Shdr *) shdr)->sh_flags &= ~SHF_ALLOC;
-            }
-            fix_count++;
-        }
-    }  /* end for each ELF section */
-
-    if (fix_count > 0) {
-        (void) printf("Saving %d updates to '%s'\n", fix_count, elf_obj);
-        (void) fflush(stdout);
-        (void) elf_update(elf, ELF_C_NULL);   /* recalc ELF memory structures */
-        (void) elf_update(elf, ELF_C_WRITE);  /* write out changes to ELF obj */
-    } else {
-        (void) printf("No SHF_ALLOC flags needed to be cleared.\n");
-    }
-
-    (void) elf_end(elf);                  /* done with ELF obj */
-    (void) close(fd);
-
-    (void) printf("Done %s '%s'\n",
-               (fix_count > 0) ? "updating" : "with", elf_obj);
-    (void) fflush(stdout);
-    exit(0);
-}  /* end main */
-
-
-static void
-failure() {
-    (void) fprintf(stderr, "%s\n", elf_errmsg(elf_errno()));
-    exit(6);
-}
--- a/src/os/solaris/vm/jvm_solaris.h	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/solaris/vm/jvm_solaris.h	Fri Apr 10 16:58:26 2015 -0700
@@ -40,8 +40,9 @@
  * This file is currently collecting system-specific dregs for the
  * JNI conversion, which should be sorted out later.
  */
-
+#define __USE_LEGACY_PROTOTYPES__
 #include <dirent.h>             /* For DIR */
+#undef __USE_LEGACY_PROTOTYPES__
 #include <sys/param.h>          /* For MAXPATHLEN */
 #include <sys/socket.h>         /* For socklen_t */
 #include <unistd.h>             /* For F_OK, R_OK, W_OK */
--- a/src/os/solaris/vm/os_solaris.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/solaris/vm/os_solaris.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -48,6 +48,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
 #include "runtime/sharedRuntime.hpp"
--- a/src/os/solaris/vm/os_solaris.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/solaris/vm/os_solaris.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,15 +26,9 @@
 #define OS_SOLARIS_VM_OS_SOLARIS_INLINE_HPP
 
 #include "runtime/atomic.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 
-#ifdef TARGET_OS_ARCH_solaris_x86
-# include "orderAccess_solaris_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_sparc
-# include "orderAccess_solaris_sparc.inline.hpp"
-#endif
-
 // System includes
 #include <sys/param.h>
 #include <dlfcn.h>
--- a/src/os/solaris/vm/perfMemory_solaris.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/solaris/vm/perfMemory_solaris.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -199,7 +199,38 @@
 }
 
 
-// check if the given path is considered a secure directory for
+// Check if the given statbuf describes a secure directory for
+// the backing store files. Returns true if the directory is considered
+// a secure location. Returns false if the statbuf describes a symbolic
+// link or a non-directory, or if its permissions or ownership are unsafe.
+//
+static bool is_statbuf_secure(struct stat *statp) {
+  if (S_ISLNK(statp->st_mode) || !S_ISDIR(statp->st_mode)) {
+    // The path represents a link or some non-directory file type,
+    // which is not what we expected. Declare it insecure.
+    //
+    return false;
+  }
+  // We have an existing directory, check if the permissions are safe.
+  //
+  if ((statp->st_mode & (S_IWGRP|S_IWOTH)) != 0) {
+    // The directory is open for writing and could be subjected
+    // to a symlink or a hard link attack. Declare it insecure.
+    //
+    return false;
+  }
+  // See if the uid of the directory matches the effective uid of the process.
+  //
+  if (statp->st_uid != geteuid()) {
+    // The directory was not created by this user, declare it insecure.
+    //
+    return false;
+  }
+  return true;
+}
+
+
+// Check if the given path is considered a secure directory for
 // the backing store files. Returns true if the directory exists
 // and is considered a secure location. Returns false if the path
 // is a symbolic link or if an error occurred.
@@ -213,27 +244,185 @@
     return false;
   }
 
-  // the path exists, now check it's mode
-  if (S_ISLNK(statbuf.st_mode) || !S_ISDIR(statbuf.st_mode)) {
-    // the path represents a link or some non-directory file type,
-    // which is not what we expected. declare it insecure.
-    //
+  // The path exists, see if it is secure.
+  return is_statbuf_secure(&statbuf);
+}
+
+
+// Check if the given directory file descriptor is considered a secure
+// directory for the backing store files. Returns true if the directory
+// exists and is considered a secure location. Returns false if the path
+// is a symbolic link or if an error occurred.
+//
+static bool is_dirfd_secure(int dir_fd) {
+  struct stat statbuf;
+  int result = 0;
+
+  RESTARTABLE(::fstat(dir_fd, &statbuf), result);
+  if (result == OS_ERR) {
+    return false;
+  }
+
+  // The path exists, now check its mode.
+  return is_statbuf_secure(&statbuf);
+}
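// Editor's note (illustrative, not part of this change): RESTARTABLE is the
// HotSpot helper that retries a system call interrupted by a signal. A
// minimal sketch of the usual definition, assuming OS_ERR is -1 and standard
// errno semantics, would be:
//
//   #define RESTARTABLE_SKETCH(_cmd, _result) do { \
//       _result = (_cmd); \
//     } while (((int)_result == OS_ERR) && (errno == EINTR))
//
// The real definition lives in the platform-specific os_*.inline.hpp headers.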
+
+
+// Check to make sure fd1 and fd2 are referencing the same file system object.
+//
+static bool is_same_fsobject(int fd1, int fd2) {
+  struct stat statbuf1;
+  struct stat statbuf2;
+  int result = 0;
+
+  RESTARTABLE(::fstat(fd1, &statbuf1), result);
+  if (result == OS_ERR) {
+    return false;
+  }
+  RESTARTABLE(::fstat(fd2, &statbuf2), result);
+  if (result == OS_ERR) {
+    return false;
+  }
+
+  if ((statbuf1.st_ino == statbuf2.st_ino) &&
+      (statbuf1.st_dev == statbuf2.st_dev)) {
+    return true;
+  } else {
     return false;
   }
-  else {
-    // we have an existing directory, check if the permissions are safe.
-    //
-    if ((statbuf.st_mode & (S_IWGRP|S_IWOTH)) != 0) {
-      // the directory is open for writing and could be subjected
-      // to a symlnk attack. declare it insecure.
-      //
-      return false;
+}
+
+
+// Open the directory of the given path and validate it.
+// Return a DIR * of the open directory.
+//
+static DIR *open_directory_secure(const char* dirname) {
+  // Open the directory using open() so that it can be verified to be
+  // secure with is_dirfd_secure(), then call opendir() and check that
+  // both refer to the same file system object. Unlike calling opendir()
+  // followed by is_directory_secure(), this does not leave a window in
+  // which the directory can be attacked.
+  int result;
+  DIR *dirp = NULL;
+  RESTARTABLE(::open(dirname, O_RDONLY|O_NOFOLLOW), result);
+  if (result == OS_ERR) {
+    // Directory doesn't exist or is a symlink, so there is nothing to clean up.
+    if (PrintMiscellaneous && Verbose) {
+      if (errno == ELOOP) {
+        warning("directory %s is a symlink and is not secure\n", dirname);
+      } else {
+        warning("could not open directory %s: %s\n", dirname, strerror(errno));
+      }
     }
+    return dirp;
+  }
+  int fd = result;
+
+  // Determine if the open directory is secure.
+  if (!is_dirfd_secure(fd)) {
+    // The directory is not a secure directory.
+    os::close(fd);
+    return dirp;
+  }
+
+  // Open the directory.
+  dirp = ::opendir(dirname);
+  if (dirp == NULL) {
+    // The directory doesn't exist, close fd and return.
+    os::close(fd);
+    return dirp;
+  }
+
+  // Check to make sure fd and dirp are referencing the same file system object.
+  if (!is_same_fsobject(fd, dirp->dd_fd)) {
+    // The directory is not secure.
+    os::close(fd);
+    os::closedir(dirp);
+    dirp = NULL;
+    return dirp;
+  }
+
+  // Close initial open now that we know directory is secure
+  os::close(fd);
+
+  return dirp;
+}
+
+// NOTE: The code below uses fchdir(), open() and unlink() because
+// fdopendir(), openat() and unlinkat() are not supported on all
+// versions of Solaris. Once support for fdopendir(), openat() and
+// unlinkat() is available on all supported Solaris versions, the code
+// can be changed to use those functions.
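// Editor's sketch (hypothetical, not part of this change): once fdopendir()
// and unlinkat() are available everywhere, the fchdir()-based dance below
// could be replaced by something along these lines:
//
//   static DIR *open_directory_secure_at_sketch(const char* dirname) {
//     int fd = ::open(dirname, O_RDONLY|O_NOFOLLOW);
//     if (fd == -1 || !is_dirfd_secure(fd)) {
//       if (fd != -1) ::close(fd);
//       return NULL;
//     }
//     DIR* dirp = ::fdopendir(fd);   // DIR* owns fd on success
//     if (dirp == NULL) {
//       ::close(fd);
//     }
//     // Stale files would then be removed with unlinkat(dirfd(dirp), name, 0),
//     // with no need to change the process working directory.
//     return dirp;
//   }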
+
+// Open the directory of the given path, validate it and set the
+// current working directory to it.
+// Return a DIR * of the open directory and the saved cwd fd.
+//
+static DIR *open_directory_secure_cwd(const char* dirname, int *saved_cwd_fd) {
+
+  // Open the directory.
+  DIR* dirp = open_directory_secure(dirname);
+  if (dirp == NULL) {
+    // Directory doesn't exist or is insecure, so there is nothing to clean up.
+    return dirp;
+  }
+  int fd = dirp->dd_fd;
+
+  // Open a fd to the cwd and save it off.
+  int result;
+  RESTARTABLE(::open(".", O_RDONLY), result);
+  if (result == OS_ERR) {
+    *saved_cwd_fd = -1;
+  } else {
+    *saved_cwd_fd = result;
+  }
+
+  // Set the current directory to dirname by using the fd of the directory.
+  result = fchdir(fd);
+
+  return dirp;
+}
+
+// Close the directory and restore the current working directory.
+//
+static void close_directory_secure_cwd(DIR* dirp, int saved_cwd_fd) {
+
+  int result;
+  // If we have a saved cwd change back to it and close the fd.
+  if (saved_cwd_fd != -1) {
+    result = fchdir(saved_cwd_fd);
+    ::close(saved_cwd_fd);
+  }
+
+  // Close the directory.
+  os::closedir(dirp);
+}
+
+// Check if the given file descriptor is considered secure.
+//
+static bool is_file_secure(int fd, const char *filename) {
+
+  int result;
+  struct stat statbuf;
+
+  // Determine if the file is secure.
+  RESTARTABLE(::fstat(fd, &statbuf), result);
+  if (result == OS_ERR) {
+    if (PrintMiscellaneous && Verbose) {
+      warning("fstat failed on %s: %s\n", filename, strerror(errno));
+    }
+    return false;
+  }
+  if (statbuf.st_nlink > 1) {
+    // A file with multiple links is not expected.
+    if (PrintMiscellaneous && Verbose) {
+      warning("file %s has multiple links\n", filename);
+    }
+    return false;
   }
   return true;
 }
 
-
 // return the user name for the given user id
 //
 // the caller is expected to free the allocated memory.
@@ -308,9 +497,11 @@
 
   const char* tmpdirname = os::get_temp_directory();
 
+  // open the temp directory
   DIR* tmpdirp = os::opendir(tmpdirname);
 
   if (tmpdirp == NULL) {
+    // Cannot open the directory to get the user name, return.
     return NULL;
   }
 
@@ -335,7 +526,8 @@
     strcat(usrdir_name, "/");
     strcat(usrdir_name, dentry->d_name);
 
-    DIR* subdirp = os::opendir(usrdir_name);
+    // open the user directory
+    DIR* subdirp = open_directory_secure(usrdir_name);
 
     if (subdirp == NULL) {
       FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
@@ -504,26 +696,6 @@
 }
 
 
-// remove file
-//
-// this method removes the file with the given file name in the
-// named directory.
-//
-static void remove_file(const char* dirname, const char* filename) {
-
-  size_t nbytes = strlen(dirname) + strlen(filename) + 2;
-  char* path = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
-
-  strcpy(path, dirname);
-  strcat(path, "/");
-  strcat(path, filename);
-
-  remove_file(path);
-
-  FREE_C_HEAP_ARRAY(char, path, mtInternal);
-}
-
-
 // cleanup stale shared memory resources
 //
 // This method attempts to remove all stale shared memory files in
@@ -535,16 +707,11 @@
 //
 static void cleanup_sharedmem_resources(const char* dirname) {
 
-  // open the user temp directory
-  DIR* dirp = os::opendir(dirname);
-
+  int saved_cwd_fd;
+  // open the directory
+  DIR* dirp = open_directory_secure_cwd(dirname, &saved_cwd_fd);
   if (dirp == NULL) {
-    // directory doesn't exist, so there is nothing to cleanup
-    return;
-  }
-
-  if (!is_directory_secure(dirname)) {
-    // the directory is not a secure directory
+    // directory doesn't exist or is insecure, so there is nothing to clean up
     return;
   }
 
@@ -558,6 +725,7 @@
   //
   struct dirent* entry;
   char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal);
+
   errno = 0;
   while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) {
 
@@ -568,7 +736,7 @@
       if (strcmp(entry->d_name, ".") != 0 && strcmp(entry->d_name, "..") != 0) {
 
         // attempt to remove all unexpected files, except "." and ".."
-        remove_file(dirname, entry->d_name);
+        unlink(entry->d_name);
       }
 
       errno = 0;
@@ -591,11 +759,14 @@
     if ((pid == os::current_process_id()) ||
         (kill(pid, 0) == OS_ERR && (errno == ESRCH || errno == EPERM))) {
 
-        remove_file(dirname, entry->d_name);
+        unlink(entry->d_name);
     }
     errno = 0;
   }
-  os::closedir(dirp);
+
+  // close the directory and reset the current working directory
+  close_directory_secure_cwd(dirp, saved_cwd_fd);
+
   FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
 }
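// Editor's note (illustrative, not part of this change): the key point of the
// rewrite above is that unlink(entry->d_name) is now resolved relative to the
// directory that open_directory_secure_cwd() has already validated and
// fchdir()'ed into, instead of rebuilding "dirname/filename" paths that could
// be raced. A simplified caller-side view of the pattern:
//
//   int saved_cwd_fd;
//   DIR* d = open_directory_secure_cwd(dirname, &saved_cwd_fd);
//   if (d != NULL) {
//     struct dirent* e;
//     while ((e = readdir(d)) != NULL) {
//       if (strcmp(e->d_name, ".") != 0 && strcmp(e->d_name, "..") != 0) {
//         unlink(e->d_name);                          // relative to the validated dir
//       }
//     }
//     close_directory_secure_cwd(d, saved_cwd_fd);    // restores the old cwd
//   }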
 
@@ -652,19 +823,54 @@
     return -1;
   }
 
-  int result;
+  int saved_cwd_fd;
+  // open the directory and set the current working directory to it
+  DIR* dirp = open_directory_secure_cwd(dirname, &saved_cwd_fd);
+  if (dirp == NULL) {
+    // Directory doesn't exist or is insecure, so cannot create shared
+    // memory file.
+    return -1;
+  }
 
-  RESTARTABLE(::open(filename, O_RDWR|O_CREAT|O_TRUNC, S_IREAD|S_IWRITE), result);
+  // Open the filename in the current directory.
+  // Cannot use O_TRUNC here; truncation of an existing file has to happen
+  // after the is_file_secure() check below.
+  int result;
+  RESTARTABLE(::open(filename, O_RDWR|O_CREAT|O_NOFOLLOW, S_IREAD|S_IWRITE), result);
   if (result == OS_ERR) {
     if (PrintMiscellaneous && Verbose) {
-      warning("could not create file %s: %s\n", filename, strerror(errno));
+      if (errno == ELOOP) {
+        warning("file %s is a symlink and is not secure\n", filename);
+      } else {
+        warning("could not create file %s: %s\n", filename, strerror(errno));
+      }
     }
+    // close the directory and reset the current working directory
+    close_directory_secure_cwd(dirp, saved_cwd_fd);
+
     return -1;
   }
+  // close the directory and reset the current working directory
+  close_directory_secure_cwd(dirp, saved_cwd_fd);
 
   // save the file descriptor
   int fd = result;
 
+  // check to see if the file is secure
+  if (!is_file_secure(fd, filename)) {
+    ::close(fd);
+    return -1;
+  }
+
+  // truncate the file to get rid of any existing data
+  RESTARTABLE(::ftruncate(fd, (off_t)0), result);
+  if (result == OS_ERR) {
+    if (PrintMiscellaneous && Verbose) {
+      warning("could not truncate shared memory file: %s\n", strerror(errno));
+    }
+    ::close(fd);
+    return -1;
+  }
   // set the file size
   RESTARTABLE(::ftruncate(fd, (off_t)size), result);
   if (result == OS_ERR) {
@@ -700,8 +906,15 @@
       THROW_MSG_(vmSymbols::java_io_IOException(), strerror(errno), OS_ERR);
     }
   }
+  int fd = result;
 
-  return result;
+  // check to see if the file is secure
+  if (!is_file_secure(fd, filename)) {
+    ::close(fd);
+    return -1;
+  }
+
+  return fd;
 }
 
 // create a named shared memory region. returns the address of the
@@ -733,13 +946,21 @@
   char* dirname = get_user_tmp_dir(user_name);
   char* filename = get_sharedmem_filename(dirname, vmid);
 
+  // get the short filename
+  char* short_filename = strrchr(filename, '/');
+  if (short_filename == NULL) {
+    short_filename = filename;
+  } else {
+    short_filename++;
+  }
+
   // cleanup any stale shared memory files
   cleanup_sharedmem_resources(dirname);
 
   assert(((size > 0) && (size % os::vm_page_size() == 0)),
          "unexpected PerfMemory region size");
 
-  fd = create_sharedmem_resources(dirname, filename, size);
+  fd = create_sharedmem_resources(dirname, short_filename, size);
 
   FREE_C_HEAP_ARRAY(char, user_name, mtInternal);
   FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
@@ -770,7 +991,8 @@
   (void)::memset((void*) mapAddress, 0, size);
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
+  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
+    size, CURRENT_PC, mtInternal);
 
   return mapAddress;
 }
@@ -854,12 +1076,12 @@
   // constructs for the file and the shared memory mapping.
   if (mode == PerfMemory::PERF_MODE_RO) {
     mmap_prot = PROT_READ;
-    file_flags = O_RDONLY;
+    file_flags = O_RDONLY | O_NOFOLLOW;
   }
   else if (mode == PerfMemory::PERF_MODE_RW) {
 #ifdef LATER
     mmap_prot = PROT_READ | PROT_WRITE;
-    file_flags = O_RDWR;
+    file_flags = O_RDWR | O_NOFOLLOW;
 #else
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
               "Unsupported access mode");
@@ -941,7 +1163,8 @@
   }
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
+  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
+    size, CURRENT_PC, mtInternal);
 
   *addr = mapAddress;
   *sizep = size;
--- a/src/os/solaris/vm/thread_solaris.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/solaris/vm/thread_solaris.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -29,20 +29,9 @@
 #error "This file should only be included from thread.inline.hpp"
 #endif
 
-#include "runtime/atomic.hpp"
-#include "runtime/prefetch.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/threadLocalStorage.hpp"
-#ifdef TARGET_OS_ARCH_solaris_x86
-# include "atomic_solaris_x86.inline.hpp"
-# include "orderAccess_solaris_x86.inline.hpp"
-# include "prefetch_solaris_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_sparc
-# include "atomic_solaris_sparc.inline.hpp"
-# include "orderAccess_solaris_sparc.inline.hpp"
-# include "prefetch_solaris_sparc.inline.hpp"
-#endif
 
 // Thread::current is "hot" it's called > 128K times in the 1st 500 msecs of
 // startup.
--- a/src/os/windows/vm/os_windows.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/windows/vm/os_windows.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -51,6 +51,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -130,6 +131,7 @@
     case DLL_PROCESS_DETACH:
       if(ForceTimeHighResolution)
         timeEndPeriod(1L);
+
       break;
     default:
       break;
@@ -152,6 +154,10 @@
  return result > 0 && result < len;
 }
 
+bool os::unsetenv(const char* name) {
+  assert(name != NULL, "Null pointer");
+  return (SetEnvironmentVariable(name, NULL) == TRUE);
+}
 
 // No setuid programs under Windows.
 bool os::have_special_privileges() {
@@ -310,15 +316,17 @@
  * So far, this method is only used by Native Memory Tracking, which is
  * only supported on Windows XP or later.
  */
-address os::get_caller_pc(int n) {
+
+int os::get_native_stack(address* stack, int frames, int toSkip) {
 #ifdef _NMT_NOINLINE_
-  n ++;
+  toSkip ++;
 #endif
-  address pc;
-  if (os::Kernel32Dll::RtlCaptureStackBackTrace(n + 1, 1, (PVOID*)&pc, NULL) == 1) {
-    return pc;
-  }
-  return NULL;
+  int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames,
+    (PVOID*)stack, NULL);
+  for (int index = captured; index < frames; index ++) {
+    stack[index] = NULL;
+  }
+  return captured;
 }
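// Editor's note (hypothetical usage, not part of this change): a caller such
// as NMT can capture a bounded native stack like this; slots beyond the
// captured count are guaranteed to be NULL-padded by get_native_stack():
//
//   address frames[4];
//   int n = os::get_native_stack(frames, 4, /* toSkip */ 1);
//   for (int i = 0; i < n; i++) {
//     // frames[i] is a return address in the native call chain
//   }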
 
 
@@ -1642,96 +1650,123 @@
 
 void os::win32::print_windows_version(outputStream* st) {
   OSVERSIONINFOEX osvi;
-  SYSTEM_INFO si;
-
+  VS_FIXEDFILEINFO *file_info;
+  TCHAR kernel32_path[MAX_PATH];
+  UINT len, ret;
+
+  // Use the GetVersionEx information to see if we're on a server or
+  // workstation edition of Windows. Starting with Windows 8.1 we can't
+  // trust the OS version information returned by this API.
   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
-
   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
-    st->print_cr("N/A");
+    st->print_cr("Call to GetVersionEx failed");
+    return;
+  }
+  bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
+
+  // Get the full path to \Windows\System32\kernel32.dll and use that for
+  // determining what version of Windows we're running on.
+  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
+  ret = GetSystemDirectory(kernel32_path, len);
+  if (ret == 0 || ret > len) {
+    st->print_cr("Call to GetSystemDirectory failed");
+    return;
+  }
+  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
+
+  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
+  if (version_size == 0) {
+    st->print_cr("Call to GetFileVersionInfoSize failed");
+    return;
+  }
+
+  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
+  if (version_info == NULL) {
+    st->print_cr("Failed to allocate version_info");
     return;
   }
 
-  int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion;
-
-  ZeroMemory(&si, sizeof(SYSTEM_INFO));
-  if (os_vers >= 5002) {
-    // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
-    // find out whether we are running on 64 bit processor or not.
-    if (os::Kernel32Dll::GetNativeSystemInfoAvailable()) {
-      os::Kernel32Dll::GetNativeSystemInfo(&si);
+  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
+    os::free(version_info);
+    st->print_cr("Call to GetFileVersionInfo failed");
+    return;
+  }
+
+  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
+    os::free(version_info);
+    st->print_cr("Call to VerQueryValue failed");
+    return;
+  }
+
+  int major_version = HIWORD(file_info->dwProductVersionMS);
+  int minor_version = LOWORD(file_info->dwProductVersionMS);
+  int build_number = HIWORD(file_info->dwProductVersionLS);
+  int build_minor = LOWORD(file_info->dwProductVersionLS);
+  int os_vers = major_version * 1000 + minor_version;
+  os::free(version_info);
+
+  st->print(" Windows ");
+  switch (os_vers) {
+
+  case 6000:
+    if (is_workstation) {
+      st->print("Vista");
     } else {
-      GetSystemInfo(&si);
+      st->print("Server 2008");
     }
-  }
-
-  if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) {
-    switch (os_vers) {
-    case 3051: st->print(" Windows NT 3.51"); break;
-    case 4000: st->print(" Windows NT 4.0"); break;
-    case 5000: st->print(" Windows 2000"); break;
-    case 5001: st->print(" Windows XP"); break;
-    case 5002:
-      if (osvi.wProductType == VER_NT_WORKSTATION &&
-          si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
-        st->print(" Windows XP x64 Edition");
-      } else {
-        st->print(" Windows Server 2003 family");
-      }
-      break;
-
-    case 6000:
-      if (osvi.wProductType == VER_NT_WORKSTATION) {
-        st->print(" Windows Vista");
-      } else {
-        st->print(" Windows Server 2008");
-      }
-      break;
-
-    case 6001:
-      if (osvi.wProductType == VER_NT_WORKSTATION) {
-        st->print(" Windows 7");
-      } else {
-        st->print(" Windows Server 2008 R2");
-      }
-      break;
-
-    case 6002:
-      if (osvi.wProductType == VER_NT_WORKSTATION) {
-        st->print(" Windows 8");
-      } else {
-        st->print(" Windows Server 2012");
-      }
-      break;
-
-    case 6003:
-      if (osvi.wProductType == VER_NT_WORKSTATION) {
-        st->print(" Windows 8.1");
-      } else {
-        st->print(" Windows Server 2012 R2");
-      }
-      break;
-
-    default: // future os
-      // Unrecognized windows, print out its major and minor versions
-      st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
+    break;
+
+  case 6001:
+    if (is_workstation) {
+      st->print("7");
+    } else {
+      st->print("Server 2008 R2");
+    }
+    break;
+
+  case 6002:
+    if (is_workstation) {
+      st->print("8");
+    } else {
+      st->print("Server 2012");
+    }
+    break;
+
+  case 6003:
+    if (is_workstation) {
+      st->print("8.1");
+    } else {
+      st->print("Server 2012 R2");
     }
-  } else {
-    switch (os_vers) {
-    case 4000: st->print(" Windows 95"); break;
-    case 4010: st->print(" Windows 98"); break;
-    case 4090: st->print(" Windows Me"); break;
-    default: // future windows, print out its major and minor versions
-      st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
+    break;
+
+  case 6004:
+    if (is_workstation) {
+      st->print("10");
+    } else {
+      // The server version name of Windows 10 is not known at this time
+      st->print("%d.%d", major_version, minor_version);
     }
-  }
-
-  if (os_vers >= 6000 && si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
+    break;
+
+  default:
+    // Unrecognized windows, print out its major and minor versions
+    st->print("%d.%d", major_version, minor_version);
+    break;
+  }
+
+  // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
+  // find out whether we are running on 64 bit processor or not
+  SYSTEM_INFO si;
+  ZeroMemory(&si, sizeof(SYSTEM_INFO));
+  os::Kernel32Dll::GetNativeSystemInfo(&si);
+  if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
     st->print(" , 64 bit");
   }
 
-  st->print(" Build %d", osvi.dwBuildNumber);
-  st->print(" %s", osvi.szCSDVersion);           // service pack
+  st->print(" Build %d", build_number);
+  st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
   st->cr();
 }
 
@@ -2916,7 +2951,7 @@
                                 PAGE_READWRITE);
   // If reservation failed, return NULL
   if (p_buf == NULL) return NULL;
-  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, mtNone, CALLER_PC);
+  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
   os::release_memory(p_buf, bytes + chunk_size);
 
   // we still need to round up to a page boundary (in case we are using large pages)
@@ -2982,7 +3017,7 @@
         // need to create a dummy 'reserve' record to match
         // the release.
         MemTracker::record_virtual_memory_reserve((address)p_buf,
-          bytes_to_release, mtNone, CALLER_PC);
+          bytes_to_release, CALLER_PC);
         os::release_memory(p_buf, bytes_to_release);
       }
 #ifdef ASSERT
@@ -3001,11 +3036,10 @@
   }
   // Although the memory is allocated individually, it is returned as one.
   // NMT records it as one block.
-  address pc = CALLER_PC;
   if ((flags & MEM_COMMIT) != 0) {
-    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, mtNone, pc);
+    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
   } else {
-    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, mtNone, pc);
+    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
   }
 
   // made it this far, success
@@ -3203,8 +3237,7 @@
     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
     char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
     if (res != NULL) {
-      address pc = CALLER_PC;
-      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
+      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
     }
 
     return res;
@@ -5367,11 +5400,6 @@
   return ::Module32Next(hSnapshot, lpme);
 }
 
-
-inline BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
-  return true;
-}
-
 inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
   ::GetNativeSystemInfo(lpSystemInfo);
 }
--- a/src/os/windows/vm/os_windows.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/windows/vm/os_windows.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -192,7 +192,6 @@
   static BOOL Module32First(HANDLE,LPMODULEENTRY32);
   static BOOL Module32Next(HANDLE,LPMODULEENTRY32);
 
-  static BOOL GetNativeSystemInfoAvailable();
   static void GetNativeSystemInfo(LPSYSTEM_INFO);
 
   // NUMA calls
--- a/src/os/windows/vm/os_windows.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/windows/vm/os_windows.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,12 +26,9 @@
 #define OS_WINDOWS_VM_OS_WINDOWS_INLINE_HPP
 
 #include "runtime/atomic.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 
-#ifdef TARGET_OS_ARCH_windows_x86
-# include "orderAccess_windows_x86.inline.hpp"
-#endif
-
 inline const char* os::file_separator()                { return "\\"; }
 inline const char* os::line_separator()                { return "\r\n"; }
 inline const char* os::path_separator()                { return ";"; }
--- a/src/os/windows/vm/perfMemory_windows.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/windows/vm/perfMemory_windows.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1498,7 +1498,8 @@
   (void)memset(mapAddress, '\0', size);
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
+  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
+    size, CURRENT_PC, mtInternal);
 
   return (char*) mapAddress;
 }
@@ -1680,7 +1681,8 @@
   }
 
   // it does not go through os api, the operation has to record from here
-  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
+  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size,
+    CURRENT_PC, mtInternal);
 
 
   *addrp = (char*)mapAddress;
@@ -1834,10 +1836,14 @@
     return;
   }
 
-  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
-  remove_file_mapping(addr);
-  // it does not go through os api, the operation has to record from here
-  tkr.record((address)addr, bytes);
+  if (MemTracker::tracking_level() > NMT_minimal) {
+    // it does not go through os api, the operation has to record from here
+    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
+    remove_file_mapping(addr);
+    tkr.record((address)addr, bytes);
+  } else {
+    remove_file_mapping(addr);
+  }
 }
 
 char* PerfMemory::backing_store_filename() {
--- a/src/os/windows/vm/thread_windows.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os/windows/vm/thread_windows.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -29,15 +29,8 @@
 #error "This file should only be included from thread.inline.hpp"
 #endif
 
-#include "runtime/atomic.hpp"
-#include "runtime/prefetch.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/threadLocalStorage.hpp"
-#ifdef TARGET_OS_ARCH_windows_x86
-# include "atomic_windows_x86.inline.hpp"
-# include "orderAccess_windows_x86.inline.hpp"
-# include "prefetch_windows_x86.inline.hpp"
-#endif
 
 // Contains inlined functions for class Thread and ThreadLocalStorage
 
--- a/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,7 +26,6 @@
 #ifndef OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_INLINE_HPP
 #define OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_INLINE_HPP
 
-#include "orderAccess_aix_ppc.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "vm_version_ppc.hpp"
--- a/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -91,8 +91,9 @@
 
 // Frame information (pc, sp, fp) retrieved via ucontext
 // always looks like a C-frame according to the frame
-// conventions in frame_ppc64.hpp.
-address os::Aix::ucontext_get_pc(ucontext_t * uc) {
+// conventions in frame_ppc.hpp.
+
+address os::Aix::ucontext_get_pc(const ucontext_t * uc) {
   return (address)uc->uc_mcontext.jmp_context.iar;
 }
 
@@ -486,7 +487,7 @@
 ////////////////////////////////////////////////////////////////////////////////
 // thread stack
 
-size_t os::Aix::min_stack_allowed = 768*K;
+size_t os::Aix::min_stack_allowed = 128*K;
 
 // Aix is always in floating stack mode. The stack size for a new
 // thread can be set via pthread_attr_setstacksize().
@@ -499,7 +500,7 @@
   // because of the strange 'fallback logic' in os::create_thread().
   // Better set CompilerThreadStackSize in globals_<os_cpu>.hpp if you want to
   // specify a different stack size for compiler threads!
-  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1024 * K);
+  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
   return s;
 }
 
--- a/src/os_cpu/aix_ppc/vm/os_aix_ppc.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os_cpu/aix_ppc/vm/os_aix_ppc.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -23,8 +23,8 @@
  *
  */
 
-#ifndef OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP
-#define OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP
+#ifndef OS_CPU_AIX_PPC_VM_OS_AIX_PPC_HPP
+#define OS_CPU_AIX_PPC_VM_OS_AIX_PPC_HPP
 
   static void setup_fpu() {}
 
@@ -32,4 +32,4 @@
   // Note: Currently only used in 64 bit Windows implementations
   static bool register_code_area(char *low, char *high) { return true; }
 
-#endif // OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP
+#endif // OS_CPU_AIX_PPC_VM_OS_AIX_PPC_HPP
--- a/src/os_cpu/aix_ppc/vm/prefetch_aix_ppc.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os_cpu/aix_ppc/vm/prefetch_aix_ppc.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -23,8 +23,8 @@
  *
  */
 
-#ifndef OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP
-#define OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP
+#ifndef OS_CPU_AIX_PPC_VM_PREFETCH_AIX_PPC_INLINE_HPP
+#define OS_CPU_AIX_PPC_VM_PREFETCH_AIX_PPC_INLINE_HPP
 
 #include "runtime/prefetch.hpp"
 
@@ -55,4 +55,4 @@
 #endif
 }
 
-#endif // OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP
+#endif // OS_CPU_AIX_PPC_VM_PREFETCH_AIX_PPC_INLINE_HPP
--- a/src/os_cpu/aix_ppc/vm/threadLS_aix_ppc.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os_cpu/aix_ppc/vm/threadLS_aix_ppc.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -23,8 +23,8 @@
  *
  */
 
-#ifndef OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP
-#define OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP
+#ifndef OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP
+#define OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP
 
   // Processor dependent parts of ThreadLocalStorage
 
@@ -33,4 +33,4 @@
     return (Thread *) os::thread_local_storage_at(thread_index());
   }
 
-#endif // OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP
+#endif // OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP
--- a/src/os_cpu/aix_ppc/vm/thread_aix_ppc.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os_cpu/aix_ppc/vm/thread_aix_ppc.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -23,8 +23,8 @@
  *
  */
 
-#ifndef OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP
-#define OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP
+#ifndef OS_CPU_AIX_PPC_VM_THREAD_AIX_PPC_HPP
+#define OS_CPU_AIX_PPC_VM_THREAD_AIX_PPC_HPP
 
  private:
   void pd_initialize() {
@@ -76,4 +76,4 @@
 
   intptr_t* last_interpreter_fp() { return _last_interpreter_fp; }
 
-#endif // OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP
+#endif // OS_CPU_AIX_PPC_VM_THREAD_AIX_PPC_HPP
--- a/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,7 +26,6 @@
 #ifndef OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_INLINE_HPP
 #define OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_INLINE_HPP
 
-#include "orderAccess_linux_ppc.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "vm_version_ppc.hpp"
--- a/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -307,7 +307,7 @@
                // doesn't work for us. We use:
                ((NativeInstruction*)pc)->is_safepoint_poll()) {
         if (TraceTraps) {
-          tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (SIGSEGV)", pc);
+          tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (SIGSEGV)", p2i(pc));
         }
         stub = SharedRuntime::get_poll_stub(pc);
       }
@@ -316,7 +316,7 @@
       else if (sig == SIGTRAP && TrapBasedICMissChecks &&
                nativeInstruction_at(pc)->is_sigtrap_ic_miss_check()) {
         if (TraceTraps) {
-          tty->print_cr("trap: ic_miss_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
+          tty->print_cr("trap: ic_miss_check at " INTPTR_FORMAT " (SIGTRAP)", p2i(pc));
         }
         stub = SharedRuntime::get_ic_miss_stub();
       }
@@ -325,7 +325,7 @@
       else if (sig == SIGTRAP && TrapBasedNullChecks &&
                nativeInstruction_at(pc)->is_sigtrap_null_check()) {
         if (TraceTraps) {
-          tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
+          tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGTRAP)", p2i(pc));
         }
         stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
       }
@@ -335,7 +335,7 @@
                CodeCache::contains((void*) pc) &&
                !MacroAssembler::needs_explicit_null_check((intptr_t) info->si_addr)) {
         if (TraceTraps) {
-          tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc);
+          tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", p2i(pc));
         }
         stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
       }
@@ -345,7 +345,7 @@
       else if (sig == SIGTRAP && TrapBasedRangeChecks &&
                nativeInstruction_at(pc)->is_sigtrap_range_check()) {
         if (TraceTraps) {
-          tty->print_cr("trap: range_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
+          tty->print_cr("trap: range_check at " INTPTR_FORMAT " (SIGTRAP)", p2i(pc));
         }
         stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
       }
@@ -453,7 +453,7 @@
 ////////////////////////////////////////////////////////////////////////////////
 // thread stack
 
-size_t os::Linux::min_stack_allowed = 768*K;
+size_t os::Linux::min_stack_allowed = 128*K;
 
 bool os::Linux::supports_variable_stack_size() { return true; }
 
@@ -572,7 +572,7 @@
   st->cr();
 
   intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
-  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
+  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", p2i(sp));
   print_hex_dump(st, (address)sp, (address)(sp + 128), sizeof(intptr_t));
   st->cr();
 
@@ -580,7 +580,7 @@
   // point to garbage if entry point in an nmethod is corrupted. Leave
   // this at the end, and hope for the best.
   address pc = os::Linux::ucontext_get_pc(uc);
-  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
+  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", p2i(pc));
   print_hex_dump(st, pc - 64, pc + 64, /*instrsize=*/4);
   st->cr();
 }
--- a/src/os_cpu/linux_ppc/vm/prefetch_linux_ppc.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os_cpu/linux_ppc/vm/prefetch_linux_ppc.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -47,4 +47,4 @@
     );
 }
 
-#endif // OS_CPU_LINUX_PPC_VM_PREFETCH_LINUX_OJDKPPC_HPP
+#endif // OS_CPU_LINUX_PPC_VM_PREFETCH_LINUX_PPC_INLINE_HPP
--- a/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -118,7 +118,7 @@
       *ret_sp = os::Linux::ucontext_get_sp(uc);
     }
     if (ret_fp) {
-      *ret_fp = os::Linux::ucontext_get_fp(uc);
+      *ret_fp = (intptr_t*)NULL;
     }
   } else {
     // construct empty ExtendedPC for return value checking
@@ -136,18 +136,15 @@
 
 frame os::fetch_frame_from_context(void* ucVoid) {
   intptr_t* sp;
-  intptr_t* fp;
-  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
-  return frame(sp, fp, epc.pc());
+  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, NULL);
+  return frame(sp, frame::unpatchable, epc.pc());
 }
 
 frame os::get_sender_for_C_frame(frame* fr) {
-  return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
+  return frame(fr->sender_sp(), frame::unpatchable, fr->sender_pc());
 }
 
 frame os::current_frame() {
-  fprintf(stderr, "current_frame()");
-
   intptr_t* sp = StubRoutines::Sparc::flush_callers_register_windows_func()();
   frame myframe(sp, frame::unpatchable,
                 CAST_FROM_FN_PTR(address, os::current_frame));
--- a/src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -55,7 +55,7 @@
 
   if (detect_niagara()) {
     NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Detected Linux on Niagara");)
-    features = niagara1_m;
+    features = niagara1_m | T_family_m;
   }
 
   return features;
--- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -909,7 +909,7 @@
    */
   char* hint = (char*) (Linux::initial_thread_stack_bottom() -
                         ((StackYellowPages + StackRedPages + 1) * page_size));
-  char* codebuf = os::reserve_memory(page_size, hint);
+  char* codebuf = os::attempt_reserve_memory_at(page_size, hint);
   if ( (codebuf == NULL) || (!os::commit_memory(codebuf, page_size, true)) ) {
     return; // No matter, we tried, best effort.
   }
--- a/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -24,6 +24,7 @@
 
 // no precompiled headers
 #include "asm/macroAssembler.hpp"
+#include "macroAssembler_sparc.hpp"
 #include "classfile/classLoader.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
@@ -483,6 +484,15 @@
       }
 #endif  // COMPILER2
 
+#ifdef GRAAL
+      else if (sig == SIGILL && info->si_trapno == 0x100 + ST_RESERVED_FOR_USER_0) {
+	printf("SIGILL 0x%x 0x%x\n", info->si_trapno, ST_RESERVED_FOR_USER_0);
+	uc->uc_mcontext.gregs[REG_PC] = (greg_t)pc+4;
+	uc->uc_mcontext.gregs[REG_nPC] = (greg_t)npc + 4;
+	return true;
+      }
+#endif // GRAAL
+
       else if (sig == SIGSEGV && info->si_code > 0 && !MacroAssembler::needs_explicit_null_check((intptr_t)info->si_addr)) {
         // Determination of interpreter/vtable stub/compiled code null exception
         stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
--- a/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,10 +26,216 @@
 #include "runtime/os.hpp"
 #include "vm_version_sparc.hpp"
 
-# include <sys/auxv.h>
-# include <sys/auxv_SPARC.h>
-# include <sys/systeminfo.h>
-# include <kstat.h>
+#include <sys/auxv.h>
+#include <sys/auxv_SPARC.h>
+#include <sys/systeminfo.h>
+#include <kstat.h>
+#include <picl.h>
+#include <dlfcn.h>
+#include <link.h>
+
+extern "C" static int PICL_get_l1_data_cache_line_size_helper(picl_nodehdl_t nodeh, void *result);
+extern "C" static int PICL_get_l2_cache_line_size_helper(picl_nodehdl_t nodeh, void *result);
+
+// Functions from the library we need (signatures should match those in picl.h)
+extern "C" {
+  typedef int (*picl_initialize_func_t)(void);
+  typedef int (*picl_shutdown_func_t)(void);
+  typedef int (*picl_get_root_func_t)(picl_nodehdl_t *nodehandle);
+  typedef int (*picl_walk_tree_by_class_func_t)(picl_nodehdl_t rooth,
+      const char *classname, void *c_args,
+      int (*callback_fn)(picl_nodehdl_t hdl, void *args));
+  typedef int (*picl_get_prop_by_name_func_t)(picl_nodehdl_t nodeh, const char *nm,
+      picl_prophdl_t *ph);
+  typedef int (*picl_get_propval_func_t)(picl_prophdl_t proph, void *valbuf, size_t sz);
+  typedef int (*picl_get_propinfo_func_t)(picl_prophdl_t proph, picl_propinfo_t *pi);
+}
+
+class PICL {
+  // Pointers to functions in the library
+  picl_initialize_func_t _picl_initialize;
+  picl_shutdown_func_t _picl_shutdown;
+  picl_get_root_func_t _picl_get_root;
+  picl_walk_tree_by_class_func_t _picl_walk_tree_by_class;
+  picl_get_prop_by_name_func_t _picl_get_prop_by_name;
+  picl_get_propval_func_t _picl_get_propval;
+  picl_get_propinfo_func_t _picl_get_propinfo;
+  // Handle to the library that is returned by dlopen
+  void *_dl_handle;
+
+  bool open_library();
+  void close_library();
+
+  template<typename FuncType> bool bind(FuncType& func, const char* name);
+  bool bind_library_functions();
+
+  // Get a value of the integer property. The value in the tree can be either 32 or 64 bit
+  // depending on the platform. The result is converted to int.
+  int get_int_property(picl_nodehdl_t nodeh, const char* name, int* result) {
+    picl_propinfo_t pinfo;
+    picl_prophdl_t proph;
+    if (_picl_get_prop_by_name(nodeh, name, &proph) != PICL_SUCCESS ||
+        _picl_get_propinfo(proph, &pinfo) != PICL_SUCCESS) {
+      return PICL_FAILURE;
+    }
+
+    if (pinfo.type != PICL_PTYPE_INT && pinfo.type != PICL_PTYPE_UNSIGNED_INT) {
+      assert(false, "Invalid property type");
+      return PICL_FAILURE;
+    }
+    if (pinfo.size == sizeof(int64_t)) {
+      int64_t val;
+      if (_picl_get_propval(proph, &val, sizeof(int64_t)) != PICL_SUCCESS) {
+        return PICL_FAILURE;
+      }
+      *result = static_cast<int>(val);
+    } else if (pinfo.size == sizeof(int32_t)) {
+      int32_t val;
+      if (_picl_get_propval(proph, &val, sizeof(int32_t)) != PICL_SUCCESS) {
+        return PICL_FAILURE;
+      }
+      *result = static_cast<int>(val);
+    } else {
+      assert(false, "Unexpected integer property size");
+      return PICL_FAILURE;
+    }
+    return PICL_SUCCESS;
+  }
+
+  // Visitor and a state machine that visits integer properties and verifies that the
+  // values are the same. Stores the unique value observed.
+  class UniqueValueVisitor {
+    PICL *_picl;
+    enum {
+      INITIAL,        // Start state, no assignments happened
+      ASSIGNED,       // Assigned a value
+      INCONSISTENT    // Inconsistent value seen
+    } _state;
+    int _value;
+  public:
+    UniqueValueVisitor(PICL* picl) : _picl(picl), _state(INITIAL) { }
+    int value() {
+      assert(_state == ASSIGNED, "Precondition");
+      return _value;
+    }
+    void set_value(int value) {
+      assert(_state == INITIAL, "Precondition");
+      _value = value;
+      _state = ASSIGNED;
+    }
+    bool is_initial()       { return _state == INITIAL;      }
+    bool is_assigned()      { return _state == ASSIGNED;     }
+    bool is_inconsistent()  { return _state == INCONSISTENT; }
+    void set_inconsistent() { _state = INCONSISTENT;         }
+
+    static int visit(picl_nodehdl_t nodeh, const char* name, void *arg) {
+      UniqueValueVisitor *state = static_cast<UniqueValueVisitor*>(arg);
+      PICL* picl = state->_picl;
+      assert(!state->is_inconsistent(), "Precondition");
+      int curr;
+      if (picl->get_int_property(nodeh, name, &curr) == PICL_SUCCESS) {
+        if (!state->is_assigned()) { // first iteration
+          state->set_value(curr);
+        } else if (curr != state->value()) { // following iterations
+          state->set_inconsistent();
+        }
+      }
+      if (state->is_inconsistent()) {
+        return PICL_WALK_TERMINATE;
+      }
+      return PICL_WALK_CONTINUE;
+    }
+  };
+
+  int _L1_data_cache_line_size;
+  int _L2_cache_line_size;
+public:
+  static int get_l1_data_cache_line_size(picl_nodehdl_t nodeh, void *state) {
+    return UniqueValueVisitor::visit(nodeh, "l1-dcache-line-size", state);
+  }
+  static int get_l2_cache_line_size(picl_nodehdl_t nodeh, void *state) {
+    return UniqueValueVisitor::visit(nodeh, "l2-cache-line-size", state);
+  }
+
+  PICL() : _L1_data_cache_line_size(0), _L2_cache_line_size(0), _dl_handle(NULL) {
+    if (!open_library()) {
+      return;
+    }
+    if (_picl_initialize() == PICL_SUCCESS) {
+      picl_nodehdl_t rooth;
+      if (_picl_get_root(&rooth) == PICL_SUCCESS) {
+        UniqueValueVisitor L1_state(this);
+        // Visit all "cpu" class instances
+        _picl_walk_tree_by_class(rooth, "cpu", &L1_state, PICL_get_l1_data_cache_line_size_helper);
+        if (L1_state.is_initial()) { // Still initial, iteration found no values
+          // Try walking all "core" class instances; it might be a Fujitsu machine
+          _picl_walk_tree_by_class(rooth, "core", &L1_state, PICL_get_l1_data_cache_line_size_helper);
+        }
+        if (L1_state.is_assigned()) { // Is there a value?
+          _L1_data_cache_line_size = L1_state.value();
+        }
+
+        UniqueValueVisitor L2_state(this);
+        _picl_walk_tree_by_class(rooth, "cpu", &L2_state, PICL_get_l2_cache_line_size_helper);
+        if (L2_state.is_initial()) {
+          _picl_walk_tree_by_class(rooth, "core", &L2_state, PICL_get_l2_cache_line_size_helper);
+        }
+        if (L2_state.is_assigned()) {
+          _L2_cache_line_size = L2_state.value();
+        }
+      }
+      _picl_shutdown();
+    }
+    close_library();
+  }
+
+  unsigned int L1_data_cache_line_size() const { return _L1_data_cache_line_size; }
+  unsigned int L2_cache_line_size() const      { return _L2_cache_line_size;      }
+};
+
+extern "C" static int PICL_get_l1_data_cache_line_size_helper(picl_nodehdl_t nodeh, void *result) {
+  return PICL::get_l1_data_cache_line_size(nodeh, result);
+}
+extern "C" static int PICL_get_l2_cache_line_size_helper(picl_nodehdl_t nodeh, void *result) {
+  return PICL::get_l2_cache_line_size(nodeh, result);
+}
+
+template<typename FuncType>
+bool PICL::bind(FuncType& func, const char* name) {
+  func = reinterpret_cast<FuncType>(dlsym(_dl_handle, name));
+  return func != NULL;
+}
+
+bool PICL::bind_library_functions() {
+  assert(_dl_handle != NULL, "library should be open");
+  return bind(_picl_initialize,         "picl_initialize"        ) &&
+         bind(_picl_shutdown,           "picl_shutdown"          ) &&
+         bind(_picl_get_root,           "picl_get_root"          ) &&
+         bind(_picl_walk_tree_by_class, "picl_walk_tree_by_class") &&
+         bind(_picl_get_prop_by_name,   "picl_get_prop_by_name"  ) &&
+         bind(_picl_get_propval,        "picl_get_propval"       ) &&
+         bind(_picl_get_propinfo,       "picl_get_propinfo"      );
+}
+
+bool PICL::open_library() {
+  _dl_handle = dlopen("libpicl.so.1", RTLD_LAZY);
+  if (_dl_handle == NULL) {
+    warning("PICL (libpicl.so.1) is missing. Performance will not be optimal.");
+    return false;
+  }
+  if (!bind_library_functions()) {
+    assert(false, "unexpected PICL API change");
+    close_library();
+    return false;
+  }
+  return true;
+}
+
+void PICL::close_library() {
+  assert(_dl_handle != NULL, "library should be open");
+  dlclose(_dl_handle);
+  _dl_handle = NULL;
+}
 
 // We need to keep these here as long as we have to build on Solaris
 // versions before 10.
@@ -137,6 +343,21 @@
 #endif
     if (av & AV_SPARC_AES)       features |= aes_instructions_m;
 
+#ifndef AV_SPARC_SHA1
+#define AV_SPARC_SHA1   0x00400000  /* sha1 instruction supported */
+#endif
+    if (av & AV_SPARC_SHA1)         features |= sha1_instruction_m;
+
+#ifndef AV_SPARC_SHA256
+#define AV_SPARC_SHA256 0x00800000  /* sha256 instruction supported */
+#endif
+    if (av & AV_SPARC_SHA256)       features |= sha256_instruction_m;
+
+#ifndef AV_SPARC_SHA512
+#define AV_SPARC_SHA512 0x01000000  /* sha512 instruction supported */
+#endif
+    if (av & AV_SPARC_SHA512)       features |= sha512_instruction_m;
+
   } else {
     // getisax(2) failed, use the old legacy code.
 #ifndef PRODUCT
@@ -248,5 +469,9 @@
     kstat_close(kc);
   }
 
+  // Figure out cache line sizes using PICL
+  PICL picl;
+  _L2_cache_line_size      = picl.L2_cache_line_size();
+
   return features;
 }
--- a/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -635,7 +635,11 @@
 #ifndef PRODUCT
 void os::verify_stack_alignment() {
 #ifdef AMD64
-  assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
+  // current_stack_pointer() calls the generated get_previous_sp stub routine.
+  // Only enable the assert after the routine becomes available.
+  if (StubRoutines::code1() != NULL) {
+    assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
+  }
 #endif
 }
 #endif
--- a/src/share/tools/IdealGraphVisualizer/Graal/src/com/sun/hotspot/igv/graal/filters/color.filter	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/tools/IdealGraphVisualizer/Graal/src/com/sun/hotspot/igv/graal/filters/color.filter	Fri Apr 10 16:58:26 2015 -0700
@@ -6,6 +6,8 @@
 var lightBlue = new java.awt.Color(200, 200, 250);
 var gray = new java.awt.Color(220, 220, 220);
 var violet = new java.awt.Color(201, 148, 199);
+var black = new java.awt.Color(0, 0, 0);
+
 colorize("category", "controlSink", red);
 colorize("category", "controlSplit", red);
 colorize("category", "merge", red);
@@ -20,6 +22,7 @@
 colorize("class", "ParameterNode", gray);
 colorize("class", "ConstantNode", gray);
 colorize("class", "GuardNode", violet);
+colorize("class", "BlackholeNode", black);
 
 var f = new com.sun.hotspot.igv.graal.filters.GraalEdgeColorFilter();
 f.setUsageColor("Successor", red);
--- a/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java	Fri Apr 10 16:58:26 2015 -0700
@@ -453,9 +453,17 @@
                 } else if (scopes.peek().getCalls().size() > 2 && m == scopes.peek().last(-2).getMethod()) {
                     scopes.push(scopes.peek().last(-2));
                 } else {
-                    System.out.println(site.getMethod());
-                    System.out.println(m);
-                    throw new InternalError("call site and parse don't match");
+                    // C1 prints multiple method tags during inlining when it narrows the method being inlined.
+                    // Example:
+                    //   ...
+                    //   <method id="813" holder="694" name="toString" return="695" flags="1" bytes="36" iicount="1"/>
+                    //   <call method="813" instr="invokevirtual"/>
+                    //   <inline_success reason="receiver is statically known"/>
+                    //   <method id="814" holder="792" name="toString" return="695" flags="1" bytes="5" iicount="3"/>
+                    //   <parse method="814">
+                    //   ...
+                    site.setMethod(m);
+                    scopes.push(site);
                 }
             }
         } else if (qname.equals("parse_done")) {
--- a/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java	Fri Apr 10 16:58:26 2015 -0700
@@ -397,7 +397,7 @@
                 "/export:JVM_GetThreadStateNames "+
                 "/export:JVM_GetThreadStateValues "+
                 "/export:JVM_InitAgentProperties");
-        addAttr(rv, "AdditionalDependencies", "kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;Wsock32.lib;winmm.lib;psapi.lib");
+        addAttr(rv, "AdditionalDependencies", "kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;Wsock32.lib;winmm.lib;psapi.lib;version.lib");
         addAttr(rv, "OutputFile", outDll);
         addAttr(rv, "SuppressStartupBanner", "true");
         addAttr(rv, "ModuleDefinitionFile", outDir+Util.sep+"vm.def");
--- a/src/share/vm/asm/codeBuffer.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/asm/codeBuffer.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -133,6 +133,10 @@
   // free any overflow storage
   delete _overflow_arena;
 
+  // The claim is that stack allocation ensures resources are cleaned up.
+  // This is the resource cleanup; let's hope everything was properly copied out.
+  free_strings();
+
 #ifdef ASSERT
   // Save allocation type to execute assert in ~ResourceObj()
   // which is called after this destructor.
@@ -268,7 +272,7 @@
 
 GrowableArray<int>* CodeBuffer::create_patch_overflow() {
   if (_overflow_arena == NULL) {
-    _overflow_arena = new (mtCode) Arena();
+    _overflow_arena = new (mtCode) Arena(mtCode);
   }
   return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
 }
@@ -704,7 +708,7 @@
   relocate_code_to(&dest);
 
   // transfer strings and comments from buffer to blob
-  dest_blob->set_strings(_strings);
+  dest_blob->set_strings(_code_strings);
 
   // Done moving code bytes; were they the right size?
   assert(round_to(dest.total_content_size(), oopSize) == dest_blob->content_size(), "sanity");
@@ -1003,11 +1007,11 @@
 
 
 void CodeBuffer::block_comment(intptr_t offset, const char * comment) {
-  _strings.add_comment(offset, comment);
+  _code_strings.add_comment(offset, comment);
 }
 
 const char* CodeBuffer::code_string(const char* str) {
-  return _strings.add_string(str);
+  return _code_strings.add_string(str);
 }
 
 class CodeString: public CHeapObj<mtCode> {
@@ -1073,6 +1077,7 @@
 }
 
 void CodeStrings::add_comment(intptr_t offset, const char * comment) {
+  check_valid();
   CodeString* c      = new CodeString(comment, offset);
   CodeString* inspos = (_strings == NULL) ? NULL : find_last(offset);
 
@@ -1088,11 +1093,32 @@
 }
 
 void CodeStrings::assign(CodeStrings& other) {
+  other.check_valid();
+  // Cannot do the following because the CodeStrings constructor is not always run!
+  assert(is_null(), "Cannot assign onto non-empty CodeStrings");
   _strings = other._strings;
+  other.set_null_and_invalidate();
+}
+
+// Deep copy of CodeStrings for consistent memory management.
+// Only used for actual disassembly so this is cheaper than reference counting
+// for the "normal" fastdebug case.
+void CodeStrings::copy(CodeStrings& other) {
+  other.check_valid();
+  check_valid();
+  assert(is_null(), "Cannot copy onto non-empty CodeStrings");
+  CodeString* n = other._strings;
+  CodeString** ps = &_strings;
+  while (n != NULL) {
+    *ps = new CodeString(n->string(),n->offset());
+    ps = &((*ps)->_next);
+    n = n->next();
+  }
 }
 
 void CodeStrings::print_block_comment(outputStream* stream, intptr_t offset) const {
-  if (_strings != NULL) {
+    check_valid();
+    if (_strings != NULL) {
     CodeString* c = find(offset);
     while (c && c->offset() == offset) {
       stream->bol();
@@ -1104,7 +1130,7 @@
   }
 }
 
-
+// Also makes is_null() true
 void CodeStrings::free() {
   CodeString* n = _strings;
   while (n) {
@@ -1114,10 +1140,11 @@
     delete n;
     n = p;
   }
-  _strings = NULL;
+  set_null_and_invalidate();
 }
 
 const char* CodeStrings::add_string(const char * string) {
+  check_valid();
   CodeString* s = new CodeString(string);
   s->set_next(_strings);
   _strings = s;
--- a/src/share/vm/asm/codeBuffer.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/asm/codeBuffer.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -27,6 +27,7 @@
 
 #include "code/oopRecorder.hpp"
 #include "code/relocInfo.hpp"
+#include "utilities/debug.hpp"
 
 class CodeStrings;
 class PhaseCFG;
@@ -245,15 +246,39 @@
 private:
 #ifndef PRODUCT
   CodeString* _strings;
+#ifdef ASSERT
+  // Becomes true after copy-out, forbids further use.
+  bool _defunct; // Zero bit pattern is "valid", see memset call in decode_env::decode_env
+#endif
 #endif
 
   CodeString* find(intptr_t offset) const;
   CodeString* find_last(intptr_t offset) const;
 
+  void set_null_and_invalidate() {
+#ifndef PRODUCT
+    _strings = NULL;
+#ifdef ASSERT
+    _defunct = true;
+#endif
+#endif
+  }
+
 public:
   CodeStrings() {
 #ifndef PRODUCT
     _strings = NULL;
+#ifdef ASSERT
+    _defunct = false;
+#endif
+#endif
+  }
+
+  bool is_null() {
+#ifdef ASSERT
+    return _strings == NULL;
+#else
+    return true;
 #endif
   }
 
@@ -261,8 +286,17 @@
 
   void add_comment(intptr_t offset, const char * comment) PRODUCT_RETURN;
   void print_block_comment(outputStream* stream, intptr_t offset) const PRODUCT_RETURN;
+  // MOVE strings from other to this; invalidate other.
   void assign(CodeStrings& other)  PRODUCT_RETURN;
+  // COPY strings from other to this; leave other valid.
+  void copy(CodeStrings& other)  PRODUCT_RETURN;
   void free() PRODUCT_RETURN;
+  // Guarantee that _strings is used at most once; assign() invalidates the source buffer.
+  inline void check_valid() const {
+#ifdef ASSERT
+    assert(!_defunct, "Use of invalid CodeStrings");
+#endif
+  }
 };
 
 // A CodeBuffer describes a memory space into which assembly
@@ -330,7 +364,7 @@
   csize_t      _total_size;     // size in bytes of combined memory buffer
 
   OopRecorder* _oop_recorder;
-  CodeStrings  _strings;
+  CodeStrings  _code_strings;
   OopRecorder  _default_oop_recorder;  // override with initialize_oop_recorder
   Arena*       _overflow_arena;
 
@@ -531,7 +565,13 @@
   void initialize_oop_recorder(OopRecorder* r);
 
   OopRecorder* oop_recorder() const   { return _oop_recorder; }
-  CodeStrings& strings()              { return _strings; }
+  CodeStrings& strings()              { return _code_strings; }
+
+  void free_strings() {
+    if (!_code_strings.is_null()) {
+      _code_strings.free(); // sets _strings to NULL as a side effect.
+    }
+  }
 
   // Code generation
   void relocate(address at, RelocationHolder const& rspec, int format = 0) {
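
In the header, all of the new validity state is kept out of product builds, so the guard adds no cost to release binaries. A hedged sketch of the same conditional-compilation idiom, using the standard NDEBUG macro in place of HotSpot's PRODUCT/ASSERT macros:

#include <cassert>

// Debug-only bookkeeping that compiles away in release builds.
class Tracked {
#ifndef NDEBUG
  bool _defunct = false;          // zero bit pattern means "still valid"
#endif
public:
  void invalidate() {
#ifndef NDEBUG
    _defunct = true;
#endif
  }
  void check_valid() const {
#ifndef NDEBUG
    assert(!_defunct && "use of invalidated object");
#endif
  }
};
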
--- a/src/share/vm/asm/register.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/asm/register.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -275,4 +275,101 @@
   );
 }
 
+inline void assert_different_registers(
+  AbstractRegister a,
+  AbstractRegister b,
+  AbstractRegister c,
+  AbstractRegister d,
+  AbstractRegister e,
+  AbstractRegister f,
+  AbstractRegister g,
+  AbstractRegister h,
+  AbstractRegister i,
+  AbstractRegister j
+) {
+  assert(
+    a != b && a != c && a != d && a != e && a != f && a != g && a != h && a != i && a != j
+           && b != c && b != d && b != e && b != f && b != g && b != h && b != i && b != j
+                     && c != d && c != e && c != f && c != g && c != h && c != i && c != j
+                               && d != e && d != f && d != g && d != h && d != i && d != j
+                                         && e != f && e != g && e != h && e != i && e != j
+                                                   && f != g && f != h && f != i && f != j
+                                                             && g != h && g != i && g != j
+                                                                       && h != i && h != j
+                                                                                 && i != j,
+    err_msg_res("registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
+                ", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT
+                ", f=" INTPTR_FORMAT ", g=" INTPTR_FORMAT ", h=" INTPTR_FORMAT
+                ", i=" INTPTR_FORMAT ", j=" INTPTR_FORMAT "",
+                p2i(a), p2i(b), p2i(c), p2i(d), p2i(e), p2i(f), p2i(g), p2i(h), p2i(i), p2i(j))
+  );
+}
+
+inline void assert_different_registers(
+  AbstractRegister a,
+  AbstractRegister b,
+  AbstractRegister c,
+  AbstractRegister d,
+  AbstractRegister e,
+  AbstractRegister f,
+  AbstractRegister g,
+  AbstractRegister h,
+  AbstractRegister i,
+  AbstractRegister j,
+  AbstractRegister k
+) {
+  assert(
+    a != b && a != c && a != d && a != e && a != f && a != g && a != h && a != i && a != j && a !=k
+           && b != c && b != d && b != e && b != f && b != g && b != h && b != i && b != j && b !=k
+                     && c != d && c != e && c != f && c != g && c != h && c != i && c != j && c !=k
+                               && d != e && d != f && d != g && d != h && d != i && d != j && d !=k
+                                         && e != f && e != g && e != h && e != i && e != j && e !=k
+                                                   && f != g && f != h && f != i && f != j && f !=k
+                                                             && g != h && g != i && g != j && g !=k
+                                                                       && h != i && h != j && h !=k
+                                                                                 && i != j && i !=k
+                                                                                           && j !=k,
+    err_msg_res("registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
+                ", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT
+                ", f=" INTPTR_FORMAT ", g=" INTPTR_FORMAT ", h=" INTPTR_FORMAT
+                ", i=" INTPTR_FORMAT ", j=" INTPTR_FORMAT ", k=" INTPTR_FORMAT "",
+                p2i(a), p2i(b), p2i(c), p2i(d), p2i(e), p2i(f), p2i(g), p2i(h), p2i(i), p2i(j), p2i(k))
+  );
+}
+
+inline void assert_different_registers(
+  AbstractRegister a,
+  AbstractRegister b,
+  AbstractRegister c,
+  AbstractRegister d,
+  AbstractRegister e,
+  AbstractRegister f,
+  AbstractRegister g,
+  AbstractRegister h,
+  AbstractRegister i,
+  AbstractRegister j,
+  AbstractRegister k,
+  AbstractRegister l
+) {
+  assert(
+    a != b && a != c && a != d && a != e && a != f && a != g && a != h && a != i && a != j && a !=k && a !=l
+           && b != c && b != d && b != e && b != f && b != g && b != h && b != i && b != j && b !=k && b !=l
+                     && c != d && c != e && c != f && c != g && c != h && c != i && c != j && c !=k && c !=l
+                               && d != e && d != f && d != g && d != h && d != i && d != j && d !=k && d !=l
+                                         && e != f && e != g && e != h && e != i && e != j && e !=k && e !=l
+                                                   && f != g && f != h && f != i && f != j && f !=k && f !=l
+                                                             && g != h && g != i && g != j && g !=k && g !=l
+                                                                       && h != i && h != j && h !=k && h !=l
+                                                                                 && i != j && i !=k && i !=l
+                                                                                           && j !=k && j !=l
+                                                                                                    && k !=l,
+    err_msg_res("registers must be different: a=" INTPTR_FORMAT ", b=" INTPTR_FORMAT
+                ", c=" INTPTR_FORMAT ", d=" INTPTR_FORMAT ", e=" INTPTR_FORMAT
+                ", f=" INTPTR_FORMAT ", g=" INTPTR_FORMAT ", h=" INTPTR_FORMAT
+                ", i=" INTPTR_FORMAT ", j=" INTPTR_FORMAT ", k=" INTPTR_FORMAT
+                ", l=" INTPTR_FORMAT "",
+                p2i(a), p2i(b), p2i(c), p2i(d), p2i(e), p2i(f), p2i(g), p2i(h), p2i(i), p2i(j), p2i(k), p2i(l))
+  );
+}
+
 #endif // SHARE_VM_ASM_REGISTER_HPP
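
The new assert_different_registers() overloads spell out every pair so that the failure message can print each register value, but what they verify is plain pairwise distinctness. A compact generic sketch of the same check, for illustration only (not a proposed replacement for the expanded asserts):

#include <cassert>
#include <cstddef>

// True if no two elements of the array compare equal.
template <typename T, std::size_t N>
bool all_different(const T (&v)[N]) {
  for (std::size_t i = 0; i < N; i++) {
    for (std::size_t j = i + 1; j < N; j++) {
      if (v[i] == v[j]) return false;
    }
  }
  return true;
}

// Usage sketch: int regs[] = { 1, 2, 3 };  assert(all_different(regs));
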
--- a/src/share/vm/c1/c1_Canonicalizer.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/c1/c1_Canonicalizer.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -327,7 +327,7 @@
   if (t2->is_constant()) {
     switch (t2->tag()) {
       case intTag   : if (t2->as_IntConstant()->value() == 0)  set_canonical(x->x()); return;
-      case longTag  : if (t2->as_IntConstant()->value() == 0)  set_canonical(x->x()); return;
+      case longTag  : if (t2->as_LongConstant()->value() == (jlong)0)  set_canonical(x->x()); return;
       default       : ShouldNotReachHere();
     }
   }
@@ -808,28 +808,41 @@
 
 static bool match_index_and_scale(Instruction*  instr,
                                   Instruction** index,
-                                  int*          log2_scale,
-                                  Instruction** instr_to_unpin) {
-  *instr_to_unpin = NULL;
-
-  // Skip conversion ops
+                                  int*          log2_scale) {
+  // Skip conversion ops. This works only on 32-bit because of the implicit l2i
+  // that the unsafe access performs.
+#ifndef _LP64
   Convert* convert = instr->as_Convert();
-  if (convert != NULL) {
+  if (convert != NULL && convert->op() == Bytecodes::_i2l) {
+    assert(convert->value()->type() == intType, "invalid input type");
     instr = convert->value();
   }
+#endif
 
   ShiftOp* shift = instr->as_ShiftOp();
   if (shift != NULL) {
-    if (shift->is_pinned()) {
-      *instr_to_unpin = shift;
+    if (shift->op() == Bytecodes::_lshl) {
+      assert(shift->x()->type() == longType, "invalid input type");
+    } else {
+#ifndef _LP64
+      if (shift->op() == Bytecodes::_ishl) {
+        assert(shift->x()->type() == intType, "invalid input type");
+      } else {
+        return false;
+      }
+#else
+      return false;
+#endif
     }
+
+
     // Constant shift value?
     Constant* con = shift->y()->as_Constant();
     if (con == NULL) return false;
     // Well-known type and value?
     IntConstant* val = con->type()->as_IntConstant();
-    if (val == NULL) return false;
-    if (shift->x()->type() != intType) return false;
+    assert(val != NULL, "Should be an int constant");
+
     *index = shift->x();
     int tmp_scale = val->value();
     if (tmp_scale >= 0 && tmp_scale < 4) {
@@ -842,31 +855,42 @@
 
   ArithmeticOp* arith = instr->as_ArithmeticOp();
   if (arith != NULL) {
-    if (arith->is_pinned()) {
-      *instr_to_unpin = arith;
+    // See if either arg is a known constant
+    Constant* con = arith->x()->as_Constant();
+    if (con != NULL) {
+      *index = arith->y();
+    } else {
+      con = arith->y()->as_Constant();
+      if (con == NULL) return false;
+      *index = arith->x();
     }
+    long const_value;
     // Check for integer multiply
-    if (arith->op() == Bytecodes::_imul) {
-      // See if either arg is a known constant
-      Constant* con = arith->x()->as_Constant();
-      if (con != NULL) {
-        *index = arith->y();
+    if (arith->op() == Bytecodes::_lmul) {
+      assert((*index)->type() == longType, "invalid input type");
+      LongConstant* val = con->type()->as_LongConstant();
+      assert(val != NULL, "expecting a long constant");
+      const_value = val->value();
+    } else {
+#ifndef _LP64
+      if (arith->op() == Bytecodes::_imul) {
+        assert((*index)->type() == intType, "invalid input type");
+        IntConstant* val = con->type()->as_IntConstant();
+        assert(val != NULL, "expecting an int constant");
+        const_value = val->value();
       } else {
-        con = arith->y()->as_Constant();
-        if (con == NULL) return false;
-        *index = arith->x();
+        return false;
       }
-      if ((*index)->type() != intType) return false;
-      // Well-known type and value?
-      IntConstant* val = con->type()->as_IntConstant();
-      if (val == NULL) return false;
-      switch (val->value()) {
-      case 1: *log2_scale = 0; return true;
-      case 2: *log2_scale = 1; return true;
-      case 4: *log2_scale = 2; return true;
-      case 8: *log2_scale = 3; return true;
-      default:            return false;
-      }
+#else
+      return false;
+#endif
+    }
+    switch (const_value) {
+    case 1: *log2_scale = 0; return true;
+    case 2: *log2_scale = 1; return true;
+    case 4: *log2_scale = 2; return true;
+    case 8: *log2_scale = 3; return true;
+    default:            return false;
     }
   }
 
@@ -879,29 +903,37 @@
                   Instruction** base,
                   Instruction** index,
                   int*          log2_scale) {
-  Instruction* instr_to_unpin = NULL;
   ArithmeticOp* root = x->base()->as_ArithmeticOp();
   if (root == NULL) return false;
   // Limit ourselves to addition for now
   if (root->op() != Bytecodes::_ladd) return false;
+
+  bool match_found = false;
   // Try to find shift or scale op
-  if (match_index_and_scale(root->y(), index, log2_scale, &instr_to_unpin)) {
+  if (match_index_and_scale(root->y(), index, log2_scale)) {
     *base = root->x();
-  } else if (match_index_and_scale(root->x(), index, log2_scale, &instr_to_unpin)) {
+    match_found = true;
+  } else if (match_index_and_scale(root->x(), index, log2_scale)) {
     *base = root->y();
-  } else if (root->y()->as_Convert() != NULL) {
+    match_found = true;
+  } else if (NOT_LP64(root->y()->as_Convert() != NULL) LP64_ONLY(false)) {
+    // Skipping i2l works only on 32-bit because of the implicit l2i that the unsafe access performs.
+    // 64bit needs a real sign-extending conversion.
     Convert* convert = root->y()->as_Convert();
-    if (convert->op() == Bytecodes::_i2l && convert->value()->type() == intType) {
+    if (convert->op() == Bytecodes::_i2l) {
+      assert(convert->value()->type() == intType, "should be an int");
       // pick base and index, setting scale at 1
       *base  = root->x();
       *index = convert->value();
       *log2_scale = 0;
-    } else {
-      return false;
+      match_found = true;
     }
-  } else {
-    // doesn't match any expected sequences
-    return false;
+  }
+  // Default: no shift/scale pattern matched; fall back to plain base + index.
+  if (!match_found) {
+    *base = root->x();
+    *index = root->y();
+    *log2_scale = 0;
   }
 
   // If the value is pinned then it will be always be computed so
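
The reworked match_index_and_scale() folds a constant multiply (or shift) into an addressing-mode scale only when the constant is 1, 2, 4 or 8. A standalone sketch of that mapping, with a hypothetical helper name:

#include <cstdint>

// Maps a multiplier to the log2 scale usable in an address expression;
// anything other than 1/2/4/8 cannot be folded and must stay an explicit multiply.
static bool multiplier_to_log2_scale(int64_t multiplier, int* log2_scale) {
  switch (multiplier) {
    case 1: *log2_scale = 0; return true;
    case 2: *log2_scale = 1; return true;
    case 4: *log2_scale = 2; return true;
    case 8: *log2_scale = 3; return true;
    default: return false;
  }
}
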
--- a/src/share/vm/c1/c1_Compiler.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/c1/c1_Compiler.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -48,7 +48,7 @@
 
 void Compiler::init_c1_runtime() {
   BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
-  Arena* arena = new (mtCompiler) Arena();
+  Arena* arena = new (mtCompiler) Arena(mtCompiler);
   Runtime1::initialize(buffer_blob);
   FrameMap::initialize();
   // initialize data structures
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -2061,7 +2061,7 @@
   bool will_link;
   ciKlass* klass = stream()->get_klass(will_link);
   assert(klass->is_instance_klass(), "must be an instance klass");
-  NewInstance* new_instance = new NewInstance(klass->as_instance_klass(), state_before);
+  NewInstance* new_instance = new NewInstance(klass->as_instance_klass(), state_before, stream()->is_unresolved_klass());
   _memory->new_instance(new_instance);
   apush(append_split(new_instance));
 }
@@ -3962,10 +3962,15 @@
   // Clear out bytecode stream
   scope_data()->set_stream(NULL);
 
+  CompileLog* log = compilation()->log();
+  if (log != NULL) log->head("parse method='%d'", log->identify(callee));
+
   // Ready to resume parsing in callee (either in the same block we
   // were in before or in the callee's start block)
   iterate_all_blocks(callee_start_block == NULL);
 
+  if (log != NULL) log->done("parse");
+
   // If we bailed out during parsing, return immediately (this is bad news)
   if (bailed_out())
       return false;
--- a/src/share/vm/c1/c1_Instruction.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/c1/c1_Instruction.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1291,16 +1291,18 @@
 LEAF(NewInstance, StateSplit)
  private:
   ciInstanceKlass* _klass;
+  bool _is_unresolved;
 
  public:
   // creation
-  NewInstance(ciInstanceKlass* klass, ValueStack* state_before)
+  NewInstance(ciInstanceKlass* klass, ValueStack* state_before, bool is_unresolved)
   : StateSplit(instanceType, state_before)
-  , _klass(klass)
+  , _klass(klass), _is_unresolved(is_unresolved)
   {}
 
   // accessors
   ciInstanceKlass* klass() const                 { return _klass; }
+  bool is_unresolved() const                     { return _is_unresolved; }
 
   virtual bool needs_exception_state() const     { return false; }
 
--- a/src/share/vm/c1/c1_LIRGenerator.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -466,8 +466,11 @@
 }
 
 
-void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info) {
-  if (!obj->is_loaded() || PatchALot) {
+void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) {
+  /* C2 relies on constant pool entries being resolved (ciTypeFlow), so if TieredCompilation
+   * is active and the class hasn't yet been resolved, we need to emit a patch that resolves
+   * the class. */
+  if ((TieredCompilation && need_resolve) || !obj->is_loaded() || PatchALot) {
     assert(info != NULL, "info must be set if class is not loaded");
     __ klass2reg_patch(NULL, r, info);
   } else {
@@ -660,9 +663,18 @@
   __ unlock_object(hdr, object, lock, scratch, slow_path);
 }
 
-
-void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
-  klass2reg_with_patching(klass_reg, klass, info);
+#ifndef PRODUCT
+void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
+  if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
+    tty->print_cr("   ###class not loaded at new bci %d", new_instance->printable_bci());
+  } else if (PrintNotLoaded && (TieredCompilation && new_instance->is_unresolved())) {
+    tty->print_cr("   ###class not resolved at new bci %d", new_instance->printable_bci());
+  }
+}
+#endif
+
+void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
+  klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
   // If klass is not loaded we do not know if the klass has finalizers:
   if (UseFastNewInstance && klass->is_loaded()
       && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
@@ -2030,6 +2042,8 @@
   }
 }
 
+// Here UnsafeGetRaw may have x->base() and x->index() be either int or long
+// on both 32-bit and 64-bit platforms. On 64-bit, x->base() is expected to always be long.
 void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
   LIRItem base(x->base(), this);
   LIRItem idx(this);
@@ -2044,50 +2058,73 @@
 
   int   log2_scale = 0;
   if (x->has_index()) {
-    assert(x->index()->type()->tag() == intTag, "should not find non-int index");
     log2_scale = x->log2_scale();
   }
 
   assert(!x->has_index() || idx.value() == x->index(), "should match");
 
   LIR_Opr base_op = base.result();
+  LIR_Opr index_op = idx.result();
 #ifndef _LP64
-  if (x->base()->type()->tag() == longTag) {
+  if (base_op->type() == T_LONG) {
     base_op = new_register(T_INT);
     __ convert(Bytecodes::_l2i, base.result(), base_op);
-  } else {
-    assert(x->base()->type()->tag() == intTag, "must be");
+  }
+  if (x->has_index()) {
+    if (index_op->type() == T_LONG) {
+      LIR_Opr long_index_op = index_op;
+      if (index_op->is_constant()) {
+        long_index_op = new_register(T_LONG);
+        __ move(index_op, long_index_op);
+      }
+      index_op = new_register(T_INT);
+      __ convert(Bytecodes::_l2i, long_index_op, index_op);
+    } else {
+      assert(x->index()->type()->tag() == intTag, "must be");
+    }
   }
+  // At this point base and index should both be ints.
+  assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
+  assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
+#else
+  if (x->has_index()) {
+    if (index_op->type() == T_INT) {
+      if (!index_op->is_constant()) {
+        index_op = new_register(T_LONG);
+        __ convert(Bytecodes::_i2l, idx.result(), index_op);
+      }
+    } else {
+      assert(index_op->type() == T_LONG, "must be");
+      if (index_op->is_constant()) {
+        index_op = new_register(T_LONG);
+        __ move(idx.result(), index_op);
+      }
+    }
+  }
+  // At this point base is a non-constant long.
+  // Index is a long register or an int constant.
+  // We allow the constant to stay an int because that would allow us a more compact encoding by
+  // embedding an immediate offset in the address expression. If we have a long constant, we have to
+  // move it into a register first.
+  assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant");
+  assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
+                            (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
 #endif
 
   BasicType dst_type = x->basic_type();
-  LIR_Opr index_op = idx.result();
 
   LIR_Address* addr;
   if (index_op->is_constant()) {
     assert(log2_scale == 0, "must not have a scale");
+    assert(index_op->type() == T_INT, "only int constants supported");
     addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
   } else {
 #ifdef X86
-#ifdef _LP64
-    if (!index_op->is_illegal() && index_op->type() == T_INT) {
-      LIR_Opr tmp = new_pointer_register();
-      __ convert(Bytecodes::_i2l, index_op, tmp);
-      index_op = tmp;
-    }
-#endif
     addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
 #elif defined(ARM)
     addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
 #else
     if (index_op->is_illegal() || log2_scale == 0) {
-#ifdef _LP64
-      if (!index_op->is_illegal() && index_op->type() == T_INT) {
-        LIR_Opr tmp = new_pointer_register();
-        __ convert(Bytecodes::_i2l, index_op, tmp);
-        index_op = tmp;
-      }
-#endif
       addr = new LIR_Address(base_op, index_op, dst_type);
     } else {
       LIR_Opr tmp = new_pointer_register();
@@ -2114,7 +2151,6 @@
   BasicType type = x->basic_type();
 
   if (x->has_index()) {
-    assert(x->index()->type()->tag() == intTag, "should not find non-int index");
     log2_scale = x->log2_scale();
   }
 
@@ -2137,38 +2173,39 @@
   set_no_result(x);
 
   LIR_Opr base_op = base.result();
+  LIR_Opr index_op = idx.result();
+
 #ifndef _LP64
-  if (x->base()->type()->tag() == longTag) {
+  if (base_op->type() == T_LONG) {
     base_op = new_register(T_INT);
     __ convert(Bytecodes::_l2i, base.result(), base_op);
-  } else {
-    assert(x->base()->type()->tag() == intTag, "must be");
+  }
+  if (x->has_index()) {
+    if (index_op->type() == T_LONG) {
+      index_op = new_register(T_INT);
+      __ convert(Bytecodes::_l2i, idx.result(), index_op);
+    }
   }
+  // At this point base and index should both be non-constant ints.
+  assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
+  assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be a non-constant int");
+#else
+  if (x->has_index()) {
+    if (index_op->type() == T_INT) {
+      index_op = new_register(T_LONG);
+      __ convert(Bytecodes::_i2l, idx.result(), index_op);
+    }
+  }
+  // At this point base and index are long and non-constant
+  assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
+  assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long");
 #endif
 
-  LIR_Opr index_op = idx.result();
   if (log2_scale != 0) {
     // temporary fix (platform dependent code without shift on Intel would be better)
-    index_op = new_pointer_register();
-#ifdef _LP64
-    if(idx.result()->type() == T_INT) {
-      __ convert(Bytecodes::_i2l, idx.result(), index_op);
-    } else {
-#endif
-      // TODO: ARM also allows embedded shift in the address
-      __ move(idx.result(), index_op);
-#ifdef _LP64
-    }
-#endif
+    // TODO: ARM also allows embedded shift in the address
     __ shift_left(index_op, log2_scale, index_op);
   }
-#ifdef _LP64
-  else if(!index_op->is_illegal() && index_op->type() == T_INT) {
-    LIR_Opr tmp = new_pointer_register();
-    __ convert(Bytecodes::_i2l, index_op, tmp);
-    index_op = tmp;
-  }
-#endif
 
   LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
   __ move(value.result(), addr);
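
The do_UnsafeGetRaw/do_UnsafePutRaw changes normalize the index to the platform word width (l2i truncation on 32-bit, i2l sign extension on 64-bit) before forming the address. Roughly, the computed address amounts to the following sketch; this is illustrative C++, not the generated LIR:

#include <cstdint>

// base + (normalized index << log2_scale), with the index brought to word width first.
static uintptr_t raw_address(uintptr_t base, int64_t index, int log2_scale) {
#ifdef _LP64
  intptr_t idx = (intptr_t)index;             // keep the full 64-bit index
#else
  intptr_t idx = (intptr_t)(int32_t)index;    // l2i: only the low 32 bits are used
#endif
  return base + ((uintptr_t)idx << log2_scale);
}
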
--- a/src/share/vm/c1/c1_LIRGenerator.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/c1/c1_LIRGenerator.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -169,6 +169,8 @@
     return this;
   }
 
+  void print_if_not_loaded(const NewInstance* new_instance) PRODUCT_RETURN;
+
 #ifdef ASSERT
   LIR_List* lir(const char * file, int line) const {
     _lir->set_file_and_line(file, line);
@@ -307,7 +309,7 @@
 
   void store_stack_parameter (LIR_Opr opr, ByteSize offset_from_sp_in_bytes);
 
-  void klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info);
+  void klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve = false);
 
   // this loads the length and compares against the index
   void array_range_check          (LIR_Opr array, LIR_Opr index, CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info);
@@ -325,7 +327,7 @@
   void monitor_enter (LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info);
   void monitor_exit  (LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no);
 
-  void new_instance    (LIR_Opr  dst, ciInstanceKlass* klass, LIR_Opr  scratch1, LIR_Opr  scratch2, LIR_Opr  scratch3,  LIR_Opr scratch4, LIR_Opr  klass_reg, CodeEmitInfo* info);
+  void new_instance    (LIR_Opr  dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr  scratch1, LIR_Opr  scratch2, LIR_Opr  scratch3,  LIR_Opr scratch4, LIR_Opr  klass_reg, CodeEmitInfo* info);
 
   // machine dependent
   void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info);
--- a/src/share/vm/c1/c1_LinearScan.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/c1/c1_LinearScan.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1628,25 +1628,22 @@
   Interval* precolored_cpu_intervals, *not_precolored_cpu_intervals;
   Interval* precolored_fpu_intervals, *not_precolored_fpu_intervals;
 
-  create_unhandled_lists(&precolored_cpu_intervals, &not_precolored_cpu_intervals, is_precolored_cpu_interval, is_virtual_cpu_interval);
-  if (has_fpu_registers()) {
-    create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals, is_precolored_fpu_interval, is_virtual_fpu_interval);
-#ifdef ASSERT
-  } else {
-    // fpu register allocation is omitted because no virtual fpu registers are present
-    // just check this again...
-    create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals, is_precolored_fpu_interval, is_virtual_fpu_interval);
-    assert(not_precolored_fpu_intervals == Interval::end(), "missed an uncolored fpu interval");
-#endif
-  }
-
   // allocate cpu registers
+  create_unhandled_lists(&precolored_cpu_intervals, &not_precolored_cpu_intervals,
+                         is_precolored_cpu_interval, is_virtual_cpu_interval);
+
+  // allocate fpu registers
+  create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals,
+                         is_precolored_fpu_interval, is_virtual_fpu_interval);
+
+  // The fpu interval allocation cannot be moved down into the fpu section below,
+  // because cpu_lsw.walk() changes interval positions.
+
   LinearScanWalker cpu_lsw(this, precolored_cpu_intervals, not_precolored_cpu_intervals);
   cpu_lsw.walk();
   cpu_lsw.finish_allocation();
 
   if (has_fpu_registers()) {
-    // allocate fpu registers
     LinearScanWalker fpu_lsw(this, precolored_fpu_intervals, not_precolored_fpu_intervals);
     fpu_lsw.walk();
     fpu_lsw.finish_allocation();
--- a/src/share/vm/c1/c1_Runtime1.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/c1/c1_Runtime1.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -544,13 +544,18 @@
     // normal bytecode execution.
     thread->clear_exception_oop_and_pc();
 
+    Handle original_exception(thread, exception());
+
     continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false);
     // If an exception was thrown during exception dispatch, the exception oop may have changed
     thread->set_exception_oop(exception());
     thread->set_exception_pc(pc);
 
     // the exception cache is used only by non-implicit exceptions
-    if (continuation != NULL) {
+    // Update the exception cache only if no other exception
+    // occurred during the computation of the compiled
+    // exception handler.
+    if (continuation != NULL && original_exception() == exception()) {
       nm->add_handler_for_exception_and_pc(exception, pc, continuation);
     }
   }
@@ -1018,6 +1023,7 @@
               n_copy->set_data((intx) (load_klass()));
             } else {
               assert(mirror() != NULL, "klass not set");
+              // Don't need a G1 pre-barrier here since we assert above that data isn't an oop.
               n_copy->set_data(cast_from_oop<intx>(mirror()));
             }
 
--- a/src/share/vm/c1/c1_globals.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/c1/c1_globals.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -287,9 +287,6 @@
   develop(bool, InstallMethods, true,                                       \
           "Install methods at the end of successful compilations")          \
                                                                             \
-  product(intx, CompilationRepeat, 0,                                       \
-          "Number of times to recompile method before returning result")    \
-                                                                            \
   develop(intx, NMethodSizeLimit, (64*K)*wordSize,                          \
           "Maximum size of a compiled method.")                             \
                                                                             \
--- a/src/share/vm/ci/bcEscapeAnalyzer.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/ci/bcEscapeAnalyzer.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -89,8 +89,8 @@
 public:
   ArgumentMap *_vars;
   ArgumentMap *_stack;
-  short _stack_height;
-  short _max_stack;
+  int _stack_height;
+  int _max_stack;
   bool _initialized;
   ArgumentMap empty_map;
 
--- a/src/share/vm/ci/ciEnv.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/ci/ciEnv.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -51,6 +51,7 @@
 #include "runtime/init.hpp"
 #include "runtime/reflection.hpp"
 #include "runtime/sharedRuntime.hpp"
+#include "runtime/thread.inline.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/macros.hpp"
 #ifdef COMPILER1
@@ -85,7 +86,8 @@
 
 // ------------------------------------------------------------------
 // ciEnv::ciEnv
-ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter) {
+ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter)
+  : _ciEnv_arena(mtCompiler) {
   VM_ENTRY_MARK;
 
   // Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.
@@ -138,7 +140,7 @@
   _the_min_jint_string = NULL;
 }
 
-ciEnv::ciEnv(Arena* arena) {
+ciEnv::ciEnv(Arena* arena) : _ciEnv_arena(mtCompiler) {
   ASSERT_IN_VM;
 
   // Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.
@@ -557,7 +559,12 @@
     oop obj = cpool->resolved_references()->obj_at(cache_index);
     if (obj != NULL) {
       ciObject* ciobj = get_object(obj);
-      return ciConstant(T_OBJECT, ciobj);
+      if (ciobj->is_array()) {
+        return ciConstant(T_ARRAY, ciobj);
+      } else {
+        assert(ciobj->is_instance(), "should be an instance");
+        return ciConstant(T_OBJECT, ciobj);
+      }
     }
     index = cpool->object_to_cp_index(cache_index);
   }
@@ -584,8 +591,12 @@
       }
     }
     ciObject* constant = get_object(string);
-    assert (constant->is_instance(), "must be an instance, or not? ");
-    return ciConstant(T_OBJECT, constant);
+    if (constant->is_array()) {
+      return ciConstant(T_ARRAY, constant);
+    } else {
+      assert (constant->is_instance(), "must be an instance, or not? ");
+      return ciConstant(T_OBJECT, constant);
+    }
   } else if (tag.is_klass() || tag.is_unresolved_klass()) {
     // 4881222: allow ldc to take a class type
     ciKlass* klass = get_klass_by_index_impl(cpool, index, ignore_will_link, accessor);
@@ -1110,9 +1121,6 @@
 // ------------------------------------------------------------------
 // ciEnv::record_failure()
 void ciEnv::record_failure(const char* reason) {
-  if (log() != NULL) {
-    log()->elem("failure reason='%s'", reason);
-  }
   if (_failure_reason == NULL) {
     // Record the first failure reason.
     _failure_reason = reason;
--- a/src/share/vm/ci/ciEnv.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/ci/ciEnv.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -184,6 +184,10 @@
     }
   }
 
+  void ensure_metadata_alive(ciMetadata* m) {
+    _factory->ensure_metadata_alive(m);
+  }
+
   ciInstance* get_instance(oop o) {
     if (o == NULL) return NULL;
     return get_object(o)->as_instance();
--- a/src/share/vm/ci/ciKlass.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/ci/ciKlass.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -43,6 +43,7 @@
   friend class ciMethod;
   friend class ciMethodData;
   friend class ciObjArrayKlass;
+  friend class ciReceiverTypeData;
 
 private:
   ciSymbol* _name;
--- a/src/share/vm/ci/ciMethod.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/ci/ciMethod.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -68,7 +68,10 @@
 // ciMethod::ciMethod
 //
 // Loaded method.
-ciMethod::ciMethod(methodHandle h_m) : ciMetadata(h_m()) {
+ciMethod::ciMethod(methodHandle h_m, ciInstanceKlass* holder) :
+  ciMetadata(h_m()),
+  _holder(holder)
+{
   assert(h_m() != NULL, "no null method");
 
   // These fields are always filled in in loaded methods.
@@ -124,7 +127,6 @@
   // generating _signature may allow GC and therefore move m.
   // These fields are always filled in.
   _name = env->get_symbol(h_m()->name());
-  _holder = env->get_instance_klass(h_m()->method_holder());
   ciSymbol* sig_symbol = env->get_symbol(h_m()->signature());
   constantPoolHandle cpool = h_m()->constants();
   _signature = new (env->arena()) ciSignature(_holder, cpool, sig_symbol);
@@ -1106,6 +1108,22 @@
 }
 
 // ------------------------------------------------------------------
+// ciMethod::has_option_value
+//
+template<typename T>
+bool ciMethod::has_option_value(const char* option, T& value) {
+  check_is_loaded();
+  VM_ENTRY_MARK;
+  methodHandle mh(THREAD, get_Method());
+  return CompilerOracle::has_option_value(mh, option, value);
+}
+// Explicit instantiation for all OptionTypes supported.
+template bool ciMethod::has_option_value<intx>(const char* option, intx& value);
+template bool ciMethod::has_option_value<uintx>(const char* option, uintx& value);
+template bool ciMethod::has_option_value<bool>(const char* option, bool& value);
+template bool ciMethod::has_option_value<ccstr>(const char* option, ccstr& value);
+
+// ------------------------------------------------------------------
 // ciMethod::can_be_compiled
 //
 // Have previous compilations of this method succeeded?
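
ciMethod::has_option_value<T> is defined in the .cpp file and explicitly instantiated for each supported option type, so callers in other translation units still link against it. A generic sketch of that idiom with hypothetical names:

// options.hpp would carry only the declaration:
template <typename T>
bool lookup_option(const char* name, T& value);

// options.cpp carries the definition plus explicit instantiations:
template <typename T>
bool lookup_option(const char* name, T& value) {
  (void)name; (void)value;        // real lookup elided in this sketch
  return false;
}
template bool lookup_option<int>(const char*, int&);
template bool lookup_option<bool>(const char*, bool&);
template bool lookup_option<const char*>(const char*, const char*&);
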
--- a/src/share/vm/ci/ciMethod.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/ci/ciMethod.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -90,7 +90,7 @@
   BCEscapeAnalyzer*   _bcea;
 #endif
 
-  ciMethod(methodHandle h_m);
+  ciMethod(methodHandle h_m, ciInstanceKlass* holder);
   ciMethod(ciInstanceKlass* holder, ciSymbol* name, ciSymbol* signature, ciInstanceKlass* accessor);
 
   Method* get_Method() const {
@@ -264,6 +264,8 @@
   bool should_print_assembly();
   bool break_at_execute();
   bool has_option(const char *option);
+  template<typename T>
+  bool has_option_value(const char* option, T& value);
   bool can_be_compiled();
   bool can_be_osr_compiled(int entry_bci);
   void set_not_compilable(const char* reason = NULL);
--- a/src/share/vm/ci/ciMethodData.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/ci/ciMethodData.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -170,6 +170,7 @@
     Klass* k = data->as_ReceiverTypeData()->receiver(row);
     if (k != NULL) {
       ciKlass* klass = CURRENT_ENV->get_klass(k);
+      CURRENT_ENV->ensure_metadata_alive(klass);
       set_receiver(row, klass);
     }
   }
@@ -191,6 +192,7 @@
 void ciSpeculativeTrapData::translate_from(const ProfileData* data) {
   Method* m = data->as_SpeculativeTrapData()->method();
   ciMethod* ci_m = CURRENT_ENV->get_method(m);
+  CURRENT_ENV->ensure_metadata_alive(ci_m);
   set_method(ci_m);
 }
 
--- a/src/share/vm/ci/ciMethodData.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/ci/ciMethodData.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -70,6 +70,7 @@
     Klass* v = TypeEntries::valid_klass(k);
     if (v != NULL) {
       ciKlass* klass = CURRENT_ENV->get_klass(v);
+      CURRENT_ENV->ensure_metadata_alive(klass);
       return with_status(klass, k);
     }
     return with_status(NULL, k);
--- a/src/share/vm/ci/ciObjectFactory.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/ci/ciObjectFactory.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,6 +46,9 @@
 #include "oops/oop.inline.hpp"
 #include "oops/oop.inline2.hpp"
 #include "runtime/fieldType.hpp"
+#if INCLUDE_ALL_GCS
+# include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#endif
 
 // ciObjectFactory
 //
@@ -109,7 +112,7 @@
   // This Arena is long lived and exists in the resource mark of the
   // compiler thread that initializes the initial ciObjectFactory which
   // creates the shared ciObjects that all later ciObjectFactories use.
-  Arena* arena = new (mtCompiler) Arena();
+  Arena* arena = new (mtCompiler) Arena(mtCompiler);
   ciEnv initial(arena);
   ciEnv* env = ciEnv::current();
   env->_factory->init_shared_objects();
@@ -236,7 +239,7 @@
 ciObject* ciObjectFactory::get(oop key) {
   ASSERT_IN_VM;
 
-  assert(key == NULL || Universe::heap()->is_in_reserved(key), "must be");
+  assert(Universe::heap()->is_in_reserved(key), "must be");
 
   NonPermObject* &bucket = find_non_perm(key);
   if (bucket != NULL) {
@@ -257,10 +260,10 @@
 }
 
 // ------------------------------------------------------------------
-// ciObjectFactory::get
+// ciObjectFactory::get_metadata
 //
-// Get the ciObject corresponding to some oop.  If the ciObject has
-// already been created, it is returned.  Otherwise, a new ciObject
+// Get the ciMetadata corresponding to some Metadata. If the ciMetadata has
+// already been created, it is returned. Otherwise, a new ciMetadata
 // is created.
 ciMetadata* ciObjectFactory::get_metadata(Metadata* key) {
   ASSERT_IN_VM;
@@ -287,9 +290,9 @@
   }
 #endif
   if (!is_found_at(index, key, _ci_metadata)) {
-    // The ciObject does not yet exist.  Create it and insert it
+    // The ciMetadata does not yet exist. Create it and insert it
     // into the cache.
-    ciMetadata* new_object = create_new_object(key);
+    ciMetadata* new_object = create_new_metadata(key);
     init_ident_of(new_object);
     assert(new_object->is_metadata(), "must be");
 
@@ -341,15 +344,28 @@
 }
 
 // ------------------------------------------------------------------
-// ciObjectFactory::create_new_object
+// ciObjectFactory::create_new_metadata
 //
-// Create a new ciObject from a Metadata*.
+// Create a new ciMetadata from a Metadata*.
 //
-// Implementation note: this functionality could be virtual behavior
-// of the oop itself.  For now, we explicitly marshal the object.
-ciMetadata* ciObjectFactory::create_new_object(Metadata* o) {
+// Implementation note: in order to keep Metadata alive, an auxiliary ciObject
+// is used that points to its holder.
+ciMetadata* ciObjectFactory::create_new_metadata(Metadata* o) {
   EXCEPTION_CONTEXT;
 
+  // Keep the metadata from being unloaded by keeping its holder alive.
+  if (_initialized && o->is_klass()) {
+    Klass* holder = ((Klass*)o);
+    if (holder->oop_is_instance() && InstanceKlass::cast(holder)->is_anonymous()) {
+      // Though ciInstanceKlass records the class loader oop, it's not enough to keep
+      // VM anonymous classes alive (loader == NULL). Klass holder should be used instead.
+      // It is enough to record a ciObject, since cached elements are never removed
+      // during ciObjectFactory lifetime. ciObjectFactory itself is created for
+      // every compilation and lives for the whole duration of the compilation.
+      ciObject* h = get(holder->klass_holder());
+    }
+  }
+
   if (o->is_klass()) {
     KlassHandle h_k(THREAD, (Klass*)o);
     Klass* k = (Klass*)o;
@@ -362,18 +378,51 @@
     }
   } else if (o->is_method()) {
     methodHandle h_m(THREAD, (Method*)o);
-    return new (arena()) ciMethod(h_m);
+    ciEnv *env = CURRENT_THREAD_ENV;
+    ciInstanceKlass* holder = env->get_instance_klass(h_m()->method_holder());
+    return new (arena()) ciMethod(h_m, holder);
   } else if (o->is_methodData()) {
     // Hold methodHandle alive - might not be necessary ???
     methodHandle h_m(THREAD, ((MethodData*)o)->method());
     return new (arena()) ciMethodData((MethodData*)o);
   }
 
-  // The oop is of some type not supported by the compiler interface.
+  // The Metadata* is of some type not supported by the compiler interface.
   ShouldNotReachHere();
   return NULL;
 }
 
+// ------------------------------------------------------------------
+// ciObjectFactory::ensure_metadata_alive
+//
+// Ensure that the metadata wrapped by the ciMetadata is kept alive by GC.
+// This is primarily useful for metadata which the GC considers a weak root
+// but which needs to be a strong root if reachable from the current compilation.
+//
+void ciObjectFactory::ensure_metadata_alive(ciMetadata* m) {
+  ASSERT_IN_VM; // We're handling raw oops here.
+
+#if INCLUDE_ALL_GCS
+  if (!UseG1GC) {
+    return;
+  }
+  Klass* metadata_owner_klass;
+  if (m->is_klass()) {
+    metadata_owner_klass = m->as_klass()->get_Klass();
+  } else if (m->is_method()) {
+    metadata_owner_klass = m->as_method()->get_Method()->constants()->pool_holder();
+  } else {
+    fatal("Not implemented for other types of metadata");
+  }
+
+  oop metadata_holder = metadata_owner_klass->klass_holder();
+  if (metadata_holder != NULL) {
+    G1SATBCardTableModRefBS::enqueue(metadata_holder);
+  }
+
+#endif
+}
+
 //------------------------------------------------------------------
 // ciObjectFactory::get_unloaded_method
 //
@@ -667,7 +716,7 @@
 // If there is no entry in the cache corresponding to this oop, return
 // the null tail of the bucket into which the oop should be inserted.
 ciObjectFactory::NonPermObject* &ciObjectFactory::find_non_perm(oop key) {
-  assert(Universe::heap()->is_in_reserved_or_null(key), "must be");
+  assert(Universe::heap()->is_in_reserved(key), "must be");
   ciMetadata* klass = get_metadata(key->klass());
   NonPermObject* *bp = &_non_perm_bucket[(unsigned) klass->hash() % NON_PERM_BUCKETS];
   for (NonPermObject* p; (p = (*bp)) != NULL; bp = &p->next()) {
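
ensure_metadata_alive() turns otherwise weakly-reachable metadata into a strong root for the duration of the compilation by enqueueing its holder for G1's SATB marking. A generic sketch of the keep-alive idea, with hypothetical types rather than the VM's oop/Klass machinery:

#include <vector>

struct Holder { /* stands in for the class loader or klass holder object */ };

struct CompilationScope {
  std::vector<Holder*> _keep_alive;        // strong references, dropped when the scope ends

  void ensure_alive(Holder* h) {
    if (h != nullptr) {
      _keep_alive.push_back(h);            // reachable until the compilation finishes
    }
  }
};
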
--- a/src/share/vm/ci/ciObjectFactory.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/ci/ciObjectFactory.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -73,7 +73,9 @@
   void insert(int index, ciMetadata* obj, GrowableArray<ciMetadata*>* objects);
 
   ciObject* create_new_object(oop o);
-  ciMetadata* create_new_object(Metadata* o);
+  ciMetadata* create_new_metadata(Metadata* o);
+
+  void ensure_metadata_alive(ciMetadata* m);
 
   static bool is_equal(NonPermObject* p, oop key) {
     return p->object()->get_oop() == key;
--- a/src/share/vm/ci/ciTypeFlow.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/ci/ciTypeFlow.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -35,6 +35,8 @@
 #include "interpreter/bytecode.hpp"
 #include "interpreter/bytecodes.hpp"
 #include "memory/allocation.inline.hpp"
+#include "opto/compile.hpp"
+#include "opto/node.hpp"
 #include "runtime/deoptimization.hpp"
 #include "utilities/growableArray.hpp"
 
@@ -730,7 +732,7 @@
     if (obj->is_null_object()) {
       push_null();
     } else {
-      assert(obj->is_instance(), "must be java_mirror of klass");
+      assert(obj->is_instance() || obj->is_array(), "must be java_mirror of klass");
       push_object(obj->klass());
     }
   } else {
@@ -2646,7 +2648,7 @@
       assert (!blk->has_pre_order(), "");
       blk->set_next_pre_order();
 
-      if (_next_pre_order >= MaxNodeLimit / 2) {
+      if (_next_pre_order >= (int)Compile::current()->max_node_limit() / 2) {
         // Too many basic blocks.  Bail out.
         // This can happen when try/finally constructs are nested to depth N,
         // and there is O(2**N) cloning of jsr bodies.  See bug 4697245!
--- a/src/share/vm/classfile/classFileParser.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/classfile/classFileParser.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -31,6 +31,9 @@
 #include "classfile/javaClasses.hpp"
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
+#if INCLUDE_CDS
+#include "classfile/systemDictionaryShared.hpp"
+#endif
 #include "classfile/verificationType.hpp"
 #include "classfile/verifier.hpp"
 #include "classfile/vmSymbols.hpp"
@@ -60,6 +63,7 @@
 #include "services/threadService.hpp"
 #include "utilities/array.hpp"
 #include "utilities/globalDefinitions.hpp"
+#include "utilities/ostream.hpp"
 
 // We generally try to create the oops directly when parsing, rather than
 // allocating temporary data structures and copying the bytes twice. A
@@ -2531,7 +2535,7 @@
 Array<Method*>* ClassFileParser::parse_methods(bool is_interface,
                                                AccessFlags* promoted_flags,
                                                bool* has_final_method,
-                                               bool* has_default_methods,
+                                               bool* declares_default_methods,
                                                TRAPS) {
   ClassFileStream* cfs = stream();
   cfs->guarantee_more(2, CHECK_NULL);  // length
@@ -2550,11 +2554,11 @@
       if (method->is_final()) {
         *has_final_method = true;
       }
-      if (is_interface && !(*has_default_methods)
-        && !method->is_abstract() && !method->is_static()
-        && !method->is_private()) {
-        // default method
-        *has_default_methods = true;
+      // declares_default_methods: the interface declares concrete instance methods (any access
+      // flags); used for interface initialization and default method inheritance analysis.
+      if (is_interface && !(*declares_default_methods)
+        && !method->is_abstract() && !method->is_static()) {
+        *declares_default_methods = true;
       }
       _methods->at_put(index, method());
     }
@@ -2786,6 +2790,11 @@
   ClassFileStream* cfs = stream();
   u1* current_start = cfs->current();
 
+  guarantee_property(attribute_byte_length >= sizeof(u2),
+                     "Invalid BootstrapMethods attribute length %u in class file %s",
+                     attribute_byte_length,
+                     CHECK);
+
   cfs->guarantee_more(attribute_byte_length, CHECK);
 
   int attribute_array_length = cfs->get_u2_fast();
@@ -2794,11 +2803,6 @@
                      "Short length on BootstrapMethods in class file %s",
                      CHECK);
 
-  guarantee_property(attribute_byte_length >= sizeof(u2),
-                     "Invalid BootstrapMethods attribute length %u in class file %s",
-                     attribute_byte_length,
-                     CHECK);
-
   // The attribute contains a counted array of counted tuples of shorts,
   // represending bootstrap specifiers:
   //    length*{bootstrap_method_index, argument_count*{argument_index}}
@@ -3060,21 +3064,39 @@
   }
 }
 
-// Transfer ownership of metadata allocated to the InstanceKlass.
-void ClassFileParser::apply_parsed_class_metadata(
-                                            instanceKlassHandle this_klass,
-                                            int java_fields_count, TRAPS) {
-  // Assign annotations if needed
-  if (_annotations != NULL || _type_annotations != NULL ||
-      _fields_annotations != NULL || _fields_type_annotations != NULL) {
+// Create the Annotations object that will
+// hold the annotations array for the Klass.
+void ClassFileParser::create_combined_annotations(TRAPS) {
+    if (_annotations == NULL &&
+        _type_annotations == NULL &&
+        _fields_annotations == NULL &&
+        _fields_type_annotations == NULL) {
+      // Don't create the Annotations object unnecessarily.
+      return;
+    }
+
     Annotations* annotations = Annotations::allocate(_loader_data, CHECK);
     annotations->set_class_annotations(_annotations);
     annotations->set_class_type_annotations(_type_annotations);
     annotations->set_fields_annotations(_fields_annotations);
     annotations->set_fields_type_annotations(_fields_type_annotations);
-    this_klass->set_annotations(annotations);
-  }
-
+
+    // This is the Annotations object that will be
+    // assigned to InstanceKlass being constructed.
+    _combined_annotations = annotations;
+
+    // The annotation arrays below have been transferred to
+    // _combined_annotations, so these fields can now be cleared.
+    _annotations             = NULL;
+    _type_annotations        = NULL;
+    _fields_annotations      = NULL;
+    _fields_type_annotations = NULL;
+}
+
+// Transfer ownership of metadata allocated to the InstanceKlass.
+void ClassFileParser::apply_parsed_class_metadata(
+                                            instanceKlassHandle this_klass,
+                                            int java_fields_count, TRAPS) {
   _cp->set_pool_holder(this_klass());
   this_klass->set_constants(_cp);
   this_klass->set_fields(_fields, java_fields_count);
@@ -3082,6 +3104,7 @@
   this_klass->set_inner_classes(_inner_classes);
   this_klass->set_local_interfaces(_local_interfaces);
   this_klass->set_transitive_interfaces(_transitive_interfaces);
+  this_klass->set_annotations(_combined_annotations);
 
   // Clear out these fields so they don't get deallocated by the destructor
   clear_class_metadata();
@@ -3693,6 +3716,7 @@
   JvmtiCachedClassFileData *cached_class_file = NULL;
   Handle class_loader(THREAD, loader_data->class_loader());
   bool has_default_methods = false;
+  bool declares_default_methods = false;
   ResourceMark rm(THREAD);
 
   ClassFileStream* cfs = stream();
@@ -3747,7 +3771,15 @@
   instanceKlassHandle nullHandle;
 
   // Figure out whether we can skip format checking (matching classic VM behavior)
-  _need_verify = Verifier::should_verify_for(class_loader(), verify);
+  if (DumpSharedSpaces) {
+    // verify == true means it's a 'remote' class (i.e., non-boot class)
+    // Verification decision is based on BytecodeVerificationRemote flag
+    // for those classes.
+    _need_verify = (verify) ? BytecodeVerificationRemote :
+                              BytecodeVerificationLocal;
+  } else {
+    _need_verify = Verifier::should_verify_for(class_loader(), verify);
+  }
 
   // Set the verify flag in stream
   cfs->set_verify(_need_verify);
@@ -3766,6 +3798,18 @@
   u2 minor_version = cfs->get_u2_fast();
   u2 major_version = cfs->get_u2_fast();
 
+  if (DumpSharedSpaces && major_version < JAVA_1_5_VERSION) {
+    ResourceMark rm;
+    warning("Pre JDK 1.5 class not supported by CDS: %u.%u %s",
+            major_version,  minor_version, name->as_C_string());
+    Exceptions::fthrow(
+      THREAD_AND_LOCATION,
+      vmSymbols::java_lang_UnsupportedClassVersionError(),
+      "Unsupported major.minor version for dump time %u.%u",
+      major_version,
+      minor_version);
+  }
+
   // Check version numbers - we check this even with verifier off
   if (!is_supported_version(major_version, minor_version)) {
     if (name == NULL) {
@@ -3873,6 +3917,18 @@
       if (cfs->source() != NULL) tty->print(" from %s", cfs->source());
       tty->print_cr("]");
     }
+#if INCLUDE_CDS
+    if (DumpLoadedClassList != NULL && cfs->source() != NULL && classlist_file->is_open()) {
+      // Only dump the classes that can be stored into CDS archive
+      if (SystemDictionaryShared::is_sharing_possible(loader_data)) {
+        if (name != NULL) {
+          ResourceMark rm(THREAD);
+          classlist_file->print_cr("%s", name->as_C_string());
+          classlist_file->flush();
+        }
+      }
+    }
+#endif
 
     u2 super_class_index = cfs->get_u2_fast();
     instanceKlassHandle super_klass = parse_super_class(super_class_index,
@@ -3898,13 +3954,20 @@
     Array<Method*>* methods = parse_methods(access_flags.is_interface(),
                                             &promoted_flags,
                                             &has_final_method,
-                                            &has_default_methods,
+                                            &declares_default_methods,
                                             CHECK_(nullHandle));
+    if (declares_default_methods) {
+      has_default_methods = true;
+    }
 
     // Additional attributes
     ClassAnnotationCollector parsed_annotations;
     parse_classfile_attributes(&parsed_annotations, CHECK_(nullHandle));
 
+    // Finalize the Annotations metadata object,
+    // now that all annotation arrays have been created.
+    create_combined_annotations(CHECK_(nullHandle));
+
     // Make sure this is the end of class file stream
     guarantee_property(cfs->at_eos(), "Extra bytes at the end of class file %s", CHECK_(nullHandle));
 
@@ -4042,6 +4105,7 @@
     this_klass->set_minor_version(minor_version);
     this_klass->set_major_version(major_version);
     this_klass->set_has_default_methods(has_default_methods);
+    this_klass->set_declares_default_methods(declares_default_methods);
 
     if (!host_klass.is_null()) {
       assert (this_klass->is_anonymous(), "should be the same");
@@ -4112,8 +4176,8 @@
     }
 
     // Allocate mirror and initialize static fields
-    java_lang_Class::create_mirror(this_klass, protection_domain, CHECK_(nullHandle));
-
+    java_lang_Class::create_mirror(this_klass, class_loader, protection_domain,
+                                   CHECK_(nullHandle));
 
     // Generate any default methods - default methods are interface methods
     // that have a default implementation.  This is new with Lambda project.
@@ -4135,8 +4199,12 @@
         tty->print("[Loaded %s from %s]\n", this_klass->external_name(),
                    cfs->source());
       } else if (class_loader.is_null()) {
-        if (THREAD->is_Java_thread()) {
-          Klass* caller = ((JavaThread*)THREAD)->security_get_caller_class(1);
+        Klass* caller =
+            THREAD->is_Java_thread()
+                ? ((JavaThread*)THREAD)->security_get_caller_class(1)
+                : NULL;
+        // caller can be NULL, for example, during a JVMTI VM_Init hook
+        if (caller != NULL) {
           tty->print("[Loaded %s by instance of %s]\n",
                      this_klass->external_name(),
                      InstanceKlass::cast(caller)->external_name());
@@ -4204,10 +4272,27 @@
   InstanceKlass::deallocate_interfaces(_loader_data, _super_klass(),
                                        _local_interfaces, _transitive_interfaces);
 
-  MetadataFactory::free_array<u1>(_loader_data, _annotations);
-  MetadataFactory::free_array<u1>(_loader_data, _type_annotations);
-  Annotations::free_contents(_loader_data, _fields_annotations);
-  Annotations::free_contents(_loader_data, _fields_type_annotations);
+  if (_combined_annotations != NULL) {
+    // After all annotations arrays have been created, they are installed into the
+    // Annotations object that will be assigned to the InstanceKlass being created.
+
+    // Deallocate the Annotations object and the installed annotations arrays.
+    _combined_annotations->deallocate_contents(_loader_data);
+
+    // If the _combined_annotations pointer is non-NULL,
+    // then the other annotations fields should have been cleared.
+    assert(_annotations             == NULL, "Should have been cleared");
+    assert(_type_annotations        == NULL, "Should have been cleared");
+    assert(_fields_annotations      == NULL, "Should have been cleared");
+    assert(_fields_type_annotations == NULL, "Should have been cleared");
+  } else {
+    // If the annotations arrays were not installed into the Annotations object,
+    // then they have to be deallocated explicitly.
+    MetadataFactory::free_array<u1>(_loader_data, _annotations);
+    MetadataFactory::free_array<u1>(_loader_data, _type_annotations);
+    Annotations::free_contents(_loader_data, _fields_annotations);
+    Annotations::free_contents(_loader_data, _fields_type_annotations);
+  }
 
   clear_class_metadata();
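
A minimal standalone sketch of the ownership hand-off the cleanup path above relies on (plain C++ with hypothetical stand-in types, not HotSpot's Annotations/MetadataFactory API): once create_combined_annotations() installs the per-kind arrays into the combined object, the individual fields are cleared, so cleanup frees either the combined object or the loose arrays, never both.

#include <cassert>
#include <cstdlib>

// Hypothetical stand-ins; the real types are Annotations and ClassFileParser.
struct AnnotationsBundle {
  char* class_annotations = nullptr;
  char* field_annotations = nullptr;
  void deallocate_contents() {
    free(class_annotations);
    free(field_annotations);
  }
};

struct ParserSketch {
  char* _annotations        = nullptr;
  char* _fields_annotations = nullptr;
  AnnotationsBundle* _combined_annotations = nullptr;

  void create_combined_annotations() {
    _combined_annotations = new AnnotationsBundle();
    _combined_annotations->class_annotations = _annotations;
    _combined_annotations->field_annotations = _fields_annotations;
    // Sources are cleared after installation, so exactly one owner remains.
    _annotations = _fields_annotations = nullptr;
  }

  ~ParserSketch() {
    if (_combined_annotations != nullptr) {
      // Arrays were installed: free them through the combined object only.
      _combined_annotations->deallocate_contents();
      delete _combined_annotations;
      assert(_annotations == nullptr && _fields_annotations == nullptr);
    } else {
      // Arrays were never installed: free them individually.
      free(_annotations);
      free(_fields_annotations);
    }
  }
};
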
 
--- a/src/share/vm/classfile/classFileParser.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/classfile/classFileParser.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -75,6 +75,7 @@
   Array<u2>*       _inner_classes;
   Array<Klass*>*   _local_interfaces;
   Array<Klass*>*   _transitive_interfaces;
+  Annotations*     _combined_annotations;
   AnnotationArray* _annotations;
   AnnotationArray* _type_annotations;
   Array<AnnotationArray*>* _fields_annotations;
@@ -86,6 +87,8 @@
   void set_class_generic_signature_index(u2 x) { _generic_signature_index = x; }
   void set_class_sde_buffer(char* x, int len)  { _sde_buffer = x; _sde_length = len; }
 
+  void create_combined_annotations(TRAPS);
+
   void init_parsed_class_attributes(ClassLoaderData* loader_data) {
     _loader_data = loader_data;
     _synthetic_flag = false;
@@ -110,6 +113,7 @@
     _inner_classes = NULL;
     _local_interfaces = NULL;
     _transitive_interfaces = NULL;
+    _combined_annotations = NULL;
     _annotations = _type_annotations = NULL;
     _fields_annotations = _fields_type_annotations = NULL;
   }
@@ -247,7 +251,7 @@
   Array<Method*>* parse_methods(bool is_interface,
                                 AccessFlags* promoted_flags,
                                 bool* has_final_method,
-                                bool* has_default_method,
+                                bool* declares_default_methods,
                                 TRAPS);
   intArray* sort_methods(Array<Method*>* methods);
 
--- a/src/share/vm/classfile/classFileStream.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/classfile/classFileStream.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
   THROW_MSG(vmSymbols::java_lang_ClassFormatError(), "Truncated class file");
 }
 
-ClassFileStream::ClassFileStream(u1* buffer, int length, char* source) {
+ClassFileStream::ClassFileStream(u1* buffer, int length, const char* source) {
   _buffer_start = buffer;
   _buffer_end   = buffer + length;
   _current      = buffer;
--- a/src/share/vm/classfile/classFileStream.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/classfile/classFileStream.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,20 +53,20 @@
   u1*   _buffer_start; // Buffer bottom
   u1*   _buffer_end;   // Buffer top (one past last element)
   u1*   _current;      // Current buffer position
-  char* _source;       // Source of stream (directory name, ZIP/JAR archive name)
+  const char* _source; // Source of stream (directory name, ZIP/JAR archive name)
   bool  _need_verify;  // True if verification is on for the class file
 
   void truncated_file_error(TRAPS);
  public:
   // Constructor
-  ClassFileStream(u1* buffer, int length, char* source);
+  ClassFileStream(u1* buffer, int length, const char* source);
 
   // Buffer access
   u1* buffer() const           { return _buffer_start; }
   int length() const           { return _buffer_end - _buffer_start; }
   u1* current() const          { return _current; }
   void set_current(u1* pos)    { _current = pos; }
-  char* source() const         { return _source; }
+  const char* source() const   { return _source; }
   void set_verify(bool flag)   { _need_verify = flag; }
 
   void check_truncated_file(bool b, TRAPS) {
--- a/src/share/vm/classfile/classLoader.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/classfile/classLoader.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,8 +26,13 @@
 #include "classfile/classFileParser.hpp"
 #include "classfile/classFileStream.hpp"
 #include "classfile/classLoader.hpp"
+#include "classfile/classLoaderExt.hpp"
 #include "classfile/classLoaderData.inline.hpp"
 #include "classfile/javaClasses.hpp"
+#if INCLUDE_CDS
+#include "classfile/sharedPathsMiscInfo.hpp"
+#include "classfile/sharedClassUtil.hpp"
+#endif
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
@@ -35,6 +40,7 @@
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/oopMapCache.hpp"
 #include "memory/allocation.inline.hpp"
+#include "memory/filemap.hpp"
 #include "memory/generation.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/universe.inline.hpp"
@@ -131,8 +137,12 @@
 
 ClassPathEntry* ClassLoader::_first_entry         = NULL;
 ClassPathEntry* ClassLoader::_last_entry          = NULL;
+int             ClassLoader::_num_entries         = 0;
 PackageHashtable* ClassLoader::_package_hash_table = NULL;
 
+#if INCLUDE_CDS
+SharedPathsMiscInfo* ClassLoader::_shared_paths_misc_info = NULL;
+#endif
 // helper routines
 bool string_starts_with(const char* str, const char* str_to_find) {
   size_t str_len = strlen(str);
@@ -196,9 +206,10 @@
   return false;
 }
 
-ClassPathDirEntry::ClassPathDirEntry(char* dir) : ClassPathEntry() {
-  _dir = NEW_C_HEAP_ARRAY(char, strlen(dir)+1, mtClass);
-  strcpy(_dir, dir);
+ClassPathDirEntry::ClassPathDirEntry(const char* dir) : ClassPathEntry() {
+  char* copy = NEW_C_HEAP_ARRAY(char, strlen(dir)+1, mtClass);
+  strcpy(copy, dir);
+  _dir = copy;
 }
 
 
@@ -211,6 +222,14 @@
   // check if file exists
   struct stat st;
   if (os::stat(path, &st) == 0) {
+#if INCLUDE_CDS
+    if (DumpSharedSpaces) {
+      // We have already checked in ClassLoader::check_shared_classpath() that the directory is empty, so
+      // we should never find a file underneath it -- unless the user has added a new file while we are
+      // running the dump, in which case let's quit!
+      ShouldNotReachHere();
+    }
+#endif
     // found file, open it
     int file_handle = os::open(path, 0, 0);
     if (file_handle != -1) {
@@ -234,8 +253,9 @@
 
 ClassPathZipEntry::ClassPathZipEntry(jzfile* zip, const char* zip_name) : ClassPathEntry() {
   _zip = zip;
-  _zip_name = NEW_C_HEAP_ARRAY(char, strlen(zip_name)+1, mtClass);
-  strcpy(_zip_name, zip_name);
+  char *copy = NEW_C_HEAP_ARRAY(char, strlen(zip_name)+1, mtClass);
+  strcpy(copy, zip_name);
+  _zip_name = copy;
 }
 
 ClassPathZipEntry::~ClassPathZipEntry() {
@@ -245,13 +265,13 @@
   FREE_C_HEAP_ARRAY(char, _zip_name, mtClass);
 }
 
-ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) {
-  // enable call to C land
+u1* ClassPathZipEntry::open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS) {
+  // enable call to C land
   JavaThread* thread = JavaThread::current();
   ThreadToNativeFromVM ttn(thread);
   // check whether zip archive contains name
-  jint filesize, name_len;
-  jzentry* entry = (*FindEntry)(_zip, name, &filesize, &name_len);
+  jint name_len;
+  jzentry* entry = (*FindEntry)(_zip, name, filesize, &name_len);
   if (entry == NULL) return NULL;
   u1* buffer;
   char name_buf[128];
@@ -262,19 +282,33 @@
     filename = NEW_RESOURCE_ARRAY(char, name_len + 1);
   }
 
-  // file found, get pointer to class in mmaped jar file.
+  // file found, get pointer to the entry in the mmapped jar file.
   if (ReadMappedEntry == NULL ||
       !(*ReadMappedEntry)(_zip, entry, &buffer, filename)) {
-      // mmaped access not available, perhaps due to compression,
+      // mmapped access not available, perhaps due to compression,
       // read contents into resource array
-      buffer     = NEW_RESOURCE_ARRAY(u1, filesize);
+      int size = (*filesize) + ((nul_terminate) ? 1 : 0);
+      buffer = NEW_RESOURCE_ARRAY(u1, size);
       if (!(*ReadEntry)(_zip, entry, buffer, filename)) return NULL;
   }
+
+  // return result
+  if (nul_terminate) {
+    buffer[*filesize] = 0;
+  }
+  return buffer;
+}
+
+ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) {
+  jint filesize;
+  u1* buffer = open_entry(name, &filesize, false, CHECK_NULL);
+  if (buffer == NULL) {
+    return NULL;
+  }
   if (UsePerfData) {
     ClassLoader::perf_sys_classfile_bytes_read()->inc(filesize);
   }
-  // return result
-  return new ClassFileStream(buffer, filesize, _zip_name);    // Resource allocated
+  return new ClassFileStream(buffer, filesize, _zip_name); // Resource allocated
 }
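
A self-contained sketch of the nul_terminate contract introduced by open_entry() above (ordinary C++, with the zip callbacks faked by an in-memory string): when nul_terminate is true the buffer is sized one byte larger and terminated with 0, so text resources read from a JAR can be used as C strings, while class files keep their exact size.

#include <cstdio>
#include <cstring>

// Fakes FindEntry/ReadEntry by copying from an in-memory string.
static unsigned char* open_entry_sketch(const char* data, int* filesize, bool nul_terminate) {
  *filesize = (int)strlen(data);                     // size reported by the (faked) zip lookup
  int size = *filesize + (nul_terminate ? 1 : 0);    // one extra byte for the terminator
  unsigned char* buffer = new unsigned char[size];
  memcpy(buffer, data, *filesize);                   // stands in for ReadEntry
  if (nul_terminate) {
    buffer[*filesize] = 0;                           // now usable as a C string
  }
  return buffer;
}

int main() {
  int size = 0;
  unsigned char* text = open_entry_sketch("% VERSION 2", &size, /*nul_terminate=*/true);
  printf("%s (%d bytes)\n", (const char*)text, size);
  delete[] text;
  return 0;
}
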
 
 // invoke function for each entry in the zip file
@@ -289,12 +323,13 @@
   }
 }
 
-LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st) : ClassPathEntry() {
+LazyClassPathEntry::LazyClassPathEntry(const char* path, const struct stat* st, bool throw_exception) : ClassPathEntry() {
   _path = strdup(path);
   _st = *st;
   _meta_index = NULL;
   _resolved_entry = NULL;
   _has_error = false;
+  _throw_exception = throw_exception;
 }
 
 bool LazyClassPathEntry::is_jar_file() {
@@ -306,7 +341,11 @@
     return (ClassPathEntry*) _resolved_entry;
   }
   ClassPathEntry* new_entry = NULL;
-  new_entry = ClassLoader::create_class_path_entry(_path, &_st, false, CHECK_NULL);
+  new_entry = ClassLoader::create_class_path_entry(_path, &_st, false, _throw_exception, CHECK_NULL);
+  if (!_throw_exception && new_entry == NULL) {
+    assert(!HAS_PENDING_EXCEPTION, "must be");
+    return NULL;
+  }
   {
     ThreadCritical tc;
     if (_resolved_entry == NULL) {
@@ -340,6 +379,23 @@
   return true;
 }
 
+u1* LazyClassPathEntry::open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS) {
+  if (_has_error) {
+    return NULL;
+  }
+  ClassPathEntry* cpe = resolve_entry(THREAD);
+  if (cpe == NULL) {
+    _has_error = true;
+    return NULL;
+  } else if (cpe->is_jar_file()) {
+    return ((ClassPathZipEntry*)cpe)->open_entry(name, filesize, nul_terminate, THREAD);
+  } else {
+    ShouldNotReachHere();
+    *filesize = 0;
+    return NULL;
+  }
+}
+
 static void print_meta_index(LazyClassPathEntry* entry,
                              GrowableArray<char*>& meta_packages) {
   tty->print("[Meta index for %s=", entry->name());
@@ -350,15 +406,62 @@
   tty->print_cr("]");
 }
 
+#if INCLUDE_CDS
+void ClassLoader::exit_with_path_failure(const char* error, const char* message) {
+  assert(DumpSharedSpaces, "only called at dump time");
+  tty->print_cr("Hint: enable -XX:+TraceClassPaths to diagnose the failure");
+  vm_exit_during_initialization(error, message);
+}
+#endif
 
-void ClassLoader::setup_meta_index() {
+void ClassLoader::trace_class_path(const char* msg, const char* name) {
+  if (!TraceClassPaths) {
+    return;
+  }
+
+  if (msg) {
+    tty->print("%s", msg);
+  }
+  if (name) {
+    if (strlen(name) < 256) {
+      tty->print("%s", name);
+    } else {
+      // For very long paths, we need to print each character separately,
+      // as print_cr() has a length limit
+      while (name[0] != '\0') {
+        tty->print("%c", name[0]);
+        name++;
+      }
+    }
+  }
+  if (msg && msg[0] == '[') {
+    tty->print_cr("]");
+  } else {
+    tty->cr();
+  }
+}
+
+void ClassLoader::setup_bootstrap_meta_index() {
   // Set up meta index which allows us to open boot jars lazily if
   // class data sharing is enabled
+  const char* meta_index_path = Arguments::get_meta_index_path();
+  const char* meta_index_dir  = Arguments::get_meta_index_dir();
+  setup_meta_index(meta_index_path, meta_index_dir, 0);
+}
+
+void ClassLoader::setup_meta_index(const char* meta_index_path, const char* meta_index_dir, int start_index) {
   const char* known_version = "% VERSION 2";
-  char* meta_index_path = Arguments::get_meta_index_path();
-  char* meta_index_dir  = Arguments::get_meta_index_dir();
   FILE* file = fopen(meta_index_path, "r");
   int line_no = 0;
+#if INCLUDE_CDS
+  if (DumpSharedSpaces) {
+    if (file != NULL) {
+      _shared_paths_misc_info->add_required_file(meta_index_path);
+    } else {
+      _shared_paths_misc_info->add_nonexist_path(meta_index_path);
+    }
+  }
+#endif
   if (file != NULL) {
     ResourceMark rm;
     LazyClassPathEntry* cur_entry = NULL;
@@ -393,7 +496,7 @@
           // Hand off current packages to current lazy entry (if any)
           if ((cur_entry != NULL) &&
               (boot_class_path_packages.length() > 0)) {
-            if (TraceClassLoading && Verbose) {
+            if ((TraceClassLoading || TraceClassPaths) && Verbose) {
               print_meta_index(cur_entry, boot_class_path_packages);
             }
             MetaIndex* index = new MetaIndex(boot_class_path_packages.adr_at(0),
@@ -404,8 +507,10 @@
           boot_class_path_packages.clear();
 
           // Find lazy entry corresponding to this jar file
-          for (ClassPathEntry* entry = _first_entry; entry != NULL; entry = entry->next()) {
-            if (entry->is_lazy() &&
+          int count = 0;
+          for (ClassPathEntry* entry = _first_entry; entry != NULL; entry = entry->next(), count++) {
+            if (count >= start_index &&
+                entry->is_lazy() &&
                 string_starts_with(entry->name(), meta_index_dir) &&
                 string_ends_with(entry->name(), &package_name[2])) {
               cur_entry = (LazyClassPathEntry*) entry;
@@ -442,7 +547,7 @@
     // Hand off current packages to current lazy entry (if any)
     if ((cur_entry != NULL) &&
         (boot_class_path_packages.length() > 0)) {
-      if (TraceClassLoading && Verbose) {
+      if ((TraceClassLoading || TraceClassPaths) && Verbose) {
         print_meta_index(cur_entry, boot_class_path_packages);
       }
       MetaIndex* index = new MetaIndex(boot_class_path_packages.adr_at(0),
@@ -453,36 +558,96 @@
   }
 }
 
+#if INCLUDE_CDS
+void ClassLoader::check_shared_classpath(const char *path) {
+  if (strcmp(path, "") == 0) {
+    exit_with_path_failure("Cannot have empty path in archived classpaths", NULL);
+  }
+
+  struct stat st;
+  if (os::stat(path, &st) == 0) {
+    if ((st.st_mode & S_IFREG) != S_IFREG) { // is directory
+      if (!os::dir_is_empty(path)) {
+        tty->print_cr("Error: non-empty directory '%s'", path);
+        exit_with_path_failure("CDS allows only empty directories in archived classpaths", NULL);
+      }
+    }
+  }
+}
+#endif
+
 void ClassLoader::setup_bootstrap_search_path() {
   assert(_first_entry == NULL, "should not setup bootstrap class search path twice");
-  char* sys_class_path = os::strdup(Arguments::get_sysclasspath());
-  if (TraceClassLoading && Verbose) {
-    tty->print_cr("[Bootstrap loader class path=%s]", sys_class_path);
+  const char* sys_class_path = Arguments::get_sysclasspath();
+  if (PrintSharedArchiveAndExit) {
+    // Don't print sys_class_path - this is the bootcp of this current VM process, not necessarily
+    // the same as the bootcp of the shared archive.
+  } else {
+    trace_class_path("[Bootstrap loader class path=", sys_class_path);
+  }
+#if INCLUDE_CDS
+  if (DumpSharedSpaces) {
+    _shared_paths_misc_info->add_boot_classpath(sys_class_path);
   }
+#endif
+  setup_search_path(sys_class_path);
+}
 
-  int len = (int)strlen(sys_class_path);
+#if INCLUDE_CDS
+int ClassLoader::get_shared_paths_misc_info_size() {
+  return _shared_paths_misc_info->get_used_bytes();
+}
+
+void* ClassLoader::get_shared_paths_misc_info() {
+  return _shared_paths_misc_info->buffer();
+}
+
+bool ClassLoader::check_shared_paths_misc_info(void *buf, int size) {
+  SharedPathsMiscInfo* checker = SharedClassUtil::allocate_shared_paths_misc_info((char*)buf, size);
+  bool result = checker->check();
+  delete checker;
+  return result;
+}
+#endif
+
+void ClassLoader::setup_search_path(const char *class_path, bool canonicalize) {
+  int offset = 0;
+  int len = (int)strlen(class_path);
   int end = 0;
 
   // Iterate over class path entries
   for (int start = 0; start < len; start = end) {
-    while (sys_class_path[end] && sys_class_path[end] != os::path_separator()[0]) {
+    while (class_path[end] && class_path[end] != os::path_separator()[0]) {
       end++;
     }
-    char* path = NEW_C_HEAP_ARRAY(char, end-start+1, mtClass);
-    strncpy(path, &sys_class_path[start], end-start);
-    path[end-start] = '\0';
-    update_class_path_entry_list(path, false);
-    FREE_C_HEAP_ARRAY(char, path, mtClass);
-    while (sys_class_path[end] == os::path_separator()[0]) {
+    EXCEPTION_MARK;
+    ResourceMark rm(THREAD);
+    char* path = NEW_RESOURCE_ARRAY(char, end - start + 1);
+    strncpy(path, &class_path[start], end - start);
+    path[end - start] = '\0';
+    if (canonicalize) {
+      char* canonical_path = NEW_RESOURCE_ARRAY(char, JVM_MAXPATHLEN + 1);
+      if (get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
+        path = canonical_path;
+      }
+    }
+    update_class_path_entry_list(path, /*check_for_duplicates=*/canonicalize);
+#if INCLUDE_CDS
+    if (DumpSharedSpaces) {
+      check_shared_classpath(path);
+    }
+#endif
+    while (class_path[end] == os::path_separator()[0]) {
       end++;
     }
   }
 }
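
A standalone sketch of the splitting loop in setup_search_path() above (plain C++; entry handling reduced to a printf): scan to the next path separator, hand off the substring as one entry, then skip any run of consecutive separators.

#include <cstdio>
#include <cstring>

static void for_each_entry(const char* class_path, char sep) {
  int len = (int)strlen(class_path);
  int end = 0;
  // Iterate over class path entries
  for (int start = 0; start < len; start = end) {
    while (class_path[end] && class_path[end] != sep) {
      end++;
    }
    printf("entry: %.*s\n", end - start, class_path + start);
    while (class_path[end] == sep) {   // skip runs of separators so "a::b" yields two entries
      end++;
    }
  }
}

int main() {
  for_each_entry("/lib/rt.jar:/lib/ext::/classes", ':');
  return 0;
}
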
 
-ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st, bool lazy, TRAPS) {
+ClassPathEntry* ClassLoader::create_class_path_entry(const char *path, const struct stat* st,
+                                                     bool lazy, bool throw_exception, TRAPS) {
   JavaThread* thread = JavaThread::current();
   if (lazy) {
-    return new LazyClassPathEntry(path, st);
+    return new LazyClassPathEntry(path, st, throw_exception);
   }
   ClassPathEntry* new_entry = NULL;
   if ((st->st_mode & S_IFREG) == S_IFREG) {
@@ -491,7 +656,11 @@
     char canonical_path[JVM_MAXPATHLEN];
     if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
       // This matches the classic VM
-      THROW_MSG_(vmSymbols::java_io_IOException(), "Bad pathname", NULL);
+      if (throw_exception) {
+        THROW_MSG_(vmSymbols::java_io_IOException(), "Bad pathname", NULL);
+      } else {
+        return NULL;
+      }
     }
     char* error_msg = NULL;
     jzfile* zip;
@@ -503,7 +672,7 @@
     }
     if (zip != NULL && error_msg == NULL) {
       new_entry = new ClassPathZipEntry(zip, path);
-      if (TraceClassLoading) {
+      if (TraceClassLoading || TraceClassPaths) {
         tty->print_cr("[Opened %s]", path);
       }
     } else {
@@ -517,12 +686,16 @@
         msg = NEW_RESOURCE_ARRAY(char, len);
         jio_snprintf(msg, len - 1, "error in opening JAR file <%s> %s", error_msg, path);
       }
-      THROW_MSG_(vmSymbols::java_lang_ClassNotFoundException(), msg, NULL);
+      if (throw_exception) {
+        THROW_MSG_(vmSymbols::java_lang_ClassNotFoundException(), msg, NULL);
+      } else {
+        return NULL;
+      }
     }
   } else {
     // Directory
     new_entry = new ClassPathDirEntry(path);
-    if (TraceClassLoading) {
+    if (TraceClassLoading || TraceClassPaths) {
       tty->print_cr("[Path %s]", path);
     }
   }
@@ -537,11 +710,8 @@
   struct stat st;
   if (os::stat(path, &st) == 0) {
     if ((st.st_mode & S_IFREG) == S_IFREG) {
-      char orig_path[JVM_MAXPATHLEN];
       char canonical_path[JVM_MAXPATHLEN];
-
-      strcpy(orig_path, path);
-      if (get_canonical_path(orig_path, canonical_path, JVM_MAXPATHLEN)) {
+      if (get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
         char* error_msg = NULL;
         jzfile* zip;
         {
@@ -583,23 +753,37 @@
       _last_entry = new_entry;
     }
   }
+  _num_entries++;
 }
 
-void ClassLoader::update_class_path_entry_list(char *path,
-                                               bool check_for_duplicates) {
+// Returns true IFF the file/dir exists and the entry was successfully created.
+bool ClassLoader::update_class_path_entry_list(const char *path,
+                                               bool check_for_duplicates,
+                                               bool throw_exception) {
   struct stat st;
   if (os::stat(path, &st) == 0) {
     // File or directory found
     ClassPathEntry* new_entry = NULL;
     Thread* THREAD = Thread::current();
-    new_entry = create_class_path_entry(path, &st, LazyBootClassLoader, CHECK);
+    new_entry = create_class_path_entry(path, &st, LazyBootClassLoader, throw_exception, CHECK_(false));
+    if (new_entry == NULL) {
+      return false;
+    }
     // The kernel VM adds dynamically to the end of the classloader path and
     // doesn't reorder the bootclasspath which would break java.lang.Package
     // (see PackageInfo).
     // Add new entry to linked list
     if (!check_for_duplicates || !contains_entry(new_entry)) {
-      add_to_list(new_entry);
+      ClassLoaderExt::add_class_path_entry(path, check_for_duplicates, new_entry);
     }
+    return true;
+  } else {
+#if INCLUDE_CDS
+    if (DumpSharedSpaces) {
+      _shared_paths_misc_info->add_nonexist_path(path);
+    }
+#endif
+    return false;
   }
 }
 
@@ -758,10 +942,10 @@
     assert(n == number_of_entries(), "just checking");
   }
 
-  void copy_table(char** top, char* end, PackageHashtable* table);
+  CDS_ONLY(void copy_table(char** top, char* end, PackageHashtable* table);)
 };
 
-
+#if INCLUDE_CDS
 void PackageHashtable::copy_table(char** top, char* end,
                                   PackageHashtable* table) {
   // Copy (relocate) the table to the shared space.
@@ -769,33 +953,30 @@
 
   // Calculate the space needed for the package name strings.
   int i;
-  int n = 0;
-  for (i = 0; i < table_size(); ++i) {
-    for (PackageInfo* pp = table->bucket(i);
-                      pp != NULL;
-                      pp = pp->next()) {
-      n += (int)(strlen(pp->pkgname()) + 1);
-    }
-  }
-  if (*top + n + sizeof(intptr_t) >= end) {
-    report_out_of_shared_space(SharedMiscData);
-  }
-
-  // Copy the table data (the strings) to the shared space.
-  n = align_size_up(n, sizeof(HeapWord));
-  *(intptr_t*)(*top) = n;
-  *top += sizeof(intptr_t);
+  intptr_t* tableSize = (intptr_t*)(*top);
+  *top += sizeof(intptr_t);  // For table size
+  char* tableStart = *top;
 
   for (i = 0; i < table_size(); ++i) {
     for (PackageInfo* pp = table->bucket(i);
                       pp != NULL;
                       pp = pp->next()) {
       int n1 = (int)(strlen(pp->pkgname()) + 1);
+      if (*top + n1 >= end) {
+        report_out_of_shared_space(SharedMiscData);
+      }
       pp->set_pkgname((char*)memcpy(*top, pp->pkgname(), n1));
       *top += n1;
     }
   }
   *top = (char*)align_size_up((intptr_t)*top, sizeof(HeapWord));
+  if (*top >= end) {
+    report_out_of_shared_space(SharedMiscData);
+  }
+
+  // Write table size
+  intptr_t len = *top - (char*)tableStart;
+  *tableSize = len;
 }
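
A plain-C++ sketch of the "reserve, write, backfill" layout used by the rewritten copy_table() above (bounds checks omitted; alignment done with ordinary bit math instead of align_size_up, and 'top' is assumed word-aligned on entry): a length word is reserved up front, the variable-sized strings are copied, the cursor is aligned, and the final length is written back into the reserved slot.

#include <cstdint>
#include <cstring>

static char* copy_strings(char* top, const char** strings, int count) {
  intptr_t* size_slot = (intptr_t*)top;   // reserved slot, filled in at the end
  top += sizeof(intptr_t);
  char* data_start = top;

  for (int i = 0; i < count; i++) {
    size_t n = strlen(strings[i]) + 1;    // include the terminating NUL
    memcpy(top, strings[i], n);
    top += n;
  }
  // Align up to a word boundary, then backfill the reserved length word.
  top = (char*)(((uintptr_t)top + sizeof(intptr_t) - 1) & ~(uintptr_t)(sizeof(intptr_t) - 1));
  *size_slot = top - data_start;
  return top;
}
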
 
 
@@ -806,7 +987,7 @@
 void ClassLoader::copy_package_info_table(char** top, char* end) {
   _package_hash_table->copy_table(top, end, _package_hash_table);
 }
-
+#endif
 
 PackageInfo* ClassLoader::lookup_package(const char *pkgname) {
   const char *cp = strrchr(pkgname, '/');
@@ -899,7 +1080,8 @@
 
 instanceKlassHandle ClassLoader::load_classfile(Symbol* h_name, TRAPS) {
   ResourceMark rm(THREAD);
-  EventMark m("loading class %s", h_name->as_C_string());
+  const char* class_name = h_name->as_C_string();
+  EventMark m("loading class %s", class_name);
   ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion);
 
   stringStream st;
@@ -907,18 +1089,24 @@
   // st.print("%s.class", h_name->as_utf8());
   st.print_raw(h_name->as_utf8());
   st.print_raw(".class");
-  char* name = st.as_string();
+  const char* file_name = st.as_string();
+  ClassLoaderExt::Context context(class_name, file_name, THREAD);
 
   // Lookup stream for parsing .class file
   ClassFileStream* stream = NULL;
   int classpath_index = 0;
+  ClassPathEntry* e = NULL;
+  instanceKlassHandle h;
   {
     PerfClassTraceTime vmtimer(perf_sys_class_lookup_time(),
                                ((JavaThread*) THREAD)->get_thread_stat()->perf_timers_addr(),
                                PerfClassTraceTime::CLASS_LOAD);
-    ClassPathEntry* e = _first_entry;
+    e = _first_entry;
     while (e != NULL) {
-      stream = e->open_stream(name, CHECK_NULL);
+      stream = e->open_stream(file_name, CHECK_NULL);
+      if (!context.check(stream, classpath_index)) {
+        return h; // NULL
+      }
       if (stream != NULL) {
         break;
       }
@@ -927,9 +1115,7 @@
     }
   }
 
-  instanceKlassHandle h;
   if (stream != NULL) {
-
     // class file found, parse it
     ClassFileParser parser(stream);
     ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
@@ -939,12 +1125,19 @@
                                                        loader_data,
                                                        protection_domain,
                                                        parsed_name,
-                                                       false,
-                                                       CHECK_(h));
-
-    // add to package table
-    if (add_package(name, classpath_index, THREAD)) {
-      h = result;
+                                                       context.should_verify(classpath_index),
+                                                       THREAD);
+    if (HAS_PENDING_EXCEPTION) {
+      ResourceMark rm;
+      if (DumpSharedSpaces) {
+        tty->print_cr("Preload Error: Failed to load %s", class_name);
+      }
+      return h;
+    }
+    h = context.record_result(classpath_index, e, result, THREAD);
+  } else {
+    if (DumpSharedSpaces) {
+      tty->print_cr("Preload Warning: Cannot find %s", class_name);
     }
   }
 
@@ -1039,14 +1232,27 @@
 
   // lookup zip library entry points
   load_zip_library();
+#if INCLUDE_CDS
   // initialize search path
+  if (DumpSharedSpaces) {
+    _shared_paths_misc_info = SharedClassUtil::allocate_shared_paths_misc_info();
+  }
+#endif
   setup_bootstrap_search_path();
   if (LazyBootClassLoader) {
     // set up meta index which makes boot classpath initialization lazier
-    setup_meta_index();
+    setup_bootstrap_meta_index();
   }
 }
 
+#if INCLUDE_CDS
+void ClassLoader::initialize_shared_path() {
+  if (DumpSharedSpaces) {
+    ClassLoaderExt::setup_search_paths();
+    _shared_paths_misc_info->write_jint(0); // see comments in SharedPathsMiscInfo::check()
+  }
+}
+#endif
 
 jlong ClassLoader::classloader_time_ms() {
   return UsePerfData ?
@@ -1090,11 +1296,17 @@
 }
 
 
-bool ClassLoader::get_canonical_path(char* orig, char* out, int len) {
+bool ClassLoader::get_canonical_path(const char* orig, char* out, int len) {
   assert(orig != NULL && out != NULL && len > 0, "bad arguments");
   if (CanonicalizeEntry != NULL) {
-    JNIEnv* env = JavaThread::current()->jni_environment();
-    if ((CanonicalizeEntry)(env, os::native_path(orig), out, len) < 0) {
+    JavaThread* THREAD = JavaThread::current();
+    JNIEnv* env = THREAD->jni_environment();
+    ResourceMark rm(THREAD);
+
+    // os::native_path writes into orig_copy
+    char* orig_copy = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, strlen(orig)+1);
+    strcpy(orig_copy, orig);
+    if ((CanonicalizeEntry)(env, os::native_path(orig_copy), out, len) < 0) {
       return false;
     }
   } else {
@@ -1412,7 +1624,7 @@
               if (TieredCompilation && TieredStopAtLevel >= CompLevel_full_optimization) {
                 // Clobber the first compile and force second tier compilation
                 nmethod* nm = m->code();
-                if (nm != NULL) {
+                if (nm != NULL && !m->is_method_handle_intrinsic()) {
                   // Throw out the code so that the code cache doesn't fill up
                   nm->make_not_entrant();
                   m->clear_code();
@@ -1431,7 +1643,7 @@
             }
 
             nmethod* nm = m->code();
-            if (nm != NULL) {
+            if (nm != NULL && !m->is_method_handle_intrinsic()) {
               // Throw out the code so that the code cache doesn't fill up
               nm->make_not_entrant();
               m->clear_code();
--- a/src/share/vm/classfile/classLoader.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/classfile/classLoader.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -72,11 +72,11 @@
 
 class ClassPathDirEntry: public ClassPathEntry {
  private:
-  char* _dir;           // Name of directory
+  const char* _dir;           // Name of directory
  public:
   bool is_jar_file()  { return false;  }
   const char* name()  { return _dir; }
-  ClassPathDirEntry(char* dir);
+  ClassPathDirEntry(const char* dir);
   ClassFileStream* open_stream(const char* name, TRAPS);
   // Debugging
   NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
@@ -100,13 +100,14 @@
 
 class ClassPathZipEntry: public ClassPathEntry {
  private:
-  jzfile* _zip;        // The zip archive
-  char*   _zip_name;   // Name of zip archive
+  jzfile* _zip;              // The zip archive
+  const char*   _zip_name;   // Name of zip archive
  public:
   bool is_jar_file()  { return true;  }
   const char* name()  { return _zip_name; }
   ClassPathZipEntry(jzfile* zip, const char* zip_name);
   ~ClassPathZipEntry();
+  u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);
   ClassFileStream* open_stream(const char* name, TRAPS);
   void contents_do(void f(const char* name, void* context), void* context);
   // Debugging
@@ -122,16 +123,18 @@
 // For lazier loading of boot class path entries
 class LazyClassPathEntry: public ClassPathEntry {
  private:
-  char* _path; // dir or file
+  const char* _path; // dir or file
   struct stat _st;
   MetaIndex* _meta_index;
   bool _has_error;
+  bool _throw_exception;
   volatile ClassPathEntry* _resolved_entry;
+ public:
   ClassPathEntry* resolve_entry(TRAPS);
- public:
   bool is_jar_file();
   const char* name()  { return _path; }
-  LazyClassPathEntry(char* path, const struct stat* st);
+  LazyClassPathEntry(const char* path, const struct stat* st, bool throw_exception);
+  u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);
   ClassFileStream* open_stream(const char* name, TRAPS);
   void set_meta_index(MetaIndex* meta_index) { _meta_index = meta_index; }
   virtual bool is_lazy();
@@ -142,6 +145,7 @@
 
 class PackageHashtable;
 class PackageInfo;
+class SharedPathsMiscInfo;
 template <MEMFLAGS F> class HashtableBucket;
 
 class ClassLoader: AllStatic {
@@ -149,7 +153,7 @@
   enum SomeConstants {
     package_hash_table_size = 31  // Number of buckets
   };
- private:
+ protected:
   friend class LazyClassPathEntry;
 
   // Performance counters
@@ -191,10 +195,15 @@
   static ClassPathEntry* _first_entry;
   // Last entry in linked list of ClassPathEntry instances
   static ClassPathEntry* _last_entry;
+  static int _num_entries;
+
   // Hash table used to keep track of loaded packages
   static PackageHashtable* _package_hash_table;
   static const char* _shared_archive;
 
+  // Info used by CDS
+  CDS_ONLY(static SharedPathsMiscInfo * _shared_paths_misc_info;)
+
   // Hash function
   static unsigned int hash(const char *s, int n);
   // Returns the package file name corresponding to the specified package
@@ -205,20 +214,24 @@
   static bool add_package(const char *pkgname, int classpath_index, TRAPS);
 
   // Initialization
-  static void setup_meta_index();
+  static void setup_bootstrap_meta_index();
+  static void setup_meta_index(const char* meta_index_path, const char* meta_index_dir,
+                               int start_index);
   static void setup_bootstrap_search_path();
+  static void setup_search_path(const char *class_path, bool canonicalize=false);
+
   static void load_zip_library();
-  static ClassPathEntry* create_class_path_entry(char *path, const struct stat* st,
-                                                 bool lazy, TRAPS);
+  static ClassPathEntry* create_class_path_entry(const char *path, const struct stat* st,
+                                                 bool lazy, bool throw_exception, TRAPS);
 
   // Canonicalizes path names, so strcmp will work properly. This is mainly
   // to avoid confusing the zip library
-  static bool get_canonical_path(char* orig, char* out, int len);
+  static bool get_canonical_path(const char* orig, char* out, int len);
  public:
   static int crc32(int crc, const char* buf, int len);
-  // Used by the kernel jvm.
-  static void update_class_path_entry_list(char *path,
-                                           bool check_for_duplicates);
+  static bool update_class_path_entry_list(const char *path,
+                                           bool check_for_duplicates,
+                                           bool throw_exception=true);
   static void print_bootclasspath();
 
   // Timing
@@ -301,6 +314,7 @@
 
   // Initialization
   static void initialize();
+  CDS_ONLY(static void initialize_shared_path();)
   static void create_package_info_table();
   static void create_package_info_table(HashtableBucket<mtClass> *t, int length,
                                         int number_of_entries);
@@ -315,10 +329,25 @@
     return e;
   }
 
+  static int num_classpath_entries() {
+    return _num_entries;
+  }
+
+#if INCLUDE_CDS
   // Sharing dump and restore
   static void copy_package_info_buckets(char** top, char* end);
   static void copy_package_info_table(char** top, char* end);
 
+  static void  check_shared_classpath(const char *path);
+  static void  finalize_shared_paths_misc_info();
+  static int   get_shared_paths_misc_info_size();
+  static void* get_shared_paths_misc_info();
+  static bool  check_shared_paths_misc_info(void* info, int size);
+  static void  exit_with_path_failure(const char* error, const char* message);
+#endif
+
+  static void  trace_class_path(const char* msg, const char* name = NULL);
+
   // VM monitoring and management support
   static jlong classloader_time_ms();
   static jlong class_method_total_size();
@@ -342,7 +371,7 @@
 
   // Force compilation of all methods in all classes in bootstrap class path (stress test)
 #ifndef PRODUCT
- private:
+ protected:
   static int _compile_the_world_class_counter;
   static int _compile_the_world_method_counter;
  public:
--- a/src/share/vm/classfile/classLoaderData.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/classfile/classLoaderData.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -64,16 +64,19 @@
 #include "utilities/growableArray.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ostream.hpp"
-
 #if INCLUDE_TRACE
- #include "trace/tracing.hpp"
+#include "trace/tracing.hpp"
 #endif
 
 ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;
 
 ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) :
   _class_loader(h_class_loader()),
-  _is_anonymous(is_anonymous), _keep_alive(is_anonymous), // initially
+  _is_anonymous(is_anonymous),
+  // An anonymous class loader data doesn't have anything to keep
+  // it from being unloaded during parsing of the anonymous class.
+  // The null-class-loader should always be kept alive.
+  _keep_alive(is_anonymous || h_class_loader.is_null()),
   _metaspace(NULL), _unloading(false), _klasses(NULL),
   _claimed(0), _jmethod_ids(NULL), _handles(NULL), _deallocate_list(NULL),
   _next(NULL), _dependencies(dependencies),
@@ -317,12 +320,45 @@
   }
 }
 
+#ifdef ASSERT
+class AllAliveClosure : public OopClosure {
+  BoolObjectClosure* _is_alive_closure;
+  bool _found_dead;
+ public:
+  AllAliveClosure(BoolObjectClosure* is_alive_closure) : _is_alive_closure(is_alive_closure), _found_dead(false) {}
+  template <typename T> void do_oop_work(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+      if (!_is_alive_closure->do_object_b(obj)) {
+        _found_dead = true;
+      }
+    }
+  }
+  void do_oop(oop* p)       { do_oop_work<oop>(p); }
+  void do_oop(narrowOop* p) { do_oop_work<narrowOop>(p); }
+  bool found_dead()         { return _found_dead; }
+};
+#endif
+
+oop ClassLoaderData::keep_alive_object() const {
+  assert(!keep_alive(), "Don't use with CLDs that are artificially kept alive");
+  return is_anonymous() ? _klasses->java_mirror() : class_loader();
+}
+
 bool ClassLoaderData::is_alive(BoolObjectClosure* is_alive_closure) const {
-  bool alive =
-    is_anonymous() ?
-       is_alive_closure->do_object_b(_klasses->java_mirror()) :
-       class_loader() == NULL || is_alive_closure->do_object_b(class_loader());
-  assert(!alive || claimed(), "must be claimed");
+  bool alive = keep_alive() // null class loader and incomplete anonymous klasses.
+      || is_alive_closure->do_object_b(keep_alive_object());
+
+#ifdef ASSERT
+  if (alive) {
+    AllAliveClosure all_alive_closure(is_alive_closure);
+    KlassToOopClosure klass_closure(&all_alive_closure);
+    const_cast<ClassLoaderData*>(this)->oops_do(&all_alive_closure, &klass_closure, false);
+    assert(!all_alive_closure.found_dead(), err_msg("Found dead oop in alive cld: " PTR_FORMAT, p2i(this)));
+  }
+#endif
+
   return alive;
 }
 
@@ -601,11 +637,36 @@
 
 void ClassLoaderDataGraph::always_strong_oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) {
   if (ClassUnloading) {
-    ClassLoaderData::the_null_class_loader_data()->oops_do(f, klass_closure, must_claim);
-    // keep any special CLDs alive.
-    ClassLoaderDataGraph::keep_alive_oops_do(f, klass_closure, must_claim);
+    keep_alive_oops_do(f, klass_closure, must_claim);
   } else {
-    ClassLoaderDataGraph::oops_do(f, klass_closure, must_claim);
+    oops_do(f, klass_closure, must_claim);
+  }
+}
+
+void ClassLoaderDataGraph::cld_do(CLDClosure* cl) {
+  for (ClassLoaderData* cld = _head; cl != NULL && cld != NULL; cld = cld->next()) {
+    cl->do_cld(cld);
+  }
+}
+
+void ClassLoaderDataGraph::roots_cld_do(CLDClosure* strong, CLDClosure* weak) {
+  for (ClassLoaderData* cld = _head;  cld != NULL; cld = cld->_next) {
+    CLDClosure* closure = cld->keep_alive() ? strong : weak;
+    if (closure != NULL) {
+      closure->do_cld(cld);
+    }
+  }
+}
+
+void ClassLoaderDataGraph::keep_alive_cld_do(CLDClosure* cl) {
+  roots_cld_do(cl, NULL);
+}
+
+void ClassLoaderDataGraph::always_strong_cld_do(CLDClosure* cl) {
+  if (ClassUnloading) {
+    keep_alive_cld_do(cl);
+  } else {
+    cld_do(cl);
   }
 }
 
@@ -660,6 +721,16 @@
   return array;
 }
 
+bool ClassLoaderDataGraph::unload_list_contains(const void* x) {
+  assert(SafepointSynchronize::is_at_safepoint(), "only safe to call at safepoint");
+  for (ClassLoaderData* cld = _unloading; cld != NULL; cld = cld->next()) {
+    if (cld->metaspace_or_null() != NULL && cld->metaspace_or_null()->contains(x)) {
+      return true;
+    }
+  }
+  return false;
+}
+
 #ifndef PRODUCT
 bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
   for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
@@ -675,7 +746,7 @@
 
 // Move class loader data from main list to the unloaded list for unloading
 // and deallocation later.
-bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) {
+bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure, bool clean_alive) {
   ClassLoaderData* data = _head;
   ClassLoaderData* prev = NULL;
   bool seen_dead_loader = false;
@@ -684,16 +755,8 @@
   // purging and we don't want to rewalk the previously unloaded class loader data.
   _saved_unloading = _unloading;
 
-  // mark metadata seen on the stack and code cache so we can delete
-  // unneeded entries.
-  bool has_redefined_a_class = JvmtiExport::has_redefined_a_class();
-  MetadataOnStackMark md_on_stack;
   while (data != NULL) {
-    if (data->keep_alive() || data->is_alive(is_alive_closure)) {
-      if (has_redefined_a_class) {
-        data->classes_do(InstanceKlass::purge_previous_versions);
-      }
-      data->free_deallocate_list();
+    if (data->is_alive(is_alive_closure)) {
       prev = data;
       data = data->next();
       continue;
@@ -715,6 +778,11 @@
     _unloading = dead;
   }
 
+  if (clean_alive) {
+    // Clean previous versions and the deallocate list.
+    ClassLoaderDataGraph::clean_metaspaces();
+  }
+
   if (seen_dead_loader) {
     post_class_unload_events();
   }
@@ -722,6 +790,26 @@
   return seen_dead_loader;
 }
 
+void ClassLoaderDataGraph::clean_metaspaces() {
+  // mark metadata seen on the stack and code cache so we can delete unneeded entries.
+  bool has_redefined_a_class = JvmtiExport::has_redefined_a_class();
+  MetadataOnStackMark md_on_stack(has_redefined_a_class);
+
+  if (has_redefined_a_class) {
+    // purge_previous_versions also cleans weak method links. Because
+    // one method's MDO can reference another method from another
+    // class loader, we need to first clean weak method links for all
+    // class loaders here. Below, we can then free redefined methods
+    // for all class loaders.
+    for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
+      data->classes_do(InstanceKlass::purge_previous_versions);
+    }
+  }
+
+  // Need to purge the previous version before deallocating.
+  free_deallocate_lists();
+}
+
 void ClassLoaderDataGraph::purge() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
   ClassLoaderData* list = _unloading;
@@ -749,6 +837,14 @@
 #endif
 }
 
+void ClassLoaderDataGraph::free_deallocate_lists() {
+  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
+    // We need to keep this data until InstanceKlass::purge_previous_versions has been
+    // called on all alive classes. See the comment in ClassLoaderDataGraph::clean_metaspaces.
+    cld->free_deallocate_list();
+  }
+}
+
 // CDS support
 
 // Global metaspaces for writing information to the shared archive.  When
@@ -780,6 +876,60 @@
   return _rw_metaspace;
 }
 
+ClassLoaderDataGraphKlassIteratorAtomic::ClassLoaderDataGraphKlassIteratorAtomic()
+    : _next_klass(NULL) {
+  ClassLoaderData* cld = ClassLoaderDataGraph::_head;
+  Klass* klass = NULL;
+
+  // Find the first klass in the CLDG.
+  while (cld != NULL) {
+    klass = cld->_klasses;
+    if (klass != NULL) {
+      _next_klass = klass;
+      return;
+    }
+    cld = cld->next();
+  }
+}
+
+Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass_in_cldg(Klass* klass) {
+  Klass* next = klass->next_link();
+  if (next != NULL) {
+    return next;
+  }
+
+  // No more klasses in the current CLD. Time to find a new CLD.
+  ClassLoaderData* cld = klass->class_loader_data();
+  while (next == NULL) {
+    cld = cld->next();
+    if (cld == NULL) {
+      break;
+    }
+    next = cld->_klasses;
+  }
+
+  return next;
+}
+
+Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass() {
+  Klass* head = (Klass*)_next_klass;
+
+  while (head != NULL) {
+    Klass* next = next_klass_in_cldg(head);
+
+    Klass* old_head = (Klass*)Atomic::cmpxchg_ptr(next, &_next_klass, head);
+
+    if (old_head == head) {
+      return head; // Won the CAS.
+    }
+
+    head = old_head;
+  }
+
+  // Nothing more for the iterator to hand out.
+  assert(head == NULL, err_msg("head is " PTR_FORMAT ", expected not null:", p2i(head)));
+  return NULL;
+}
 
 ClassLoaderDataGraphMetaspaceIterator::ClassLoaderDataGraphMetaspaceIterator() {
   _data = ClassLoaderDataGraph::_head;
@@ -833,4 +983,4 @@
   event.commit();
 }
 
-#endif /* INCLUDE_TRACE */
+#endif // INCLUDE_TRACE
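
A simplified sketch of the claim loop in ClassLoaderDataGraphKlassIteratorAtomic::next_klass() above, using std::atomic in place of Atomic::cmpxchg_ptr and a flat linked list in place of walking CLDs: each worker CAS-advances the shared cursor and owns the element it swung past; on failure it retries from the freshly observed head.

#include <atomic>

struct KlassSketch { KlassSketch* next; };

class KlassIteratorSketch {
  std::atomic<KlassSketch*> _next_klass;
 public:
  explicit KlassIteratorSketch(KlassSketch* first) : _next_klass(first) {}

  KlassSketch* next_klass() {
    KlassSketch* head = _next_klass.load();
    while (head != nullptr) {
      KlassSketch* next = head->next;                     // stands in for next_klass_in_cldg()
      if (_next_klass.compare_exchange_weak(head, next)) {
        return head;                                      // won the CAS; this worker owns 'head'
      }
      // On failure, compare_exchange_weak reloaded 'head' with the current cursor; retry.
    }
    return nullptr;                                       // nothing more to hand out
  }
};
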
--- a/src/share/vm/classfile/classLoaderData.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/classfile/classLoaderData.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -31,9 +31,9 @@
 #include "memory/metaspaceCounters.hpp"
 #include "runtime/mutex.hpp"
 #include "utilities/growableArray.hpp"
-
+#include "utilities/macros.hpp"
 #if INCLUDE_TRACE
-# include "utilities/ticks.hpp"
+#include "utilities/ticks.hpp"
 #endif
 
 //
@@ -59,6 +59,7 @@
 class ClassLoaderDataGraph : public AllStatic {
   friend class ClassLoaderData;
   friend class ClassLoaderDataGraphMetaspaceIterator;
+  friend class ClassLoaderDataGraphKlassIteratorAtomic;
   friend class VMStructs;
  private:
   // All CLDs (except the null CLD) can be reached by walking _head->_next->...
@@ -71,18 +72,26 @@
 
   static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS);
   static void post_class_unload_events(void);
+  static void clean_metaspaces();
  public:
   static ClassLoaderData* find_or_create(Handle class_loader, TRAPS);
   static void purge();
   static void clear_claimed_marks();
+  // oops do
   static void oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim);
+  static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
   static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
-  static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
+  // cld do
+  static void cld_do(CLDClosure* cl);
+  static void roots_cld_do(CLDClosure* strong, CLDClosure* weak);
+  static void keep_alive_cld_do(CLDClosure* cl);
+  static void always_strong_cld_do(CLDClosure* cl);
+  // klass do
   static void classes_do(KlassClosure* klass_closure);
   static void classes_do(void f(Klass* const));
   static void loaded_classes_do(KlassClosure* klass_closure);
   static void classes_unloading_do(void f(Klass* const));
-  static bool do_unloading(BoolObjectClosure* is_alive);
+  static bool do_unloading(BoolObjectClosure* is_alive, bool clean_alive);
 
   // CMS support.
   static void remember_new_clds(bool remember) { _saved_head = (remember ? _head : NULL); }
@@ -98,10 +107,13 @@
     }
   }
 
+  static void free_deallocate_lists();
+
   static void dump_on(outputStream * const out) PRODUCT_RETURN;
   static void dump() { dump_on(tty); }
   static void verify();
 
+  static bool unload_list_contains(const void* x);
 #ifndef PRODUCT
   static bool contains_loader_data(ClassLoaderData* loader_data);
 #endif
@@ -134,6 +146,7 @@
   };
 
   friend class ClassLoaderDataGraph;
+  friend class ClassLoaderDataGraphKlassIteratorAtomic;
   friend class ClassLoaderDataGraphMetaspaceIterator;
   friend class MetaDataFactory;
   friend class Method;
@@ -149,7 +162,7 @@
                            // classes in the class loader are allocated.
   Mutex* _metaspace_lock;  // Locks the metaspace for allocations and setup.
   bool _unloading;         // true if this class loader goes away
-  bool _keep_alive;        // if this CLD can be unloaded for anonymous loaders
+  bool _keep_alive;        // if this CLD is kept alive without a keep_alive_object().
   bool _is_anonymous;      // if this CLD is for an anonymous class
   volatile int _claimed;   // true if claimed, for example during GC traces.
                            // To avoid applying oop closure more than once.
@@ -195,7 +208,6 @@
 
   void unload();
   bool keep_alive() const       { return _keep_alive; }
-  bool is_alive(BoolObjectClosure* is_alive_closure) const;
   void classes_do(void f(Klass*));
   void loaded_classes_do(KlassClosure* klass_closure);
   void classes_do(void f(InstanceKlass*));
@@ -207,6 +219,9 @@
   MetaWord* allocate(size_t size);
 
  public:
+
+  bool is_alive(BoolObjectClosure* is_alive_closure) const;
+
   // Accessors
   Metaspace* metaspace_or_null() const     { return _metaspace; }
 
@@ -240,13 +255,16 @@
 
   oop class_loader() const      { return _class_loader; }
 
+  // The object the GC is using to keep this ClassLoaderData alive.
+  oop keep_alive_object() const;
+
   // Returns true if this class loader data is for a loader going away.
   bool is_unloading() const     {
     assert(!(is_the_null_class_loader_data() && _unloading), "The null class loader can never be unloaded");
     return _unloading;
   }
-  // Anonymous class loader data doesn't have anything to keep them from
-  // being unloaded during parsing the anonymous class.
+
+  // Used to make sure that this CLD is not unloaded.
   void set_keep_alive(bool value) { _keep_alive = value; }
 
   unsigned int identity_hash() {
@@ -287,6 +305,16 @@
   void initialize_shared_metaspaces();
 };
 
+// An iterator that distributes Klasses to parallel worker threads.
+class ClassLoaderDataGraphKlassIteratorAtomic : public StackObj {
+  volatile Klass* _next_klass;
+ public:
+  ClassLoaderDataGraphKlassIteratorAtomic();
+  Klass* next_klass();
+ private:
+  static Klass* next_klass_in_cldg(Klass* klass);
+};
+
 class ClassLoaderDataGraphMetaspaceIterator : public StackObj {
   ClassLoaderData* _data;
  public:
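
A minimal sketch (hypothetical types, not the VM's closures) of the routing behind roots_cld_do() declared above: CLDs flagged keep_alive go to the strong closure, all others to the weak one, and either closure may be null.

#include <vector>

struct CLDSketch { bool keep_alive; };

struct CLDClosureSketch {
  virtual void do_cld(CLDSketch* cld) = 0;
  virtual ~CLDClosureSketch() {}
};

static void roots_cld_do_sketch(std::vector<CLDSketch>& graph,
                                CLDClosureSketch* strong, CLDClosureSketch* weak) {
  for (CLDSketch& cld : graph) {
    CLDClosureSketch* closure = cld.keep_alive ? strong : weak;
    if (closure != nullptr) {
      closure->do_cld(&cld);   // strong roots keep the CLD alive; weak ones may let it be unloaded
    }
  }
}
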
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/classLoaderExt.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP
+#define SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP
+
+#include "classfile/classLoader.hpp"
+
+class ClassLoaderExt: public ClassLoader { // AllStatic
+public:
+
+  class Context {
+    const char* _file_name;
+  public:
+    Context(const char* class_name, const char* file_name, TRAPS) {
+      _file_name = file_name;
+    }
+
+    bool check(ClassFileStream* stream, const int classpath_index) {
+      return true;
+    }
+
+    bool should_verify(int classpath_index) {
+      return false;
+    }
+
+    instanceKlassHandle record_result(const int classpath_index,
+                                      ClassPathEntry* e, instanceKlassHandle result, TRAPS) {
+      if (ClassLoader::add_package(_file_name, classpath_index, THREAD)) {
+        if (DumpSharedSpaces) {
+          result->set_shared_classpath_index(classpath_index);
+        }
+        return result;
+      } else {
+        return instanceKlassHandle(); // NULL
+      }
+    }
+  };
+
+
+  static void add_class_path_entry(const char* path, bool check_for_duplicates,
+                                   ClassPathEntry* new_entry) {
+    ClassLoader::add_to_list(new_entry);
+  }
+  static void append_boot_classpath(ClassPathEntry* new_entry) {
+    ClassLoader::add_to_list(new_entry);
+  }
+  static void setup_search_paths() {}
+
+  static void init_lookup_cache(TRAPS) {}
+  static void copy_lookup_cache_to_archive(char** top, char* end) {}
+  static char* restore_lookup_cache_from_archive(char* buffer) {return buffer;}
+  static inline bool is_lookup_cache_enabled() {return false;}
+
+  static bool known_to_not_exist(JNIEnv *env, jobject loader, const char *classname, TRAPS) {return false;}
+  static jobjectArray get_lookup_cache_urls(JNIEnv *env, jobject loader, TRAPS) {return NULL;}
+  static jintArray get_lookup_cache(JNIEnv *env, jobject loader, const char *pkgname, TRAPS) {return NULL;}
+};
+
+#endif // SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP
--- a/src/share/vm/classfile/dictionary.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/classfile/dictionary.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -28,6 +28,7 @@
 #include "memory/iterator.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiRedefineClassesTrace.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "utilities/hashtable.inline.hpp"
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
@@ -129,15 +130,13 @@
 }
 
 
-bool Dictionary::do_unloading() {
+void Dictionary::do_unloading() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-  bool class_was_unloaded = false;
-  int  index = 0; // Defined here for portability! Do not move
 
   // Remove unloadable entries and classes from system dictionary
   // The placeholder array has been handled in always_strong_oops_do.
   DictionaryEntry* probe = NULL;
-  for (index = 0; index < table_size(); index++) {
+  for (int index = 0; index < table_size(); index++) {
     for (DictionaryEntry** p = bucket_addr(index); *p != NULL; ) {
       probe = *p;
       Klass* e = probe->klass();
@@ -157,16 +156,8 @@
         // Do we need to delete this system dictionary entry?
         if (loader_data->is_unloading()) {
           // If the loader is not live this entry should always be
-          // removed (will never be looked up again). Note that this is
-          // not the same as unloading the referred class.
-          if (k_def_class_loader_data == loader_data) {
-            // This is the defining entry, so the referred class is about
-            // to be unloaded.
-            class_was_unloaded = true;
-          }
-          // Also remove this system dictionary entry.
+          // removed (will never be looked up again).
           purge_entry = true;
-
         } else {
           // The loader in this entry is alive. If the klass is dead,
           // (determined by checking the defining class loader)
@@ -195,9 +186,51 @@
       p = probe->next_addr();
     }
   }
-  return class_was_unloaded;
+}
+
+void Dictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
+  // Skip the strong roots probe marking if the closures are the same.
+  if (strong == weak) {
+    oops_do(strong);
+    return;
+  }
+
+  for (int index = 0; index < table_size(); index++) {
+    for (DictionaryEntry *probe = bucket(index);
+                          probe != NULL;
+                          probe = probe->next()) {
+      Klass* e = probe->klass();
+      ClassLoaderData* loader_data = probe->loader_data();
+      if (is_strongly_reachable(loader_data, e)) {
+        probe->set_strongly_reachable();
+      }
+    }
+  }
+  _pd_cache_table->roots_oops_do(strong, weak);
 }
 
+void Dictionary::remove_classes_in_error_state() {
+  assert(DumpSharedSpaces, "supported only when dumping");
+  DictionaryEntry* probe = NULL;
+  for (int index = 0; index < table_size(); index++) {
+    for (DictionaryEntry** p = bucket_addr(index); *p != NULL; ) {
+      probe = *p;
+      InstanceKlass* ik = InstanceKlass::cast(probe->klass());
+      if (ik->is_in_error_state()) { // purge this entry
+        *p = probe->next();
+        if (probe == _current_class_entry) {
+          _current_class_entry = NULL;
+        }
+        free_entry(probe);
+        ResourceMark rm;
+        tty->print_cr("Preload Warning: Removed error class: %s", ik->external_name());
+        continue;
+      }
+
+      p = probe->next_addr();
+    }
+  }
+}
 
 void Dictionary::always_strong_oops_do(OopClosure* blk) {
   // Follow all system classes and temporary placeholders in dictionary; only
@@ -489,6 +522,23 @@
   }
 }
 
+void ProtectionDomainCacheTable::roots_oops_do(OopClosure* strong, OopClosure* weak) {
+  for (int index = 0; index < table_size(); index++) {
+    for (ProtectionDomainCacheEntry* probe = bucket(index);
+                                     probe != NULL;
+                                     probe = probe->next()) {
+      if (probe->is_strongly_reachable()) {
+        probe->reset_strongly_reachable();
+        probe->oops_do(strong);
+      } else {
+        if (weak != NULL) {
+          probe->oops_do(weak);
+        }
+      }
+    }
+  }
+}
+
 uint ProtectionDomainCacheTable::bucket_size() {
   return sizeof(ProtectionDomainCacheEntry);
 }
@@ -655,16 +705,17 @@
 
 
 // ----------------------------------------------------------------------------
-#ifndef PRODUCT
 
-void Dictionary::print() {
+void Dictionary::print(bool details) {
   ResourceMark rm;
   HandleMark   hm;
 
-  tty->print_cr("Java system dictionary (table_size=%d, classes=%d)",
-                 table_size(), number_of_entries());
-  tty->print_cr("^ indicates that initiating loader is different from "
-                "defining loader");
+  if (details) {
+    tty->print_cr("Java system dictionary (table_size=%d, classes=%d)",
+                   table_size(), number_of_entries());
+    tty->print_cr("^ indicates that initiating loader is different from "
+                  "defining loader");
+  }
 
   for (int index = 0; index < table_size(); index++) {
     for (DictionaryEntry* probe = bucket(index);
@@ -675,21 +726,28 @@
       ClassLoaderData* loader_data =  probe->loader_data();
       bool is_defining_class =
          (loader_data == InstanceKlass::cast(e)->class_loader_data());
-      tty->print("%s%s", is_defining_class ? " " : "^",
+      tty->print("%s%s", ((!details) || is_defining_class) ? " " : "^",
                    e->external_name());
 
+      if (details) {
         tty->print(", loader ");
-      loader_data->print_value();
+        if (loader_data != NULL) {
+          loader_data->print_value();
+        } else {
+          tty->print("NULL");
+        }
+      }
       tty->cr();
     }
   }
-  tty->cr();
-  _pd_cache_table->print();
+
+  if (details) {
+    tty->cr();
+    _pd_cache_table->print();
+  }
   tty->cr();
 }
 
-#endif
-
 void Dictionary::verify() {
   guarantee(number_of_entries() >= 0, "Verify of system dictionary failed");
 
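The strong/weak split added by Dictionary::roots_oops_do and ProtectionDomainCacheTable::roots_oops_do above marks entries in one pass and then visits each entry exactly once with either closure. A minimal standalone sketch of that visiting pattern follows; the Entry/Closure types and the roots_do name are illustrative only, not HotSpot's API.

#include <cstddef>
#include <vector>

struct Closure {
  virtual void do_ref(void** p) = 0;
  virtual ~Closure() {}
};

struct Entry {
  void* payload;             // stands in for the oops held by a dictionary entry
  bool  strongly_reachable;  // set by a marking pass, cleared once visited
};

// Visit every entry once: flagged entries go to the strong closure, the rest to
// the weak closure, which may be null (mirroring the NULL check in the hunk above).
void roots_do(std::vector<Entry>& table, Closure* strong, Closure* weak) {
  for (std::size_t i = 0; i < table.size(); i++) {
    Entry& e = table[i];
    if (e.strongly_reachable) {
      e.strongly_reachable = false;      // same role as reset_strongly_reachable()
      strong->do_ref(&e.payload);
    } else if (weak != NULL) {
      weak->do_ref(&e.payload);
    }
  }
}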
--- a/src/share/vm/classfile/dictionary.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/classfile/dictionary.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -89,6 +89,7 @@
   // GC support
   void oops_do(OopClosure* f);
   void always_strong_oops_do(OopClosure* blk);
+  void roots_oops_do(OopClosure* strong, OopClosure* weak);
 
   void always_strong_classes_do(KlassClosure* closure);
 
@@ -99,6 +100,7 @@
   void methods_do(void f(Method*));
 
   void unlink(BoolObjectClosure* is_alive);
+  void remove_classes_in_error_state();
 
   // Classes loaded by the bootstrap loader are always strongly reachable.
   // If we're not doing class unloading, all classes are strongly reachable.
@@ -107,9 +109,8 @@
     return (loader_data->is_the_null_class_loader_data() || !ClassUnloading);
   }
 
-  // Unload (that is, break root links to) all unmarked classes and
-  // loaders.  Returns "true" iff something was unloaded.
-  bool do_unloading();
+  // Unload (that is, break root links to) all unmarked classes and loaders.
+  void do_unloading();
 
   // Protection domains
   Klass* find(int index, unsigned int hash, Symbol* name,
@@ -126,9 +127,7 @@
 
   ProtectionDomainCacheEntry* cache_get(oop protection_domain);
 
-#ifndef PRODUCT
-  void print();
-#endif
+  void print(bool details = true);
   void verify();
 };
 
@@ -218,6 +217,7 @@
   // GC support
   void oops_do(OopClosure* f);
   void always_strong_oops_do(OopClosure* f);
+  void roots_oops_do(OopClosure* strong, OopClosure* weak);
 
   static uint bucket_size();
 
--- a/src/share/vm/classfile/javaClasses.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/classfile/javaClasses.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -41,6 +41,7 @@
 #include "oops/method.hpp"
 #include "oops/symbol.hpp"
 #include "oops/typeArrayOop.hpp"
+#include "prims/jvmtiRedefineClassesTrace.hpp"
 #include "runtime/fieldDescriptor.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -463,12 +464,11 @@
   return true;
 }
 
-void java_lang_String::print(Handle java_string, outputStream* st) {
-  oop          obj    = java_string();
-  assert(obj->klass() == SystemDictionary::String_klass(), "must be java_string");
-  typeArrayOop value  = java_lang_String::value(obj);
-  int          offset = java_lang_String::offset(obj);
-  int          length = java_lang_String::length(obj);
+void java_lang_String::print(oop java_string, outputStream* st) {
+  assert(java_string->klass() == SystemDictionary::String_klass(), "must be java_string");
+  typeArrayOop value  = java_lang_String::value(java_string);
+  int          offset = java_lang_String::offset(java_string);
+  int          length = java_lang_String::length(java_string);
 
   int end = MIN2(length, 100);
   if (value == NULL) {
@@ -549,7 +549,7 @@
       }
     }
   }
-  create_mirror(k, Handle(NULL), CHECK);
+  create_mirror(k, Handle(NULL), Handle(NULL), CHECK);
 }
 
 void java_lang_Class::initialize_mirror_fields(KlassHandle k,
@@ -575,7 +575,8 @@
   InstanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, mirror, CHECK);
 }
 
-void java_lang_Class::create_mirror(KlassHandle k, Handle protection_domain, TRAPS) {
+void java_lang_Class::create_mirror(KlassHandle k, Handle class_loader,
+                                    Handle protection_domain, TRAPS) {
   assert(k->java_mirror() == NULL, "should only assign mirror once");
   // Use this moment of initialization to cache modifier_flags also,
   // to support Class.getModifiers().  Instance classes recalculate
@@ -630,6 +631,10 @@
       }
     }
 
+    // set the classLoader field in the java_lang_Class instance
+    assert(class_loader() == k->class_loader(), "should be same");
+    set_class_loader(mirror(), class_loader());
+
     // Setup indirection from klass->mirror last
     // after any exceptions can happen during allocations.
     if (!k.is_null()) {
@@ -691,6 +696,18 @@
 }
 
 
+void java_lang_Class::set_class_loader(oop java_class, oop loader) {
+  // jdk7 runs Queens in bootstrapping and jdk8-9 has no coordinated pushes yet.
+  if (_class_loader_offset != 0) {
+    java_class->obj_field_put(_class_loader_offset, loader);
+  }
+}
+
+oop java_lang_Class::class_loader(oop java_class) {
+  assert(_class_loader_offset != 0, "must be set");
+  return java_class->obj_field(_class_loader_offset);
+}
+
 oop java_lang_Class::create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS) {
   // This should be improved by adding a field at the Java level or by
   // introducing a new VM klass (see comment in ClassFileParser)
@@ -850,6 +867,12 @@
   compute_optional_offset(classRedefinedCount_offset,
                           klass_oop, vmSymbols::classRedefinedCount_name(), vmSymbols::int_signature());
 
+  // Needs to be optional because the old build runs Queens during bootstrapping
+  // and jdk8-9 doesn't have coordinated pushes yet.
+  compute_optional_offset(_class_loader_offset,
+                 klass_oop, vmSymbols::classLoader_name(),
+                 vmSymbols::classloader_signature());
+
   CLASS_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET);
 }
 
@@ -2759,12 +2782,35 @@
   return (Metadata*)mname->address_field(_vmtarget_offset);
 }
 
+bool java_lang_invoke_MemberName::is_method(oop mname) {
+  assert(is_instance(mname), "must be MemberName");
+  return (flags(mname) & (MN_IS_METHOD | MN_IS_CONSTRUCTOR)) > 0;
+}
+
 #if INCLUDE_JVMTI
 // Can be executed on VM thread only
-void java_lang_invoke_MemberName::adjust_vmtarget(oop mname, Metadata* ref) {
-  assert((is_instance(mname) && (flags(mname) & (MN_IS_METHOD | MN_IS_CONSTRUCTOR)) > 0), "wrong type");
+void java_lang_invoke_MemberName::adjust_vmtarget(oop mname, Method* old_method,
+                                                  Method* new_method, bool* trace_name_printed) {
+  assert(is_method(mname), "wrong type");
   assert(Thread::current()->is_VM_thread(), "not VM thread");
-  mname->address_field_put(_vmtarget_offset, (address)ref);
+
+  Method* target = (Method*)mname->address_field(_vmtarget_offset);
+  if (target == old_method) {
+    mname->address_field_put(_vmtarget_offset, (address)new_method);
+
+    if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
+      if (!(*trace_name_printed)) {
+        // RC_TRACE_MESG macro has an embedded ResourceMark
+        RC_TRACE_MESG(("adjust: name=%s",
+                       old_method->method_holder()->external_name()));
+        *trace_name_printed = true;
+      }
+      // RC_TRACE macro has an embedded ResourceMark
+      RC_TRACE(0x00400000, ("MemberName method update: %s(%s)",
+                            new_method->name()->as_C_string(),
+                            new_method->signature()->as_C_string()));
+    }
+  }
 }
 #endif // INCLUDE_JVMTI
 
@@ -3089,6 +3135,7 @@
 int java_lang_Class::_array_klass_offset;
 int java_lang_Class::_oop_size_offset;
 int java_lang_Class::_static_oop_field_count_offset;
+int java_lang_Class::_class_loader_offset;
 int java_lang_Class::_protection_domain_offset;
 int java_lang_Class::_init_lock_offset;
 int java_lang_Class::_signers_offset;
--- a/src/share/vm/classfile/javaClasses.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/classfile/javaClasses.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -213,7 +213,7 @@
   }
 
   // Debugging
-  static void print(Handle java_string, outputStream* st);
+  static void print(oop java_string, outputStream* st);
   friend class JavaClasses;
 };
 
@@ -244,19 +244,23 @@
   static int _protection_domain_offset;
   static int _init_lock_offset;
   static int _signers_offset;
+  static int _class_loader_offset;
 
   static bool offsets_computed;
   static int classRedefinedCount_offset;
+
   static GrowableArray<Klass*>* _fixup_mirror_list;
 
   static void set_init_lock(oop java_class, oop init_lock);
   static void set_protection_domain(oop java_class, oop protection_domain);
+  static void set_class_loader(oop java_class, oop class_loader);
   static void initialize_mirror_fields(KlassHandle k, Handle mirror, Handle protection_domain, TRAPS);
  public:
   static void compute_offsets();
 
   // Instance creation
-  static void create_mirror(KlassHandle k, Handle protection_domain, TRAPS);
+  static void create_mirror(KlassHandle k, Handle class_loader,
+                            Handle protection_domain, TRAPS);
   static void fixup_mirror(KlassHandle k, TRAPS);
   static oop  create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS);
   // Conversion
@@ -294,6 +298,8 @@
   static objArrayOop  signers(oop java_class);
   static void set_signers(oop java_class, objArrayOop signers);
 
+  static oop class_loader(oop java_class);
+
   static int oop_size(oop java_class);
   static void set_oop_size(oop java_class, int size);
   static int static_oop_field_count(oop java_class);
@@ -1095,7 +1101,8 @@
   static Metadata*      vmtarget(oop mname);
   static void       set_vmtarget(oop mname, Metadata* target);
 #if INCLUDE_JVMTI
-  static void       adjust_vmtarget(oop mname, Metadata* target);
+  static void       adjust_vmtarget(oop mname, Method* old_method, Method* new_method,
+                                    bool* trace_name_printed);
 #endif // INCLUDE_JVMTI
 
   static intptr_t       vmindex(oop mname);
@@ -1109,6 +1116,8 @@
     return obj != NULL && is_subclass(obj->klass());
   }
 
+  static bool is_method(oop obj);
+
   // Relevant integer codes (keep these in synch. with MethodHandleNatives.Constants):
   enum {
     MN_IS_METHOD            = 0x00010000, // method (not constructor)
--- a/src/share/vm/classfile/metadataOnStackMark.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/classfile/metadataOnStackMark.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -31,24 +31,25 @@
 #include "runtime/synchronizer.hpp"
 #include "runtime/thread.hpp"
 #include "services/threadService.hpp"
-#include "utilities/growableArray.hpp"
-
+#include "utilities/chunkedList.hpp"
 
-// Keep track of marked on-stack metadata so it can be cleared.
-GrowableArray<Metadata*>* _marked_objects = NULL;
+volatile MetadataOnStackBuffer* MetadataOnStackMark::_used_buffers = NULL;
+volatile MetadataOnStackBuffer* MetadataOnStackMark::_free_buffers = NULL;
+
 NOT_PRODUCT(bool MetadataOnStackMark::_is_active = false;)
 
 // Walk metadata on the stack and mark it so that redefinition doesn't delete
 // it.  Class unloading also walks the previous versions and might try to
 // delete it, so this class is used by class unloading also.
-MetadataOnStackMark::MetadataOnStackMark() {
+MetadataOnStackMark::MetadataOnStackMark(bool visit_code_cache) {
   assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
+  assert(_used_buffers == NULL, "sanity check");
   NOT_PRODUCT(_is_active = true;)
-  if (_marked_objects == NULL) {
-    _marked_objects = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Metadata*>(1000, true);
+
+  Threads::metadata_do(Metadata::mark_on_stack);
+  if (visit_code_cache) {
+    CodeCache::alive_nmethods_do(nmethod::mark_on_stack);
   }
-  Threads::metadata_do(Metadata::mark_on_stack);
-  CodeCache::alive_nmethods_do(nmethod::mark_on_stack);
   CompileBroker::mark_on_stack();
   JvmtiCurrentBreakpoints::metadata_do(Metadata::mark_on_stack);
   ThreadService::metadata_do(Metadata::mark_on_stack);
@@ -59,15 +60,93 @@
   // Unmark everything that was marked.   Can't do the same walk because
   // redefine classes messes up the code cache so the set of methods
   // might not be the same.
-  for (int i = 0; i< _marked_objects->length(); i++) {
-    _marked_objects->at(i)->set_on_stack(false);
+
+  retire_buffer_for_thread(Thread::current());
+
+  MetadataOnStackBuffer* buffer = const_cast<MetadataOnStackBuffer* >(_used_buffers);
+  while (buffer != NULL) {
+    // Clear on stack state for all metadata.
+    size_t size = buffer->size();
+    for (size_t i  = 0; i < size; i++) {
+      Metadata* md = buffer->at(i);
+      md->set_on_stack(false);
+    }
+
+    MetadataOnStackBuffer* next = buffer->next_used();
+
+    // Move the buffer to the free list.
+    buffer->clear();
+    buffer->set_next_used(NULL);
+    buffer->set_next_free(const_cast<MetadataOnStackBuffer*>(_free_buffers));
+    _free_buffers = buffer;
+
+    // Step to next used buffer.
+    buffer = next;
   }
-  _marked_objects->clear();   // reuse growable array for next time.
+
+  _used_buffers = NULL;
+
   NOT_PRODUCT(_is_active = false;)
 }
 
+void MetadataOnStackMark::retire_buffer(MetadataOnStackBuffer* buffer) {
+  if (buffer == NULL) {
+    return;
+  }
+
+  MetadataOnStackBuffer* old_head;
+
+  do {
+    old_head = const_cast<MetadataOnStackBuffer*>(_used_buffers);
+    buffer->set_next_used(old_head);
+  } while (Atomic::cmpxchg_ptr(buffer, &_used_buffers, old_head) != old_head);
+}
+
+void MetadataOnStackMark::retire_buffer_for_thread(Thread* thread) {
+  retire_buffer(thread->metadata_on_stack_buffer());
+  thread->set_metadata_on_stack_buffer(NULL);
+}
+
+bool MetadataOnStackMark::has_buffer_for_thread(Thread* thread) {
+  return thread->metadata_on_stack_buffer() != NULL;
+}
+
+MetadataOnStackBuffer* MetadataOnStackMark::allocate_buffer() {
+  MetadataOnStackBuffer* allocated;
+  MetadataOnStackBuffer* new_head;
+
+  do {
+    allocated = const_cast<MetadataOnStackBuffer*>(_free_buffers);
+    if (allocated == NULL) {
+      break;
+    }
+    new_head = allocated->next_free();
+  } while (Atomic::cmpxchg_ptr(new_head, &_free_buffers, allocated) != allocated);
+
+  if (allocated == NULL) {
+    allocated = new MetadataOnStackBuffer();
+  }
+
+  assert(!allocated->is_full(), err_msg("Should not be full: " PTR_FORMAT, p2i(allocated)));
+
+  return allocated;
+}
+
 // Record which objects are marked so we can unmark the same objects.
-void MetadataOnStackMark::record(Metadata* m) {
+void MetadataOnStackMark::record(Metadata* m, Thread* thread) {
   assert(_is_active, "metadata on stack marking is active");
-  _marked_objects->push(m);
+
+  MetadataOnStackBuffer* buffer =  thread->metadata_on_stack_buffer();
+
+  if (buffer != NULL && buffer->is_full()) {
+    retire_buffer(buffer);
+    buffer = NULL;
+  }
+
+  if (buffer == NULL) {
+    buffer = allocate_buffer();
+    thread->set_metadata_on_stack_buffer(buffer);
+  }
+
+  buffer->push(m);
 }
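The retire/allocate logic above amounts to lock-free LIFO pushes and pops on the _used_buffers and _free_buffers lists via Atomic::cmpxchg_ptr. Below is a compilable standalone analogue using std::atomic, with a simplified Buffer type standing in for MetadataOnStackBuffer; all names here are illustrative, not the VM's.

#include <atomic>
#include <cstddef>

struct Buffer {
  Buffer* next;   // link for whichever list the buffer currently sits on
};

static std::atomic<Buffer*> used_buffers(NULL);
static std::atomic<Buffer*> free_buffers(NULL);

// Analogue of retire_buffer(): push a filled buffer onto the used list.
void retire(Buffer* b) {
  Buffer* old_head = used_buffers.load();
  do {
    b->next = old_head;                                   // same as set_next_used()
  } while (!used_buffers.compare_exchange_weak(old_head, b));
}

// Analogue of allocate_buffer(): pop from the free list, or allocate a fresh buffer.
Buffer* allocate() {
  Buffer* head = free_buffers.load();
  while (head != NULL &&
         !free_buffers.compare_exchange_weak(head, head->next)) {
    // on failure, head has been reloaded; retry with the new head
  }
  return (head != NULL) ? head : new Buffer{NULL};
}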
--- a/src/share/vm/classfile/metadataOnStackMark.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/classfile/metadataOnStackMark.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,9 +26,12 @@
 #define SHARE_VM_CLASSFILE_METADATAONSTACKMARK_HPP
 
 #include "memory/allocation.hpp"
+#include "utilities/chunkedList.hpp"
 
 class Metadata;
 
+typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer;
+
 // Helper class to mark and unmark metadata used on the stack as either handles
 // or executing methods, so that it can't be deleted during class redefinition
 // and class unloading.
@@ -36,10 +39,20 @@
 // metadata during parsing, relocated methods, and methods in backtraces.
 class MetadataOnStackMark : public StackObj {
   NOT_PRODUCT(static bool _is_active;)
+
+  static volatile MetadataOnStackBuffer* _used_buffers;
+  static volatile MetadataOnStackBuffer* _free_buffers;
+
+  static MetadataOnStackBuffer* allocate_buffer();
+  static void retire_buffer(MetadataOnStackBuffer* buffer);
+
  public:
-  MetadataOnStackMark();
-  ~MetadataOnStackMark();
-  static void record(Metadata* m);
+  MetadataOnStackMark(bool visit_code_cache);
+  ~MetadataOnStackMark();
+
+  static void record(Metadata* m, Thread* thread);
+  static void retire_buffer_for_thread(Thread* thread);
+  static bool has_buffer_for_thread(Thread* thread);
 };
 
 #endif // SHARE_VM_CLASSFILE_METADATAONSTACKMARK_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/sharedClassUtil.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP
+#define SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP
+
+#include "classfile/sharedPathsMiscInfo.hpp"
+#include "memory/filemap.hpp"
+
+class SharedClassUtil : AllStatic {
+public:
+
+  static SharedPathsMiscInfo* allocate_shared_paths_misc_info() {
+    return new SharedPathsMiscInfo();
+  }
+
+  static SharedPathsMiscInfo* allocate_shared_paths_misc_info(char* buf, int size) {
+    return new SharedPathsMiscInfo(buf, size);
+  }
+
+  static FileMapInfo::FileMapHeader* allocate_file_map_header() {
+    return new FileMapInfo::FileMapHeader();
+  }
+
+  static size_t file_map_header_size() {
+    return sizeof(FileMapInfo::FileMapHeader);
+  }
+
+  static size_t shared_class_path_entry_size() {
+    return sizeof(SharedClassPathEntry);
+  }
+
+  static void update_shared_classpath(ClassPathEntry *cpe,
+                                      SharedClassPathEntry* ent,
+                                      time_t timestamp,
+                                      long filesize, TRAPS) {
+    ent->_timestamp = timestamp;
+    ent->_filesize  = filesize;
+  }
+
+  static void initialize(TRAPS) {}
+
+  inline static bool is_shared_boot_class(Klass* klass) {
+    return (klass->_shared_class_path_index >= 0);
+  }
+};
+
+#endif // SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/sharedPathsMiscInfo.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/classLoader.hpp"
+#include "classfile/classLoaderData.inline.hpp"
+#include "classfile/sharedPathsMiscInfo.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/metaspaceShared.hpp"
+#include "runtime/arguments.hpp"
+
+void SharedPathsMiscInfo::add_path(const char* path, int type) {
+  if (TraceClassPaths) {
+    tty->print("[type=%s] ", type_name(type));
+    trace_class_path("[Add misc shared path ", path);
+  }
+  write(path, strlen(path) + 1);
+  write_jint(jint(type));
+}
+
+void SharedPathsMiscInfo::ensure_size(size_t needed_bytes) {
+  assert(_allocated, "cannot modify buffer during validation.");
+  int used = get_used_bytes();
+  int target = used + int(needed_bytes);
+  if (target > _buf_size) {
+    _buf_size = _buf_size * 2 + (int)needed_bytes;
+    _buf_start = REALLOC_C_HEAP_ARRAY(char, _buf_start, _buf_size, mtClass);
+    _cur_ptr = _buf_start + used;
+    _end_ptr = _buf_start + _buf_size;
+  }
+}
+
+void SharedPathsMiscInfo::write(const void* ptr, size_t size) {
+  ensure_size(size);
+  memcpy(_cur_ptr, ptr, size);
+  _cur_ptr += size;
+}
+
+bool SharedPathsMiscInfo::read(void* ptr, size_t size) {
+  if (_cur_ptr + size <= _end_ptr) {
+    memcpy(ptr, _cur_ptr, size);
+    _cur_ptr += size;
+    return true;
+  }
+  return false;
+}
+
+bool SharedPathsMiscInfo::fail(const char* msg, const char* name) {
+  ClassLoader::trace_class_path(msg, name);
+  MetaspaceShared::set_archive_loading_failed();
+  return false;
+}
+
+bool SharedPathsMiscInfo::check() {
+  // The whole buffer must be 0 terminated so that we can use strlen and strcmp
+  // without fear.
+  _end_ptr -= sizeof(jint);
+  if (_cur_ptr >= _end_ptr) {
+    return fail("Truncated archive file header");
+  }
+  if (*_end_ptr != 0) {
+    return fail("Corrupted archive file header");
+  }
+
+  while (_cur_ptr < _end_ptr) {
+    jint type;
+    const char* path = _cur_ptr;
+    _cur_ptr += strlen(path) + 1;
+    if (!read_jint(&type)) {
+      return fail("Corrupted archive file header");
+    }
+    if (TraceClassPaths) {
+      tty->print("[type=%s ", type_name(type));
+      print_path(tty, type, path);
+      tty->print_cr("]");
+    }
+    if (!check(type, path)) {
+      if (!PrintSharedArchiveAndExit) {
+        return false;
+      }
+    } else {
+      trace_class_path("[ok");
+    }
+  }
+
+  return true;
+}
+
+bool SharedPathsMiscInfo::check(jint type, const char* path) {
+  switch (type) {
+  case BOOT:
+    if (strcmp(path, Arguments::get_sysclasspath()) != 0) {
+      return fail("[BOOT classpath mismatch, actual: -Dsun.boot.class.path=", Arguments::get_sysclasspath());
+    }
+    break;
+  case NON_EXIST: // fall-through
+  case REQUIRED:
+    {
+      struct stat st;
+      if (os::stat(path, &st) != 0) {
+        // The file does not actually exist
+        if (type == REQUIRED) {
+          // but we require it to exist -> fail
+          return fail("Required file doesn't exist");
+        }
+      } else {
+        // The file actually exists
+        if (type == NON_EXIST) {
+          // But we want it to not exist -> fail
+          return fail("File must not exist");
+        }
+        time_t    timestamp;
+        long      filesize;
+
+        if (!read_time(&timestamp) || !read_long(&filesize)) {
+          return fail("Corrupted archive file header");
+        }
+        if (timestamp != st.st_mtime) {
+          return fail("Timestamp mismatch");
+        }
+        if (filesize != st.st_size) {
+          return fail("File size mismatch");
+        }
+      }
+    }
+    break;
+
+  default:
+    return fail("Corrupted archive file header");
+  }
+
+  return true;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/sharedPathsMiscInfo.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP
+#define SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP
+
+#include "runtime/os.hpp"
+
+// During dumping time, when processing class paths, we build up the dump-time
+// classpath. The JAR files that exist are stored in the list ClassLoader::_first_entry.
+// However, we need to store other "misc" information for run-time checking, such as
+//
+// + The values of Arguments::get_sysclasspath() used during dumping.
+//
+// + The meta-index file(s) used during dumping (incl modification time and size)
+//
+// + The class path elements that were specified during dumping but did not exist --
+//   these elements must also be specified at run time, and they also must not
+//   exist at run time.
+//
+// These misc items are stored in a linear buffer in SharedPathsMiscInfo.
+// The storage format is stream oriented to minimize its size.
+//
+// When writing the information to the archive file, SharedPathsMiscInfo is stored in
+// the archive file header. At run-time, this information is used only during initialization
+// (accessed using read() instead of mmap()), and is deallocated afterwards to save space.
+//
+// The SharedPathsMiscInfo class is used both for creating the information (during
+// dumping time) and for validating it (at run time). Different constructors are used in the
+// two situations. See below.
+
+class SharedPathsMiscInfo : public CHeapObj<mtClass> {
+protected:
+  char* _buf_start;
+  char* _cur_ptr;
+  char* _end_ptr;
+  int   _buf_size;
+  bool  _allocated;   // was _buf_start allocated by me?
+  void ensure_size(size_t needed_bytes);
+  void add_path(const char* path, int type);
+
+  void write(const void* ptr, size_t size);
+  bool read(void* ptr, size_t size);
+
+  static void trace_class_path(const char* msg, const char* name = NULL) {
+    ClassLoader::trace_class_path(msg, name);
+  }
+protected:
+  static bool fail(const char* msg, const char* name = NULL);
+  virtual bool check(jint type, const char* path);
+
+public:
+  enum {
+    INITIAL_BUF_SIZE = 128
+  };
+  // This constructor is used when creating the misc information (during dump)
+  SharedPathsMiscInfo() {
+    _buf_size = INITIAL_BUF_SIZE;
+    _cur_ptr = _buf_start = NEW_C_HEAP_ARRAY(char, _buf_size, mtClass);
+    _allocated = true;
+  }
+  // This constructor is used when validating the misc info (during run time)
+  SharedPathsMiscInfo(char *buff, int size) {
+    _cur_ptr = _buf_start = buff;
+    _end_ptr = _buf_start + size;
+    _buf_size = size;
+    _allocated = false;
+  }
+  ~SharedPathsMiscInfo() {
+    if (_allocated) {
+      FREE_C_HEAP_ARRAY(char, _buf_start, mtClass);
+    }
+  }
+  int get_used_bytes() {
+    return _cur_ptr - _buf_start;
+  }
+  void* buffer() {
+    return _buf_start;
+  }
+
+  // writing --
+
+  // The path must not exist at run-time
+  void add_nonexist_path(const char* path) {
+    add_path(path, NON_EXIST);
+  }
+
+  // The path must exist and have required size and modification time
+  void add_required_file(const char* path) {
+    add_path(path, REQUIRED);
+
+    struct stat st;
+    if (os::stat(path, &st) != 0) {
+      assert(0, "sanity");
+      ClassLoader::exit_with_path_failure("failed to os::stat(%s)", path); // should not happen
+    }
+    write_time(st.st_mtime);
+    write_long(st.st_size);
+  }
+
+  // The boot class path recorded here must match the boot class path used at run time
+  void add_boot_classpath(const char* path) {
+    add_path(path, BOOT);
+  }
+  int write_jint(jint num) {
+    write(&num, sizeof(num));
+    return 0;
+  }
+  void write_time(time_t t) {
+    write(&t, sizeof(t));
+  }
+  void write_long(long l) {
+    write(&l, sizeof(l));
+  }
+
+  bool dump_to_file(int fd) {
+    int n = get_used_bytes();
+    return (os::write(fd, _buf_start, n) == (size_t)n);
+  }
+
+  // reading --
+
+  enum {
+    BOOT      = 1,
+    NON_EXIST = 2,
+    REQUIRED  = 3
+  };
+
+  virtual const char* type_name(int type) {
+    switch (type) {
+    case BOOT:      return "BOOT";
+    case NON_EXIST: return "NON_EXIST";
+    case REQUIRED:  return "REQUIRED";
+    default:        ShouldNotReachHere(); return "?";
+    }
+  }
+
+  virtual void print_path(outputStream* out, int type, const char* path) {
+    switch (type) {
+    case BOOT:
+      out->print("Expecting -Dsun.boot.class.path=%s", path);
+      break;
+    case NON_EXIST:
+      out->print("Expecting that %s does not exist", path);
+      break;
+    case REQUIRED:
+      out->print("Expecting that file %s must exist and is not altered", path);
+      break;
+    default:
+      ShouldNotReachHere();
+    }
+  }
+
+  bool check();
+  bool read_jint(jint *ptr) {
+    return read(ptr, sizeof(jint));
+  }
+  bool read_long(long *ptr) {
+    return read(ptr, sizeof(long));
+  }
+  bool read_time(time_t *ptr) {
+    return read(ptr, sizeof(time_t));
+  }
+};
+
+#endif // SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP
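The header comment above describes a stream-oriented layout: each record is a NUL-terminated path followed by a jint type code, and REQUIRED records additionally carry the timestamp and size that check() later compares against os::stat(). A minimal standalone sketch of writing one such record, using std::string as the buffer instead of the C-heap array; the helper names here are assumptions of the sketch.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <ctime>
#include <string>

enum PathType { BOOT = 1, NON_EXIST = 2, REQUIRED = 3 };

// Append raw bytes, playing the role of SharedPathsMiscInfo::write().
static void put(std::string& buf, const void* p, std::size_t n) {
  buf.append(static_cast<const char*>(p), n);
}

// One REQUIRED record: NUL-terminated path, 32-bit type code, then the
// modification time and file size recorded by write_time()/write_long().
void add_required_file(std::string& buf, const char* path,
                       std::time_t mtime, long filesize) {
  put(buf, path, std::strlen(path) + 1);
  std::int32_t type = REQUIRED;
  put(buf, &type, sizeof(type));
  put(buf, &mtime, sizeof(mtime));
  put(buf, &filesize, sizeof(filesize));
}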
--- a/src/share/vm/classfile/symbolTable.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/classfile/symbolTable.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -36,6 +36,7 @@
 #include "runtime/mutexLocker.hpp"
 #include "utilities/hashtable.inline.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/g1StringDedup.hpp"
 #endif
 
@@ -73,9 +74,9 @@
 void SymbolTable::initialize_symbols(int arena_alloc_size) {
   // Initialize the arena for global symbols, size passed in depends on CDS.
   if (arena_alloc_size == 0) {
-    _arena = new (mtSymbol) Arena();
+    _arena = new (mtSymbol) Arena(mtSymbol);
   } else {
-    _arena = new (mtSymbol) Arena(arena_alloc_size);
+    _arena = new (mtSymbol) Arena(mtSymbol, arena_alloc_size);
   }
 }
 
@@ -204,7 +205,7 @@
     }
   }
   // If the bucket size is too deep check if this hash code is insufficient.
-  if (count >= BasicHashtable<mtSymbol>::rehash_count && !needs_rehashing()) {
+  if (count >= rehash_count && !needs_rehashing()) {
     _needs_rehashing = check_rehash_table(count);
   }
   return NULL;
@@ -655,7 +656,7 @@
     }
   }
   // If the bucket size is too deep check if this hash code is insufficient.
-  if (count >= BasicHashtable<mtSymbol>::rehash_count && !needs_rehashing()) {
+  if (count >= rehash_count && !needs_rehashing()) {
     _needs_rehashing = check_rehash_table(count);
   }
   return NULL;
@@ -704,11 +705,26 @@
   return lookup(chars, length);
 }
 
+// Tell the GC that this string was looked up in the StringTable.
+static void ensure_string_alive(oop string) {
+  // A lookup in the StringTable could return an object that was previously
+  // considered dead. The SATB part of G1 needs to get notified about this
+  // potential resurrection, otherwise the marking might not find the object.
+#if INCLUDE_ALL_GCS
+  if (UseG1GC && string != NULL) {
+    G1SATBCardTableModRefBS::enqueue(string);
+  }
+#endif
+}
 
 oop StringTable::lookup(jchar* name, int len) {
   unsigned int hash = hash_string(name, len);
   int index = the_table()->hash_to_index(hash);
-  return the_table()->lookup(index, name, len, hash);
+  oop string = the_table()->lookup(index, name, len, hash);
+
+  ensure_string_alive(string);
+
+  return string;
 }
 
 
@@ -719,7 +735,10 @@
   oop found_string = the_table()->lookup(index, name, len, hashValue);
 
   // Found
-  if (found_string != NULL) return found_string;
+  if (found_string != NULL) {
+    ensure_string_alive(found_string);
+    return found_string;
+  }
 
   debug_only(StableMemoryChecker smc(name, len * sizeof(name[0])));
   assert(!Universe::heap()->is_in_reserved(name),
@@ -744,11 +763,17 @@
 
   // Grab the StringTable_lock before getting the_table() because it could
   // change at safepoint.
-  MutexLocker ml(StringTable_lock, THREAD);
+  oop added_or_found;
+  {
+    MutexLocker ml(StringTable_lock, THREAD);
+    // Otherwise, add the string to the table
+    added_or_found = the_table()->basic_add(index, string, name, len,
+                                  hashValue, CHECK_NULL);
+  }
 
-  // Otherwise, add to symbol to table
-  return the_table()->basic_add(index, string, name, len,
-                                hashValue, CHECK_NULL);
+  ensure_string_alive(added_or_found);
+
+  return added_or_found;
 }
 
 oop StringTable::intern(Symbol* symbol, TRAPS) {
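The ensure_string_alive() change above makes every successful StringTable lookup notify G1's SATB queue before the reference escapes, so a string the concurrent marker had already treated as dead cannot be resurrected unseen. A standalone sketch of that shape, with a generic keep_alive callback standing in for G1SATBCardTableModRefBS::enqueue(); the table and callback types here are illustrative, not HotSpot's.

#include <cstddef>
#include <functional>
#include <string>
#include <unordered_map>

typedef std::function<void(const std::string*)> KeepAlive;

// Look up an interned value and report it to the concurrent marker before the
// caller can see it, mirroring ensure_string_alive() in the hunk above.
const std::string* lookup_interned(
    const std::unordered_map<std::string, std::string>& table,
    const std::string& key,
    const KeepAlive& keep_alive) {
  std::unordered_map<std::string, std::string>::const_iterator it = table.find(key);
  if (it == table.end()) {
    return NULL;
  }
  const std::string* found = &it->second;
  keep_alive(found);   // analogue of G1SATBCardTableModRefBS::enqueue(string)
  return found;
}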
--- a/src/share/vm/classfile/symbolTable.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/classfile/symbolTable.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -74,7 +74,7 @@
   operator Symbol*()                             { return _temp; }
 };
 
-class SymbolTable : public Hashtable<Symbol*, mtSymbol> {
+class SymbolTable : public RehashableHashtable<Symbol*, mtSymbol> {
   friend class VMStructs;
   friend class ClassFileParser;
 
@@ -110,10 +110,10 @@
   Symbol* lookup(int index, const char* name, int len, unsigned int hash);
 
   SymbolTable()
-    : Hashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>)) {}
+    : RehashableHashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>)) {}
 
   SymbolTable(HashtableBucket<mtSymbol>* t, int number_of_entries)
-    : Hashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>), t,
+    : RehashableHashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>), t,
                 number_of_entries) {}
 
   // Arena for permanent symbols (null class loader) that are never unloaded
@@ -252,7 +252,7 @@
   static int parallel_claimed_index()        { return _parallel_claimed_idx; }
 };
 
-class StringTable : public Hashtable<oop, mtSymbol> {
+class StringTable : public RehashableHashtable<oop, mtSymbol> {
   friend class VMStructs;
 
 private:
@@ -278,11 +278,11 @@
   // in the range [start_idx, end_idx).
   static void buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, int* processed, int* removed);
 
-  StringTable() : Hashtable<oop, mtSymbol>((int)StringTableSize,
+  StringTable() : RehashableHashtable<oop, mtSymbol>((int)StringTableSize,
                               sizeof (HashtableEntry<oop, mtSymbol>)) {}
 
   StringTable(HashtableBucket<mtSymbol>* t, int number_of_entries)
-    : Hashtable<oop, mtSymbol>((int)StringTableSize, sizeof (HashtableEntry<oop, mtSymbol>), t,
+    : RehashableHashtable<oop, mtSymbol>((int)StringTableSize, sizeof (HashtableEntry<oop, mtSymbol>), t,
                      number_of_entries) {}
 public:
   // The string table
--- a/src/share/vm/classfile/systemDictionary.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/classfile/systemDictionary.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -30,10 +30,15 @@
 #include "classfile/placeholders.hpp"
 #include "classfile/resolutionErrors.hpp"
 #include "classfile/systemDictionary.hpp"
+#if INCLUDE_CDS
+#include "classfile/sharedClassUtil.hpp"
+#include "classfile/systemDictionaryShared.hpp"
+#endif
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/interpreter.hpp"
+#include "memory/filemap.hpp"
 #include "memory/gcLocker.hpp"
 #include "memory/oopFactory.hpp"
 #include "oops/instanceKlass.hpp"
@@ -46,12 +51,14 @@
 #include "oops/typeArrayKlass.hpp"
 #include "prims/jvmtiEnvBase.hpp"
 #include "prims/methodHandles.hpp"
+#include "runtime/arguments.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/fieldType.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/signature.hpp"
 #include "services/classLoadingService.hpp"
 #include "services/threadService.hpp"
@@ -60,9 +67,8 @@
 #ifdef GRAAL
 #include "graal/graalRuntime.hpp"
 #endif
-
 #if INCLUDE_TRACE
- #include "trace/tracing.hpp"
+#include "trace/tracing.hpp"
 #endif
 
 Dictionary*            SystemDictionary::_dictionary          = NULL;
@@ -123,6 +129,8 @@
                          CHECK);
 
   _java_system_loader = (oop)result.get_jobject();
+
+  CDS_ONLY(SystemDictionaryShared::initialize(CHECK);)
 }
 
 
@@ -988,6 +996,7 @@
     // as the host_klass
     assert(EnableInvokeDynamic, "");
     guarantee(host_klass->class_loader() == class_loader(), "should be the same");
+    guarantee(!DumpSharedSpaces, "must not create anonymous classes when dumping");
     loader_data = ClassLoaderData::anonymous_class_loader_data(class_loader(), CHECK_NULL);
     loader_data->record_dependency(host_klass(), CHECK_NULL);
   } else {
@@ -1149,7 +1158,7 @@
   return k();
 }
 
-
+#if INCLUDE_CDS
 void SystemDictionary::set_shared_dictionary(HashtableBucket<mtClass>* t, int length,
                                              int number_of_entries) {
   assert(length == _nof_buckets * sizeof(HashtableBucket<mtClass>),
@@ -1182,15 +1191,21 @@
 instanceKlassHandle SystemDictionary::load_shared_class(
                  Symbol* class_name, Handle class_loader, TRAPS) {
   instanceKlassHandle ik (THREAD, find_shared_class(class_name));
-  return load_shared_class(ik, class_loader, THREAD);
+  // Make sure we only return the boot class for the NULL classloader.
+  if (ik.not_null() &&
+      SharedClassUtil::is_shared_boot_class(ik()) && class_loader.is_null()) {
+    Handle protection_domain;
+    return load_shared_class(ik, class_loader, protection_domain, THREAD);
+  }
+  return instanceKlassHandle();
 }
 
-instanceKlassHandle SystemDictionary::load_shared_class(
-                 instanceKlassHandle ik, Handle class_loader, TRAPS) {
-  assert(class_loader.is_null(), "non-null classloader for shared class?");
+instanceKlassHandle SystemDictionary::load_shared_class(instanceKlassHandle ik,
+                                                        Handle class_loader,
+                                                        Handle protection_domain, TRAPS) {
   if (ik.not_null()) {
     instanceKlassHandle nh = instanceKlassHandle(); // null Handle
-    Symbol*  class_name = ik->name();
+    Symbol* class_name = ik->name();
 
     // Found the class, now load the superclass and interfaces.  If they
     // are shared, add them to the main system dictionary and reset
@@ -1199,7 +1214,7 @@
     if (ik->super() != NULL) {
       Symbol*  cn = ik->super()->name();
       resolve_super_or_fail(class_name, cn,
-                            class_loader, Handle(), true, CHECK_(nh));
+                            class_loader, protection_domain, true, CHECK_(nh));
     }
 
     Array<Klass*>* interfaces = ik->local_interfaces();
@@ -1212,7 +1227,7 @@
       // reinitialized yet (they will be once the interface classes
       // are loaded)
       Symbol*  name  = k->name();
-      resolve_super_or_fail(class_name, name, class_loader, Handle(), false, CHECK_(nh));
+      resolve_super_or_fail(class_name, name, class_loader, protection_domain, false, CHECK_(nh));
     }
 
     // Adjust methods to recover missing data.  They need addresses for
@@ -1221,30 +1236,45 @@
 
     // Updating methods must be done under a lock so multiple
     // threads don't update these in parallel
-    // Shared classes are all currently loaded by the bootstrap
-    // classloader, so this will never cause a deadlock on
-    // a custom class loader lock.
-
+    //
+    // Shared classes are all currently loaded by either the bootstrap or
+    // internal parallel class loaders, so this will never cause a deadlock
+    // on a custom class loader lock.
+
+    ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader());
     {
       Handle lockObject = compute_loader_lock_object(class_loader, THREAD);
       check_loader_lock_contention(lockObject, THREAD);
       ObjectLocker ol(lockObject, THREAD, true);
-      ik->restore_unshareable_info(CHECK_(nh));
+      ik->restore_unshareable_info(loader_data, protection_domain, CHECK_(nh));
     }
 
     if (TraceClassLoading) {
       ResourceMark rm;
       tty->print("[Loaded %s", ik->external_name());
       tty->print(" from shared objects file");
+      if (class_loader.not_null()) {
+        tty->print(" by %s", loader_data->loader_name());
+      }
       tty->print_cr("]");
     }
+
+    if (DumpLoadedClassList != NULL && classlist_file->is_open()) {
+      // Only dump the classes that can be stored into CDS archive
+      if (SystemDictionaryShared::is_sharing_possible(loader_data)) {
+        ResourceMark rm(THREAD);
+        classlist_file->print_cr("%s", ik->name()->as_C_string());
+        classlist_file->flush();
+      }
+    }
+
     // notify a class loaded from shared object
     ClassLoadingService::notify_class_loaded(InstanceKlass::cast(ik()),
                                              true /* shared class */);
   }
   return ik;
 }
-
+#endif // INCLUDE_CDS
 
 instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Handle class_loader, TRAPS) {
   instanceKlassHandle nh = instanceKlassHandle(); // null Handle
@@ -1254,8 +1284,10 @@
     // shared spaces.
     instanceKlassHandle k;
     {
+#if INCLUDE_CDS
       PerfTraceTime vmtimer(ClassLoader::perf_shared_classload_time());
       k = load_shared_class(class_name, class_loader, THREAD);
+#endif
     }
 
     if (k.is_null()) {
@@ -1614,7 +1646,6 @@
   Universe::flush_dependents_on(k);
 }
 
-
 // ----------------------------------------------------------------------------
 // GC support
 
@@ -1627,16 +1658,7 @@
 // system dictionary and follows the remaining classes' contents.
 
 void SystemDictionary::always_strong_oops_do(OopClosure* blk) {
-  blk->do_oop(&_java_system_loader);
-  blk->do_oop(&_system_loader_lock_obj);
-#ifdef GRAAL
-  blk->do_oop(&_graal_loader);
-#endif
-
-  dictionary()->always_strong_oops_do(blk);
-
-  // Visit extra methods
-  invoke_method_table()->oops_do(blk);
+  roots_oops_do(blk, NULL);
 }
 
 void SystemDictionary::always_strong_classes_do(KlassClosure* closure) {
@@ -1683,12 +1705,11 @@
 
 // Assumes classes in the SystemDictionary are only unloaded at a safepoint
 // Note: anonymous classes are not in the SD.
-bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
+bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive, bool clean_alive) {
   // First, mark for unload all ClassLoaderData referencing a dead class loader.
-  bool has_dead_loaders = ClassLoaderDataGraph::do_unloading(is_alive);
-  bool unloading_occurred = false;
-  if (has_dead_loaders) {
-    unloading_occurred = dictionary()->do_unloading();
+  bool unloading_occurred = ClassLoaderDataGraph::do_unloading(is_alive, clean_alive);
+  if (unloading_occurred) {
+    dictionary()->do_unloading();
     constraints()->purge_loader_constraints();
     resolution_errors()->purge_resolution_errors();
   }
@@ -1703,12 +1724,24 @@
   return unloading_occurred;
 }
 
+void SystemDictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
+  strong->do_oop(&_java_system_loader);
+  strong->do_oop(&_system_loader_lock_obj);
+  CDS_ONLY(SystemDictionaryShared::roots_oops_do(strong);)
+  GRAAL_ONLY(strong->do_oop(&_graal_loader);)
+
+  // Adjust dictionary
+  dictionary()->roots_oops_do(strong, weak);
+
+  // Visit extra methods
+  invoke_method_table()->oops_do(strong);
+}
+
 void SystemDictionary::oops_do(OopClosure* f) {
   f->do_oop(&_java_system_loader);
   f->do_oop(&_system_loader_lock_obj);
-#ifdef GRAAL
-  f->do_oop(&_graal_loader);
-#endif
+  CDS_ONLY(SystemDictionaryShared::oops_do(f);)
+  GRAAL_ONLY(f->do_oop(&_graal_loader);)
 
   // Adjust dictionary
   dictionary()->oops_do(f);
@@ -1770,6 +1803,10 @@
   invoke_method_table()->methods_do(f);
 }
 
+void SystemDictionary::remove_classes_in_error_state() {
+  dictionary()->remove_classes_in_error_state();
+}
+
 // ----------------------------------------------------------------------------
 // Lazily load klasses
 
@@ -1895,11 +1932,12 @@
   InstanceKlass::cast(WK_KLASS(Reference_klass))->set_reference_type(REF_OTHER);
   InstanceRefKlass::update_nonstatic_oop_maps(WK_KLASS(Reference_klass));
 
-  initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(PhantomReference_klass), scan, CHECK);
+  initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(Cleaner_klass), scan, CHECK);
   InstanceKlass::cast(WK_KLASS(SoftReference_klass))->set_reference_type(REF_SOFT);
   InstanceKlass::cast(WK_KLASS(WeakReference_klass))->set_reference_type(REF_WEAK);
   InstanceKlass::cast(WK_KLASS(FinalReference_klass))->set_reference_type(REF_FINAL);
   InstanceKlass::cast(WK_KLASS(PhantomReference_klass))->set_reference_type(REF_PHANTOM);
+  InstanceKlass::cast(WK_KLASS(Cleaner_klass))->set_reference_type(REF_CLEANER);
 
   // JSR 292 classes
   WKID jsr292_group_start = WK_KLASS_ENUM_NAME(MethodHandle_klass);
@@ -2263,9 +2301,15 @@
     spe = NULL;
     // Must create lots of stuff here, but outside of the SystemDictionary lock.
     m = Method::make_method_handle_intrinsic(iid, signature, CHECK_(empty));
-    CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_highest_tier,
-                                  methodHandle(), CompileThreshold, "MH", CHECK_(empty));
-
+    if (!Arguments::is_interpreter_only()) {
+      // Generate a compiled form of the MH intrinsic.
+      AdapterHandlerLibrary::create_native_wrapper(m);
+      // Check if we have the compiled code.
+      if (!m->has_compiled_code()) {
+        THROW_MSG_(vmSymbols::java_lang_VirtualMachineError(),
+                   "out of space in CodeCache for method handle intrinsic", empty);
+      }
+    }
     // Now grab the lock.  We might have to throw away the new method,
     // if a racing thread has managed to install one at the same time.
     {
@@ -2276,12 +2320,12 @@
       if (spe->method() == NULL)
         spe->set_method(m());
     }
-  } else if (spe->method()->code() == NULL) {
-    CompileBroker::compile_method(spe->method(), InvocationEntryBci, CompLevel_highest_tier,
-                                      methodHandle(), CompileThreshold, "MH", CHECK_(empty));
   }
 
-  guarantee(spe != NULL && spe->method() != NULL && (!UseCompiler || spe->method()->code() != NULL), "Could not compile a method handle intrinsic");
+  guarantee(spe != NULL && spe->method() != NULL, "");
+  guarantee(Arguments::is_interpreter_only() || (spe->method()->has_compiled_code() &&
+         spe->method()->code()->entry_point() == spe->method()->from_compiled_entry()),
+         "MH intrinsic invariant");
   return spe->method();
 }
 
@@ -2593,10 +2637,12 @@
 
 
 // ----------------------------------------------------------------------------
-#ifndef PRODUCT
-
-void SystemDictionary::print() {
-  dictionary()->print();
+void SystemDictionary::print_shared(bool details) {
+  shared_dictionary()->print(details);
+}
+
+void SystemDictionary::print(bool details) {
+  dictionary()->print(details);
 
   // Placeholders
   GCMutexLocker mu(SystemDictionary_lock);
@@ -2606,7 +2652,6 @@
   constraints()->print();
 }
 
-#endif
 
 void SystemDictionary::verify() {
   guarantee(dictionary() != NULL, "Verify of system dictionary failed");
@@ -2644,7 +2689,7 @@
                                       class_loader->klass() : (Klass*)NULL);
     event.commit();
   }
-#endif /* INCLUDE_TRACE */
+#endif // INCLUDE_TRACE
 }
 
 #ifndef PRODUCT
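The method handle intrinsic hunk above builds the compiled wrapper outside the SystemDictionary lock and only then grabs the lock, discarding its copy if a racing thread already installed one. A standalone sketch of that build-then-publish-under-lock pattern; the map, key and Method stand-ins are hypothetical, not the VM's types.

#include <cstddef>
#include <mutex>
#include <unordered_map>

struct Method { };   // stands in for the freshly made intrinsic method

static std::mutex cache_lock;
static std::unordered_map<int, Method*> cache;   // keyed by intrinsic id in this sketch

// Publish freshly_made only if no other thread won the race; otherwise discard it
// and return the winner, as the "racing thread" comment in the hunk describes.
Method* find_or_install(int iid, Method* freshly_made) {
  std::lock_guard<std::mutex> guard(cache_lock);
  std::unordered_map<int, Method*>::iterator it = cache.find(iid);
  if (it != cache.end() && it->second != NULL) {
    delete freshly_made;
    return it->second;
  }
  cache[iid] = freshly_made;
  return freshly_made;
}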
--- a/src/share/vm/classfile/systemDictionary.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/classfile/systemDictionary.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -111,6 +111,7 @@
   do_klass(SecurityManager_klass,                       java_lang_SecurityManager,                 Pre                 ) \
   do_klass(ProtectionDomain_klass,                      java_security_ProtectionDomain,            Pre                 ) \
   do_klass(AccessControlContext_klass,                  java_security_AccessControlContext,        Pre                 ) \
+  do_klass(SecureClassLoader_klass,                     java_security_SecureClassLoader,           Pre                 ) \
   do_klass(ClassNotFoundException_klass,                java_lang_ClassNotFoundException,          Pre                 ) \
   do_klass(NoClassDefFoundError_klass,                  java_lang_NoClassDefFoundError,            Pre                 ) \
   do_klass(LinkageError_klass,                          java_lang_LinkageError,                    Pre                 ) \
@@ -127,6 +128,7 @@
   do_klass(WeakReference_klass,                         java_lang_ref_WeakReference,               Pre                 ) \
   do_klass(FinalReference_klass,                        java_lang_ref_FinalReference,              Pre                 ) \
   do_klass(PhantomReference_klass,                      java_lang_ref_PhantomReference,            Pre                 ) \
+  do_klass(Cleaner_klass,                               sun_misc_Cleaner,                          Pre                 ) \
   do_klass(Finalizer_klass,                             java_lang_ref_Finalizer,                   Pre                 ) \
                                                                                                                          \
   do_klass(Thread_klass,                                java_lang_Thread,                          Pre                 ) \
@@ -167,6 +169,17 @@
   do_klass(StringBuilder_klass,                         java_lang_StringBuilder,                   Pre                 ) \
   do_klass(misc_Unsafe_klass,                           sun_misc_Unsafe,                           Pre                 ) \
                                                                                                                          \
+  /* support for CDS */                                                                                                  \
+  do_klass(ByteArrayInputStream_klass,                  java_io_ByteArrayInputStream,              Pre                 ) \
+  do_klass(File_klass,                                  java_io_File,                              Pre                 ) \
+  do_klass(URLClassLoader_klass,                        java_net_URLClassLoader,                   Pre                 ) \
+  do_klass(URL_klass,                                   java_net_URL,                              Pre                 ) \
+  do_klass(Jar_Manifest_klass,                          java_util_jar_Manifest,                    Pre                 ) \
+  do_klass(sun_misc_Launcher_klass,                     sun_misc_Launcher,                         Pre                 ) \
+  do_klass(sun_misc_Launcher_AppClassLoader_klass,      sun_misc_Launcher_AppClassLoader,          Pre                 ) \
+  do_klass(sun_misc_Launcher_ExtClassLoader_klass,      sun_misc_Launcher_ExtClassLoader,          Pre                 ) \
+  do_klass(CodeSource_klass,                            java_security_CodeSource,                  Pre                 ) \
+                                                                                                                         \
   /* It's NULL in non-1.4 JDKs. */                                                                                       \
   do_klass(StackTraceElement_klass,                     java_lang_StackTraceElement,               Opt                 ) \
   /* Universe::is_gte_jdk14x_version() is not set up by this point. */                                                   \
@@ -294,7 +307,7 @@
   static Klass* resolve_or_fail(Symbol* class_name, Handle class_loader, Handle protection_domain, bool throw_error, TRAPS);
   // Convenient call for null loader and protection domain.
   static Klass* resolve_or_fail(Symbol* class_name, bool throw_error, TRAPS);
-private:
+protected:
   // handle error translation for resolve_or_null results
   static Klass* handle_resolution_exception(Symbol* class_name, Handle class_loader, Handle protection_domain, bool throw_error, KlassHandle klass_h, TRAPS);
 
@@ -397,17 +410,21 @@
 
   // Unload (that is, break root links to) all unmarked classes and
   // loaders.  Returns "true" iff something was unloaded.
-  static bool do_unloading(BoolObjectClosure* is_alive);
+  static bool do_unloading(BoolObjectClosure* is_alive, bool clean_alive = true);
+
+  // Used by DumpSharedSpaces only to remove classes that failed verification
+  static void remove_classes_in_error_state();
 
   static int calculate_systemdictionary_size(int loadedclasses);
 
   // Applies "f->do_oop" to all root oops in the system dictionary.
   static void oops_do(OopClosure* f);
+  static void roots_oops_do(OopClosure* strong, OopClosure* weak);
 
   // System loader lock
   static oop system_loader_lock()           { return _system_loader_lock_obj; }
 
-private:
+protected:
   // Extended Redefine classes support (tbi)
   static void preloaded_classes_do(KlassClosure* f);
   static void lazily_loaded_classes_do(KlassClosure* f);
@@ -420,7 +437,8 @@
   static void set_shared_dictionary(HashtableBucket<mtClass>* t, int length,
                                     int number_of_entries);
   // Printing
-  static void print()                   PRODUCT_RETURN;
+  static void print(bool details = true);
+  static void print_shared(bool details = true);
   static void print_class_statistics()  PRODUCT_RETURN;
   static void print_method_statistics() PRODUCT_RETURN;
 
@@ -509,7 +527,7 @@
 
   static void load_abstract_ownable_synchronizer_klass(TRAPS);
 
-private:
+protected:
   // Tells whether ClassLoader.loadClassInternal is present
   static bool has_loadClassInternal()       { return _has_loadClassInternal; }
 
@@ -545,7 +563,7 @@
 
   // Register a new class loader
   static ClassLoaderData* register_loader(Handle class_loader, TRAPS);
-private:
+protected:
   // Mirrors for primitive classes (created eagerly)
   static oop check_mirror(oop m) {
     assert(m != NULL, "mirror not initialized");
@@ -614,7 +632,7 @@
   static void delete_resolution_error(ConstantPool* pool);
   static Symbol* find_resolution_error(constantPoolHandle pool, int which);
 
- private:
+ protected:
 
   enum Constants {
     _loader_constraint_size = 107,                     // number of entries in constraint table
@@ -665,7 +683,7 @@
   friend class CounterDecay;
   static Klass* try_get_next_class();
 
-private:
+protected:
   static void validate_protection_domain(instanceKlassHandle klass,
                                          Handle class_loader,
                                          Handle protection_domain, TRAPS);
@@ -692,10 +710,10 @@
   static instanceKlassHandle find_or_define_instance_class(Symbol* class_name,
                                                 Handle class_loader,
                                                 instanceKlassHandle k, TRAPS);
-  static instanceKlassHandle load_shared_class(Symbol* class_name,
-                                               Handle class_loader, TRAPS);
   static instanceKlassHandle load_shared_class(instanceKlassHandle ik,
-                                               Handle class_loader, TRAPS);
+                                               Handle class_loader,
+                                               Handle protection_domain,
+                                               TRAPS);
   static instanceKlassHandle load_instance_class(Symbol* class_name, Handle class_loader, TRAPS);
   static Handle compute_loader_lock_object(Handle class_loader, TRAPS);
   static void check_loader_lock_contention(Handle loader_lock, TRAPS);
@@ -703,9 +721,12 @@
   static bool is_parallelDefine(Handle class_loader);
 
 public:
+  static instanceKlassHandle load_shared_class(Symbol* class_name,
+                                               Handle class_loader,
+                                               TRAPS);
   static bool is_ext_class_loader(Handle class_loader);
 
-private:
+protected:
   static Klass* find_shared_class(Symbol* class_name);
 
   // Setup link to hierarchy
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/classfile/systemDictionaryShared.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+
+#ifndef SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
+#define SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
+
+#include "classfile/systemDictionary.hpp"
+
+class SystemDictionaryShared: public SystemDictionary {
+public:
+  static void initialize(TRAPS) {}
+  static instanceKlassHandle find_or_load_shared_class(Symbol* class_name,
+                                                       Handle class_loader,
+                                                       TRAPS) {
+    return instanceKlassHandle();
+  }
+  static void roots_oops_do(OopClosure* blk) {}
+  static void oops_do(OopClosure* f) {}
+  static bool is_sharing_possible(ClassLoaderData* loader_data) {
+    oop class_loader = loader_data->class_loader();
+    return (class_loader == NULL);
+  }
+};
+
+#endif // SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
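
The hunks above change a number of SystemDictionary sections from private to protected, and the new systemDictionaryShared.hpp introduces SystemDictionaryShared as a subclass of it. A minimal standalone sketch of why that access change matters, using hypothetical BaseDict/SharedDict/find_entry names rather than the real HotSpot classes:

    #include <iostream>

    // Sketch of the private -> protected change above: a subclass of a class
    // with only static members may call protected statics, but not private
    // ones. BaseDict/SharedDict/find_entry are hypothetical stand-ins for
    // SystemDictionary/SystemDictionaryShared.
    class BaseDict {
    public:
      static bool lookup(const char* name) { return find_entry(name); }
    protected:                    // were this 'private', SharedDict would not compile
      static bool find_entry(const char* name) { return name != nullptr; }
    };

    class SharedDict : public BaseDict {
    public:
      static bool lookup_shared(const char* name) {
        return find_entry(name);  // allowed because find_entry is protected
      }
    };

    int main() {
      std::cout << SharedDict::lookup_shared("java/lang/Object") << std::endl;
      return 0;
    }
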
--- a/src/share/vm/classfile/verifier.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/classfile/verifier.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -43,7 +43,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/javaCalls.hpp"
-#include "runtime/orderAccess.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 #ifdef TARGET_ARCH_x86
 # include "bytes_x86.hpp"
@@ -1553,14 +1553,14 @@
         case Bytecodes::_invokespecial :
         case Bytecodes::_invokestatic :
           verify_invoke_instructions(
-            &bcs, code_length, &current_frame,
-            &this_uninit, return_type, cp, CHECK_VERIFY(this));
+            &bcs, code_length, &current_frame, (bci >= ex_min && bci < ex_max),
+            &this_uninit, return_type, cp, &stackmap_table, CHECK_VERIFY(this));
           no_control_flow = false; break;
         case Bytecodes::_invokeinterface :
         case Bytecodes::_invokedynamic :
           verify_invoke_instructions(
-            &bcs, code_length, &current_frame,
-            &this_uninit, return_type, cp, CHECK_VERIFY(this));
+            &bcs, code_length, &current_frame, (bci >= ex_min && bci < ex_max),
+            &this_uninit, return_type, cp, &stackmap_table, CHECK_VERIFY(this));
           no_control_flow = false; break;
         case Bytecodes::_new :
         {
@@ -2408,8 +2408,9 @@
 
 void ClassVerifier::verify_invoke_init(
     RawBytecodeStream* bcs, u2 ref_class_index, VerificationType ref_class_type,
-    StackMapFrame* current_frame, u4 code_length, bool *this_uninit,
-    constantPoolHandle cp, TRAPS) {
+    StackMapFrame* current_frame, u4 code_length, bool in_try_block,
+    bool *this_uninit, constantPoolHandle cp, StackMapTable* stackmap_table,
+    TRAPS) {
   u2 bci = bcs->bci();
   VerificationType type = current_frame->pop_stack(
     VerificationType::reference_check(), CHECK_VERIFY(this));
@@ -2425,28 +2426,36 @@
       return;
     }
 
-    // Check if this call is done from inside of a TRY block.  If so, make
-    // sure that all catch clause paths end in a throw.  Otherwise, this
-    // can result in returning an incomplete object.
-    ExceptionTable exhandlers(_method());
-    int exlength = exhandlers.length();
-    for(int i = 0; i < exlength; i++) {
-      u2 start_pc = exhandlers.start_pc(i);
-      u2 end_pc = exhandlers.end_pc(i);
+    // If this invokespecial call is done from inside of a TRY block then make
+    // sure that all catch clause paths end in a throw.  Otherwise, this can
+    // result in returning an incomplete object.
+    if (in_try_block) {
+      ExceptionTable exhandlers(_method());
+      int exlength = exhandlers.length();
+      for(int i = 0; i < exlength; i++) {
+        u2 start_pc = exhandlers.start_pc(i);
+        u2 end_pc = exhandlers.end_pc(i);
 
-      if (bci >= start_pc && bci < end_pc) {
-        if (!ends_in_athrow(exhandlers.handler_pc(i))) {
-          verify_error(ErrorContext::bad_code(bci),
-            "Bad <init> method call from after the start of a try block");
-          return;
-        } else if (VerboseVerification) {
-          ResourceMark rm;
-          tty->print_cr(
-            "Survived call to ends_in_athrow(): %s",
-                        current_class()->name()->as_C_string());
+        if (bci >= start_pc && bci < end_pc) {
+          if (!ends_in_athrow(exhandlers.handler_pc(i))) {
+            verify_error(ErrorContext::bad_code(bci),
+              "Bad <init> method call from after the start of a try block");
+            return;
+          } else if (VerboseVerification) {
+            ResourceMark rm;
+            tty->print_cr(
+              "Survived call to ends_in_athrow(): %s",
+              current_class()->name()->as_C_string());
+          }
         }
       }
-    }
+
+      // Check the exception handler target stackmaps with the locals from the
+      // incoming stackmap (before initialize_object() changes them to outgoing
+      // state).
+      verify_exception_handler_targets(bci, true, current_frame,
+                                       stackmap_table, CHECK_VERIFY(this));
+    } // in_try_block
 
     current_frame->initialize_object(type, current_type());
     *this_uninit = true;
@@ -2500,6 +2509,13 @@
         }
       }
     }
+    // Check the exception handler target stackmaps with the locals from the
+    // incoming stackmap (before initialize_object() changes them to outgoing
+    // state).
+    if (in_try_block) {
+      verify_exception_handler_targets(bci, *this_uninit, current_frame,
+                                       stackmap_table, CHECK_VERIFY(this));
+    }
     current_frame->initialize_object(type, new_class_type);
   } else {
     verify_error(ErrorContext::bad_type(bci, current_frame->stack_top_ctx()),
@@ -2528,8 +2544,8 @@
 
 void ClassVerifier::verify_invoke_instructions(
     RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame,
-    bool *this_uninit, VerificationType return_type,
-    constantPoolHandle cp, TRAPS) {
+    bool in_try_block, bool *this_uninit, VerificationType return_type,
+    constantPoolHandle cp, StackMapTable* stackmap_table, TRAPS) {
   // Make sure the constant pool item is the right type
   u2 index = bcs->get_index_u2();
   Bytecodes::Code opcode = bcs->raw_code();
@@ -2699,7 +2715,8 @@
       opcode != Bytecodes::_invokedynamic) {
     if (method_name == vmSymbols::object_initializer_name()) {  // <init> method
       verify_invoke_init(bcs, index, ref_class_type, current_frame,
-        code_length, this_uninit, cp, CHECK_VERIFY(this));
+        code_length, in_try_block, this_uninit, cp, stackmap_table,
+        CHECK_VERIFY(this));
     } else {   // other methods
       // Ensures that target class is assignable to method class.
       if (opcode == Bytecodes::_invokespecial) {
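
The verifier change threads an in_try_block flag, computed at the call sites above as (bci >= ex_min && bci < ex_max), into verify_invoke_init so that the exception-table walk and the extra verify_exception_handler_targets call run only when the <init> invocation really sits inside a protected pc range. A rough standalone sketch of that range test against an exception table; ExceptionEntry and in_try_block here are illustrative stand-ins, not HotSpot types:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Hypothetical exception-table entry: the protected pc range is
    // [start_pc, end_pc), matching 'bci >= start_pc && bci < end_pc' above.
    struct ExceptionEntry {
      uint16_t start_pc;
      uint16_t end_pc;
      uint16_t handler_pc;
    };

    // Returns true if the bytecode index lies inside any protected region.
    static bool in_try_block(uint16_t bci, const std::vector<ExceptionEntry>& table) {
      for (const ExceptionEntry& e : table) {
        if (bci >= e.start_pc && bci < e.end_pc) {
          return true;
        }
      }
      return false;
    }

    int main() {
      std::vector<ExceptionEntry> table = { {4, 12, 20} };  // one try block covering pc 4..11
      std::cout << in_try_block(3, table) << " "            // 0: before the try block
                << in_try_block(8, table) << std::endl;     // 1: inside it
      return 0;
    }
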
--- a/src/share/vm/classfile/verifier.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/classfile/verifier.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -301,8 +301,9 @@
 
   void verify_invoke_init(
     RawBytecodeStream* bcs, u2 ref_index, VerificationType ref_class_type,
-    StackMapFrame* current_frame, u4 code_length, bool* this_uninit,
-    constantPoolHandle cp, TRAPS);
+    StackMapFrame* current_frame, u4 code_length, bool in_try_block,
+    bool* this_uninit, constantPoolHandle cp, StackMapTable* stackmap_table,
+    TRAPS);
 
   // Used by ends_in_athrow() to push all handlers that contain bci onto
   // the handler_stack, if the handler is not already on the stack.
@@ -316,8 +317,8 @@
 
   void verify_invoke_instructions(
     RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame,
-    bool* this_uninit, VerificationType return_type,
-    constantPoolHandle cp, TRAPS);
+    bool in_try_block, bool* this_uninit, VerificationType return_type,
+    constantPoolHandle cp, StackMapTable* stackmap_table, TRAPS);
 
   VerificationType get_newarray_type(u2 index, u2 bci, TRAPS);
   void verify_anewarray(u2 bci, u2 index, constantPoolHandle cp,
--- a/src/share/vm/classfile/vmSymbols.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/classfile/vmSymbols.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -79,6 +79,7 @@
   template(java_lang_ref_WeakReference,               "java/lang/ref/WeakReference")              \
   template(java_lang_ref_FinalReference,              "java/lang/ref/FinalReference")             \
   template(java_lang_ref_PhantomReference,            "java/lang/ref/PhantomReference")           \
+  template(sun_misc_Cleaner,                          "sun/misc/Cleaner")                         \
   template(java_lang_ref_Finalizer,                   "java/lang/ref/Finalizer")                  \
   template(java_lang_reflect_AccessibleObject,        "java/lang/reflect/AccessibleObject")       \
   template(java_lang_reflect_Method,                  "java/lang/reflect/Method")                 \
@@ -91,11 +92,17 @@
   template(java_lang_CharSequence,                    "java/lang/CharSequence")                   \
   template(java_lang_SecurityManager,                 "java/lang/SecurityManager")                \
   template(java_security_AccessControlContext,        "java/security/AccessControlContext")       \
+  template(java_security_CodeSource,                  "java/security/CodeSource")                 \
   template(java_security_ProtectionDomain,            "java/security/ProtectionDomain")           \
+  template(java_security_SecureClassLoader,           "java/security/SecureClassLoader")          \
+  template(java_net_URLClassLoader,                   "java/net/URLClassLoader")                  \
+  template(java_net_URL,                              "java/net/URL")                             \
+  template(java_util_jar_Manifest,                    "java/util/jar/Manifest")                   \
   template(impliesCreateAccessControlContext_name,    "impliesCreateAccessControlContext")        \
   template(java_io_OutputStream,                      "java/io/OutputStream")                     \
   template(java_io_Reader,                            "java/io/Reader")                           \
   template(java_io_BufferedReader,                    "java/io/BufferedReader")                   \
+  template(java_io_File,                              "java/io/File")                             \
   template(java_io_FileInputStream,                   "java/io/FileInputStream")                  \
   template(java_io_ByteArrayInputStream,              "java/io/ByteArrayInputStream")             \
   template(java_io_Serializable,                      "java/io/Serializable")                     \
@@ -106,9 +113,11 @@
   template(java_util_Hashtable,                       "java/util/Hashtable")                      \
   template(java_lang_Compiler,                        "java/lang/Compiler")                       \
   template(sun_misc_Signal,                           "sun/misc/Signal")                          \
+  template(sun_misc_Launcher,                         "sun/misc/Launcher")                        \
   template(java_lang_AssertionStatusDirectives,       "java/lang/AssertionStatusDirectives")      \
   template(getBootClassPathEntryForClass_name,        "getBootClassPathEntryForClass")            \
   template(sun_misc_PostVMInitHook,                   "sun/misc/PostVMInitHook")                  \
+  template(sun_misc_Launcher_AppClassLoader,          "sun/misc/Launcher$AppClassLoader")         \
   template(sun_misc_Launcher_ExtClassLoader,          "sun/misc/Launcher$ExtClassLoader")         \
                                                                                                   \
   /* Java runtime version access */                                                               \
@@ -463,6 +472,14 @@
   template(signers_name,                              "signers_name")                             \
   template(loader_data_name,                          "loader_data")                              \
   template(dependencies_name,                         "dependencies")                             \
+  template(input_stream_void_signature,               "(Ljava/io/InputStream;)V")                 \
+  template(getFileURL_name,                           "getFileURL")                               \
+  template(getFileURL_signature,                      "(Ljava/io/File;)Ljava/net/URL;")           \
+  template(definePackageInternal_name,                "definePackageInternal")                    \
+  template(definePackageInternal_signature,           "(Ljava/lang/String;Ljava/util/jar/Manifest;Ljava/net/URL;)V") \
+  template(getProtectionDomain_name,                  "getProtectionDomain")                      \
+  template(getProtectionDomain_signature,             "(Ljava/security/CodeSource;)Ljava/security/ProtectionDomain;") \
+  template(url_code_signer_array_void_signature,      "(Ljava/net/URL;[Ljava/security/CodeSigner;)V") \
                                                                                                   \
   /* non-intrinsic name/signature pairs: */                                                       \
   template(register_method_name,                      "register")                                 \
@@ -639,6 +656,7 @@
   template(serializePropertiesToByteArray_signature,   "()[B")                                                    \
   template(serializeAgentPropertiesToByteArray_name,   "serializeAgentPropertiesToByteArray")                     \
   template(classRedefinedCount_name,                   "classRedefinedCount")                                     \
+  template(classLoader_name,                           "classLoader")                                             \
                                                                                                                   \
   /* trace signatures */                                                                                          \
   TRACE_TEMPLATES(template)                                                                                       \
@@ -838,6 +856,11 @@
    do_name(     encodeISOArray_name,                             "encodeISOArray")                                      \
    do_signature(encodeISOArray_signature,                        "([CI[BII)I")                                          \
                                                                                                                         \
+  do_class(java_math_BigInteger,                      "java/math/BigInteger")                                           \
+  do_intrinsic(_multiplyToLen,      java_math_BigInteger, multiplyToLen_name, multiplyToLen_signature, F_R)             \
+   do_name(     multiplyToLen_name,                             "multiplyToLen")                                        \
+   do_signature(multiplyToLen_signature,                        "([II[II[I)[I")                                         \
+                                                                                                                        \
   /* java/lang/ref/Reference */                                                                                         \
   do_intrinsic(_Reference_get,            java_lang_ref_Reference, get_name,    void_object_signature, F_R)             \
                                                                                                                         \
@@ -856,6 +879,26 @@
    do_name(     decrypt_name,                                      "decrypt")                                           \
    do_signature(byteArray_int_int_byteArray_int_signature,         "([BII[BI)I")                                        \
                                                                                                                         \
+  /* support for sun.security.provider.SHA */                                                                           \
+  do_class(sun_security_provider_sha,                              "sun/security/provider/SHA")                         \
+  do_intrinsic(_sha_implCompress, sun_security_provider_sha, implCompress_name, implCompress_signature, F_R)            \
+   do_name(     implCompress_name,                                 "implCompress")                                      \
+   do_signature(implCompress_signature,                            "([BI)V")                                            \
+                                                                                                                        \
+  /* support for sun.security.provider.SHA2 */                                                                          \
+  do_class(sun_security_provider_sha2,                             "sun/security/provider/SHA2")                        \
+  do_intrinsic(_sha2_implCompress, sun_security_provider_sha2, implCompress_name, implCompress_signature, F_R)          \
+                                                                                                                        \
+  /* support for sun.security.provider.SHA5 */                                                                          \
+  do_class(sun_security_provider_sha5,                             "sun/security/provider/SHA5")                        \
+  do_intrinsic(_sha5_implCompress, sun_security_provider_sha5, implCompress_name, implCompress_signature, F_R)          \
+                                                                                                                        \
+  /* support for sun.security.provider.DigestBase */                                                                    \
+  do_class(sun_security_provider_digestbase,                       "sun/security/provider/DigestBase")                  \
+  do_intrinsic(_digestBase_implCompressMB, sun_security_provider_digestbase, implCompressMB_name, implCompressMB_signature, F_R)   \
+   do_name(     implCompressMB_name,                               "implCompressMultiBlock")                            \
+   do_signature(implCompressMB_signature,                          "([BII)I")                                           \
+                                                                                                                        \
   /* support for java.util.zip */                                                                                       \
   do_class(java_util_zip_CRC32,           "java/util/zip/CRC32")                                                        \
   do_intrinsic(_updateCRC32,               java_util_zip_CRC32,   update_name, int2_int_signature,               F_SN)  \
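
The new rows above follow the existing vmSymbols.hpp X-macro style, where each template/do_name/do_signature entry pairs an identifier with a UTF-8 string. A generic sketch of that idiom with a hypothetical SYMBOL_LIST; it assumes nothing about HotSpot's actual expansion beyond the one-row-per-symbol pattern visible above:

    #include <iostream>

    // Hypothetical X-macro list in the spirit of the template(...) rows above:
    // each row names an identifier and the string it stands for.
    #define SYMBOL_LIST(template)                                      \
      template(java_math_BigInteger,    "java/math/BigInteger")        \
      template(multiplyToLen_name,      "multiplyToLen")               \
      template(multiplyToLen_signature, "([II[II[I)[I")

    // First expansion: an enum constant per row.
    enum class Sym {
    #define AS_ENUM(id, str) id,
      SYMBOL_LIST(AS_ENUM)
    #undef AS_ENUM
      LIMIT
    };

    // Second expansion: the parallel string table.
    static const char* const sym_strings[] = {
    #define AS_STRING(id, str) str,
      SYMBOL_LIST(AS_STRING)
    #undef AS_STRING
    };

    int main() {
      std::cout << sym_strings[static_cast<int>(Sym::multiplyToLen_signature)] << std::endl;
      return 0;
    }
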
--- a/src/share/vm/code/codeBlob.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/code/codeBlob.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -253,6 +253,7 @@
 
 void BufferBlob::free( BufferBlob *blob ) {
   ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+  blob->flush();
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     CodeCache::free((CodeBlob*)blob);
--- a/src/share/vm/code/codeCache.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/code/codeCache.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -337,6 +337,11 @@
 // Walk the list of methods which might contain non-perm oops.
 void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
   assert_locked_or_safepoint(CodeCache_lock);
+
+  if (UseG1GC) {
+    return;
+  }
+
   debug_only(mark_scavenge_root_nmethods());
 
   for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
@@ -362,6 +367,11 @@
 
 void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
   assert_locked_or_safepoint(CodeCache_lock);
+
+  if (UseG1GC) {
+    return;
+  }
+
   nm->set_on_scavenge_root_list();
   nm->set_scavenge_root_link(_scavenge_root_nmethods);
   set_scavenge_root_nmethods(nm);
@@ -370,6 +380,11 @@
 
 void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
   assert_locked_or_safepoint(CodeCache_lock);
+
+  if (UseG1GC) {
+    return;
+  }
+
   print_trace("drop_scavenge_root", nm);
   nmethod* last = NULL;
   nmethod* cur = scavenge_root_nmethods();
@@ -391,6 +406,11 @@
 
 void CodeCache::prune_scavenge_root_nmethods() {
   assert_locked_or_safepoint(CodeCache_lock);
+
+  if (UseG1GC) {
+    return;
+  }
+
   debug_only(mark_scavenge_root_nmethods());
 
   nmethod* last = NULL;
@@ -423,6 +443,10 @@
 
 #ifndef PRODUCT
 void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
+  if (UseG1GC) {
+    return;
+  }
+
   // While we are here, verify the integrity of the list.
   mark_scavenge_root_nmethods();
   for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
@@ -463,9 +487,36 @@
 }
 #endif //PRODUCT
 
+void CodeCache::verify_clean_inline_caches() {
+#ifdef ASSERT
+  FOR_ALL_ALIVE_BLOBS(cb) {
+    if (cb->is_nmethod()) {
+      nmethod* nm = (nmethod*)cb;
+      assert(!nm->is_unloaded(), "Tautology");
+      nm->verify_clean_inline_caches();
+      nm->verify();
+    }
+  }
+#endif
+}
+
+void CodeCache::verify_icholder_relocations() {
+#ifdef ASSERT
+  // make sure that we aren't leaking icholders
+  int count = 0;
+  FOR_ALL_BLOBS(cb) {
+    if (cb->is_nmethod()) {
+      nmethod* nm = (nmethod*)cb;
+      count += nm->verify_icholder_relocations();
+    }
+  }
+
+  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
+         CompiledICHolder::live_count(), "must agree");
+#endif
+}
 
 void CodeCache::gc_prologue() {
-  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
 }
 
 void CodeCache::gc_epilogue() {
@@ -478,41 +529,15 @@
         nm->cleanup_inline_caches();
       }
       DEBUG_ONLY(nm->verify());
-      nm->fix_oop_relocations();
+      DEBUG_ONLY(nm->verify_oop_relocations());
     }
   }
   set_needs_cache_clean(false);
   prune_scavenge_root_nmethods();
-  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
 
-#ifdef ASSERT
-  // make sure that we aren't leaking icholders
-  int count = 0;
-  FOR_ALL_BLOBS(cb) {
-    if (cb->is_nmethod()) {
-      RelocIterator iter((nmethod*)cb);
-      while(iter.next()) {
-        if (iter.type() == relocInfo::virtual_call_type) {
-          if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
-            CompiledIC *ic = CompiledIC_at(iter.reloc());
-            if (TraceCompiledIC) {
-              tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
-              ic->print();
-            }
-            assert(ic->cached_icholder() != NULL, "must be non-NULL");
-            count++;
-          }
-        }
-      }
-    }
-  }
-
-  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
-         CompiledICHolder::live_count(), "must agree");
-#endif
+  verify_icholder_relocations();
 }
 
-
 void CodeCache::verify_oops() {
   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   VerifyOopClosure voc;
@@ -687,7 +712,9 @@
 void CodeCache::mark_all_nmethods_for_deoptimization() {
   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   FOR_ALL_ALIVE_NMETHODS(nm) {
-    nm->mark_for_deoptimization();
+    if (!nm->method()->is_method_handle_intrinsic()) {
+      nm->mark_for_deoptimization();
+    }
   }
 }
 
--- a/src/share/vm/code/codeCache.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/code/codeCache.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -134,10 +134,6 @@
   // to) any unmarked codeBlobs in the cache.  Sets "marked_for_unloading"
   // to "true" iff some code got unloaded.
   static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
-  static void oops_do(OopClosure* f) {
-    CodeBlobToOopClosure oopc(f, /*do_marking=*/ false);
-    blobs_do(&oopc);
-  }
   static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
   static void scavenge_root_nmethods_do(CodeBlobClosure* f);
 
@@ -172,6 +168,9 @@
   static void set_needs_cache_clean(bool v)      { _needs_cache_clean = v;    }
   static void clear_inline_caches();             // clear all inline caches
 
+  static void verify_clean_inline_caches();
+  static void verify_icholder_relocations();
+
   // Deoptimization
   static int  mark_for_deoptimization(DepChange& changes);
 #ifdef HOTSWAP
--- a/src/share/vm/code/compiledIC.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/code/compiledIC.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -99,13 +99,13 @@
   }
 
   {
-  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
+    MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
 #ifdef ASSERT
-  CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
-  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
+    CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
+    assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
 #endif
-  _ic_call->set_destination_mt_safe(entry_point);
-}
+     _ic_call->set_destination_mt_safe(entry_point);
+  }
 
   if (is_optimized() || is_icstub) {
     // Optimized call sites don't have a cache value and ICStub call
@@ -159,6 +159,50 @@
 //-----------------------------------------------------------------------------
 // High-level access to an inline cache. Guaranteed to be MT-safe.
 
+void CompiledIC::initialize_from_iter(RelocIterator* iter) {
+  assert(iter->addr() == _ic_call->instruction_address(), "must find ic_call");
+
+  if (iter->type() == relocInfo::virtual_call_type) {
+    virtual_call_Relocation* r = iter->virtual_call_reloc();
+    _is_optimized = false;
+    _value = nativeMovConstReg_at(r->cached_value());
+  } else {
+    assert(iter->type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
+    _is_optimized = true;
+    _value = NULL;
+  }
+}
+
+CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
+  : _ic_call(call)
+{
+  address ic_call = _ic_call->instruction_address();
+
+  assert(ic_call != NULL, "ic_call address must be set");
+  assert(nm != NULL, "must pass nmethod");
+  assert(nm->contains(ic_call), "must be in nmethod");
+
+  // Search for the ic_call at the given address.
+  RelocIterator iter(nm, ic_call, ic_call+1);
+  bool ret = iter.next();
+  assert(ret == true, "relocInfo must exist at this address");
+  assert(iter.addr() == ic_call, "must find ic_call");
+
+  initialize_from_iter(&iter);
+}
+
+CompiledIC::CompiledIC(RelocIterator* iter)
+  : _ic_call(nativeCall_at(iter->addr()))
+{
+  address ic_call = _ic_call->instruction_address();
+
+  nmethod* nm = iter->code();
+  assert(ic_call != NULL, "ic_call address must be set");
+  assert(nm != NULL, "must pass nmethod");
+  assert(nm->contains(ic_call), "must be in nmethod");
+
+  initialize_from_iter(iter);
+}
 
 bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
   assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
@@ -487,7 +531,7 @@
 void CompiledStaticCall::set_to_clean() {
   assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
   // Reset call site
-  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
+  MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
 #ifdef ASSERT
   CodeBlob* cb = CodeCache::find_blob_unsafe(this);
   assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
@@ -553,6 +597,7 @@
   } else {
     // Callee is interpreted code.  In any case entering the interpreter
     // puts a converter-frame on the stack to save arguments.
+    assert(!m->is_method_handle_intrinsic(), "Compiled code should never call interpreter MH intrinsics");
     info._to_interpreter = true;
     info._entry      = m()->get_c2i_entry();
   }
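
The new CompiledIC(RelocIterator*) constructor and initialize_from_iter() let a caller that is already walking relocations build the wrapper without repeating the RelocIterator search done by the (nmethod*, NativeCall*) constructor; the nmethod.cpp hunk further down switches such a loop from CompiledIC_at(iter.reloc()) to CompiledIC_at(&iter). A loose sketch of the same idea of reusing an iterator's position instead of re-searching, with hypothetical Item/Cursor/Wrapper types:

    #include <cassert>
    #include <vector>

    // Hypothetical model of the two construction paths: by address (requires
    // a fresh search) versus directly from a cursor that is already positioned
    // on the element, as the new CompiledIC(RelocIterator*) constructor does.
    struct Item { int addr; int payload; };

    struct Cursor {
      const std::vector<Item>* items;
      std::size_t pos;
      const Item& current() const { return (*items)[pos]; }
    };

    struct Wrapper {
      int payload;
      // Path 1: search for the address, then initialize (old CompiledIC_at style).
      Wrapper(const std::vector<Item>& items, int addr) : payload(0) {
        for (const Item& it : items) {
          if (it.addr == addr) { payload = it.payload; return; }
        }
        assert(false && "relocInfo must exist at this address");
      }
      // Path 2: initialize straight from an already-positioned cursor.
      explicit Wrapper(const Cursor& c) : payload(c.current().payload) {}
    };

    int main() {
      std::vector<Item> items = { {0x10, 7}, {0x20, 9} };
      Cursor c{&items, 1};
      Wrapper by_search(items, 0x20);   // walks the list again
      Wrapper by_cursor(c);             // reuses the iteration position
      assert(by_search.payload == by_cursor.payload);
      return 0;
    }
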
--- a/src/share/vm/code/compiledIC.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/code/compiledIC.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -150,6 +150,9 @@
   bool          _is_optimized;  // an optimized virtual call (i.e., no compiled IC)
 
   CompiledIC(nmethod* nm, NativeCall* ic_call);
+  CompiledIC(RelocIterator* iter);
+
+  void initialize_from_iter(RelocIterator* iter);
 
   static bool is_icholder_entry(address entry);
 
@@ -183,6 +186,7 @@
   friend CompiledIC* CompiledIC_before(nmethod* nm, address return_addr);
   friend CompiledIC* CompiledIC_at(nmethod* nm, address call_site);
   friend CompiledIC* CompiledIC_at(Relocation* call_site);
+  friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);
 
   // This is used to release CompiledICHolder*s from nmethods that
   // are about to be freed.  The callsite might contain other stale
@@ -263,6 +267,13 @@
   return c_ic;
 }
 
+inline CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) {
+  assert(reloc_iter->type() == relocInfo::virtual_call_type ||
+      reloc_iter->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
+  CompiledIC* c_ic = new CompiledIC(reloc_iter);
+  c_ic->verify();
+  return c_ic;
+}
 
 //-----------------------------------------------------------------------------
 // The CompiledStaticCall represents a call to a static method in the compiled
--- a/src/share/vm/code/dependencies.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/code/dependencies.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
 #include "oops/objArrayKlass.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/handles.inline.hpp"
+#include "runtime/thread.inline.hpp"
 #include "utilities/copy.hpp"
 
 
@@ -618,56 +619,66 @@
 // for the sake of the compiler log, print out current dependencies:
 void Dependencies::log_all_dependencies() {
   if (log() == NULL)  return;
-  ciBaseObject* args[max_arg_count];
+  ResourceMark rm;
   for (int deptv = (int)FIRST_TYPE; deptv < (int)TYPE_LIMIT; deptv++) {
     DepType dept = (DepType)deptv;
     GrowableArray<ciBaseObject*>* deps = _deps[dept];
-    if (deps->length() == 0)  continue;
+    int deplen = deps->length();
+    if (deplen == 0) {
+      continue;
+    }
     int stride = dep_args(dept);
+    GrowableArray<ciBaseObject*>* ciargs = new GrowableArray<ciBaseObject*>(stride);
     for (int i = 0; i < deps->length(); i += stride) {
       for (int j = 0; j < stride; j++) {
         // flush out the identities before printing
-        args[j] = deps->at(i+j);
+        ciargs->push(deps->at(i+j));
       }
-      write_dependency_to(log(), dept, stride, args);
+      write_dependency_to(log(), dept, ciargs);
+      ciargs->clear();
     }
+    guarantee(deplen == deps->length(), "deps array cannot grow inside nested ResourceMark scope");
   }
 }
 
 void Dependencies::write_dependency_to(CompileLog* log,
                                        DepType dept,
-                                       int nargs, DepArgument args[],
+                                       GrowableArray<DepArgument>* args,
                                        Klass* witness) {
   if (log == NULL) {
     return;
   }
+  ResourceMark rm;
   ciEnv* env = ciEnv::current();
-  ciBaseObject* ciargs[max_arg_count];
-  assert(nargs <= max_arg_count, "oob");
-  for (int j = 0; j < nargs; j++) {
-    if (args[j].is_oop()) {
-      ciargs[j] = env->get_object(args[j].oop_value());
+  GrowableArray<ciBaseObject*>* ciargs = new GrowableArray<ciBaseObject*>(args->length());
+  for (GrowableArrayIterator<DepArgument> it = args->begin(); it != args->end(); ++it) {
+    DepArgument arg = *it;
+    if (arg.is_oop()) {
+      ciargs->push(env->get_object(arg.oop_value()));
     } else {
-      ciargs[j] = env->get_metadata(args[j].metadata_value());
+      ciargs->push(env->get_metadata(arg.metadata_value()));
     }
   }
-  Dependencies::write_dependency_to(log, dept, nargs, ciargs, witness);
+  int argslen = ciargs->length();
+  Dependencies::write_dependency_to(log, dept, ciargs, witness);
+  guarantee(argslen == ciargs->length(), "ciargs array cannot grow inside nested ResourceMark scope");
 }
 
 void Dependencies::write_dependency_to(CompileLog* log,
                                        DepType dept,
-                                       int nargs, ciBaseObject* args[],
+                                       GrowableArray<ciBaseObject*>* args,
                                        Klass* witness) {
-  if (log == NULL)  return;
-  assert(nargs <= max_arg_count, "oob");
-  int argids[max_arg_count];
-  int ctxkj = dep_context_arg(dept);  // -1 if no context arg
-  int j;
-  for (j = 0; j < nargs; j++) {
-    if (args[j]->is_object()) {
-      argids[j] = log->identify(args[j]->as_object());
+  if (log == NULL) {
+    return;
+  }
+  ResourceMark rm;
+  GrowableArray<int>* argids = new GrowableArray<int>(args->length());
+  for (GrowableArrayIterator<ciBaseObject*> it = args->begin(); it != args->end(); ++it) {
+    ciBaseObject* obj = *it;
+    if (obj->is_object()) {
+      argids->push(log->identify(obj->as_object()));
     } else {
-      argids[j] = log->identify(args[j]->as_metadata());
+      argids->push(log->identify(obj->as_metadata()));
     }
   }
   if (witness != NULL) {
@@ -676,16 +687,17 @@
     log->begin_elem("dependency");
   }
   log->print(" type='%s'", dep_name(dept));
-  if (ctxkj >= 0) {
-    log->print(" ctxk='%d'", argids[ctxkj]);
+  const int ctxkj = dep_context_arg(dept);  // -1 if no context arg
+  if (ctxkj >= 0 && ctxkj < argids->length()) {
+    log->print(" ctxk='%d'", argids->at(ctxkj));
   }
   // write remaining arguments, if any.
-  for (j = 0; j < nargs; j++) {
+  for (int j = 0; j < argids->length(); j++) {
     if (j == ctxkj)  continue;  // already logged
     if (j == 1) {
-      log->print(  " x='%d'",    argids[j]);
+      log->print(  " x='%d'",    argids->at(j));
     } else {
-      log->print(" x%d='%d'", j, argids[j]);
+      log->print(" x%d='%d'", j, argids->at(j));
     }
   }
   if (witness != NULL) {
@@ -697,9 +709,12 @@
 
 void Dependencies::write_dependency_to(xmlStream* xtty,
                                        DepType dept,
-                                       int nargs, DepArgument args[],
+                                       GrowableArray<DepArgument>* args,
                                        Klass* witness) {
-  if (xtty == NULL)  return;
+  if (xtty == NULL) {
+    return;
+  }
+  ResourceMark rm;
   ttyLocker ttyl;
   int ctxkj = dep_context_arg(dept);  // -1 if no context arg
   if (witness != NULL) {
@@ -709,23 +724,24 @@
   }
   xtty->print(" type='%s'", dep_name(dept));
   if (ctxkj >= 0) {
-    xtty->object("ctxk", args[ctxkj].metadata_value());
+    xtty->object("ctxk", args->at(ctxkj).metadata_value());
   }
   // write remaining arguments, if any.
-  for (int j = 0; j < nargs; j++) {
+  for (int j = 0; j < args->length(); j++) {
     if (j == ctxkj)  continue;  // already logged
+    DepArgument arg = args->at(j);
     if (j == 1) {
-      if (args[j].is_oop()) {
-        xtty->object("x", args[j].oop_value());
+      if (arg.is_oop()) {
+        xtty->object("x", arg.oop_value());
       } else {
-        xtty->object("x", args[j].metadata_value());
+        xtty->object("x", arg.metadata_value());
       }
     } else {
       char xn[10]; sprintf(xn, "x%d", j);
-      if (args[j].is_oop()) {
-        xtty->object(xn, args[j].oop_value());
+      if (arg.is_oop()) {
+        xtty->object(xn, arg.oop_value());
       } else {
-        xtty->object(xn, args[j].metadata_value());
+        xtty->object(xn, arg.metadata_value());
       }
     }
   }
@@ -736,7 +752,7 @@
   xtty->end_elem();
 }
 
-void Dependencies::print_dependency(DepType dept, int nargs, DepArgument args[],
+void Dependencies::print_dependency(DepType dept, GrowableArray<DepArgument>* args,
                                     Klass* witness, outputStream* st) {
   ResourceMark rm;
   ttyLocker ttyl;   // keep the following output all in one block
@@ -745,8 +761,8 @@
                 dep_name(dept));
   // print arguments
   int ctxkj = dep_context_arg(dept);  // -1 if no context arg
-  for (int j = 0; j < nargs; j++) {
-    DepArgument arg = args[j];
+  for (int j = 0; j < args->length(); j++) {
+    DepArgument arg = args->at(j);
     bool put_star = false;
     if (arg.is_null())  continue;
     const char* what;
@@ -756,7 +772,7 @@
       put_star = !Dependencies::is_concrete_klass((Klass*)arg.metadata_value());
     } else if (arg.is_method()) {
       what = "method ";
-      put_star = !Dependencies::is_concrete_method((Method*)arg.metadata_value());
+      put_star = !Dependencies::is_concrete_method((Method*)arg.metadata_value(), NULL);
     } else if (arg.is_klass()) {
       what = "class  ";
     } else {
@@ -782,37 +798,39 @@
 void Dependencies::DepStream::log_dependency(Klass* witness) {
   if (_deps == NULL && xtty == NULL)  return;  // fast cutout for runtime
   ResourceMark rm;
-  int nargs = argument_count();
-  DepArgument args[max_arg_count];
+  const int nargs = argument_count();
+  GrowableArray<DepArgument>* args = new GrowableArray<DepArgument>(nargs);
   for (int j = 0; j < nargs; j++) {
     if (type() == call_site_target_value) {
-      args[j] = argument_oop(j);
+      args->push(argument_oop(j));
     } else {
-      args[j] = argument(j);
+      args->push(argument(j));
     }
   }
+  int argslen = args->length();
   if (_deps != NULL && _deps->log() != NULL) {
     if (ciEnv::current() != NULL) {
       Dependencies::write_dependency_to(_deps->log(),
-                                        type(), nargs, args, witness);
+                                        type(), args, witness);
     } else {
       // Treat the CompileLog as an xmlstream instead
       Dependencies::write_dependency_to((xmlStream*)_deps->log(),
-                                        type(), nargs, args, witness);
+                                        type(), args, witness);
     }
   } else {
-    Dependencies::write_dependency_to(xtty,
-                                      type(), nargs, args, witness);
+    Dependencies::write_dependency_to(xtty, type(), args, witness);
   }
+  guarantee(argslen == args->length(), "args array cannot grow inside nested ResourceMark scope");
 }
 
 void Dependencies::DepStream::print_dependency(Klass* witness, bool verbose, outputStream* st) {
   int nargs = argument_count();
-  DepArgument args[max_arg_count];
+  GrowableArray<DepArgument>* args = new GrowableArray<DepArgument>(nargs);
   for (int j = 0; j < nargs; j++) {
-    args[j] = argument(j);
+    args->push(argument(j));
   }
-  Dependencies::print_dependency(type(), nargs, args, witness, st);
+  int argslen = args->length();
+  Dependencies::print_dependency(type(), args, witness, st);
   if (verbose) {
     if (_code != NULL) {
       st->print("  code: ");
@@ -820,6 +838,7 @@
       st->cr();
     }
   }
+  guarantee(argslen == args->length(), "args array cannot grow inside nested ResourceMark scope");
 }
 
 
@@ -1044,8 +1063,8 @@
         // Static methods don't override non-static so punt
         return true;
       }
-      if (   !Dependencies::is_concrete_method(lm)
-          && !Dependencies::is_concrete_method(m)
+      if (   !Dependencies::is_concrete_method(lm, k)
+          && !Dependencies::is_concrete_method(m, ctxk)
           && lm->method_holder()->is_subtype_of(m->method_holder()))
         // Method m is overridden by lm, but both are non-concrete.
         return true;
@@ -1078,9 +1097,20 @@
   bool is_witness(Klass* k) {
     if (doing_subtype_search()) {
       return Dependencies::is_concrete_klass(k);
+    } else if (!k->oop_is_instance()) {
+      return false; // no methods to find in an array type
     } else {
-      Method* m = InstanceKlass::cast(k)->find_method(_name, _signature);
-      if (m == NULL || !Dependencies::is_concrete_method(m))  return false;
+      // Search class hierarchy first.
+      Method* m = InstanceKlass::cast(k)->find_instance_method(_name, _signature);
+      if (!Dependencies::is_concrete_method(m, k)) {
+        // Check interface defaults also, if any exist.
+        Array<Method*>* default_methods = InstanceKlass::cast(k)->default_methods();
+        if (default_methods == NULL)
+            return false;
+        m = InstanceKlass::cast(k)->find_method(default_methods, _name, _signature);
+        if (!Dependencies::is_concrete_method(m, NULL))
+            return false;
+      }
       _found_methods[_num_participants] = m;
       // Note:  If add_participant(k) is called,
       // the method m will already be memoized for it.
@@ -1284,7 +1314,7 @@
   Klass* chain;       // scratch variable
 #define ADD_SUBCLASS_CHAIN(k)                     {  \
     assert(chaini < CHAINMAX, "oob");                \
-    chain = InstanceKlass::cast(k)->subklass();      \
+    chain = k->subklass();                           \
     if (chain != NULL)  chains[chaini++] = chain;    }
 
   // Look for non-abstract subclasses.
@@ -1295,35 +1325,37 @@
   // (Their subclasses are additional indirect implementors.
   // See InstanceKlass::add_implementor.)
   // (Note:  nof_implementors is always zero for non-interfaces.)
-  int nof_impls = InstanceKlass::cast(context_type)->nof_implementors();
-  if (nof_impls > 1) {
-    // Avoid this case: *I.m > { A.m, C }; B.m > C
-    // Here, I.m has 2 concrete implementations, but m appears unique
-    // as A.m, because the search misses B.m when checking C.
-    // The inherited method B.m was getting missed by the walker
-    // when interface 'I' was the starting point.
-    // %%% Until this is fixed more systematically, bail out.
-    // (Old CHA had the same limitation.)
-    return context_type;
-  }
-  if (nof_impls > 0) {
-    Klass* impl = InstanceKlass::cast(context_type)->implementor();
-    assert(impl != NULL, "just checking");
-    // If impl is the same as the context_type, then more than one
-    // implementor has seen. No exact info in this case.
-    if (impl == context_type) {
-      return context_type;  // report an inexact witness to this sad affair
+  if (top_level_call) {
+    int nof_impls = InstanceKlass::cast(context_type)->nof_implementors();
+    if (nof_impls > 1) {
+      // Avoid this case: *I.m > { A.m, C }; B.m > C
+      // Here, I.m has 2 concrete implementations, but m appears unique
+      // as A.m, because the search misses B.m when checking C.
+      // The inherited method B.m was getting missed by the walker
+      // when interface 'I' was the starting point.
+      // %%% Until this is fixed more systematically, bail out.
+      // (Old CHA had the same limitation.)
+      return context_type;
     }
-    if (do_counts)
-      { NOT_PRODUCT(deps_find_witness_steps++); }
-    if (is_participant(impl)) {
-      if (!participants_hide_witnesses) {
+    if (nof_impls > 0) {
+      Klass* impl = InstanceKlass::cast(context_type)->implementor();
+      assert(impl != NULL, "just checking");
+      // If impl is the same as the context_type, then more than one
+      // implementor has been seen. No exact info in this case.
+      if (impl == context_type) {
+        return context_type;  // report an inexact witness to this sad affair
+      }
+      if (do_counts)
+        { NOT_PRODUCT(deps_find_witness_steps++); }
+      if (is_participant(impl)) {
+        if (!participants_hide_witnesses) {
+          ADD_SUBCLASS_CHAIN(impl);
+        }
+      } else if (is_witness(impl) && !ignore_witness(impl)) {
+        return impl;
+      } else {
         ADD_SUBCLASS_CHAIN(impl);
       }
-    } else if (is_witness(impl) && !ignore_witness(impl)) {
-      return impl;
-    } else {
-      ADD_SUBCLASS_CHAIN(impl);
     }
   }
 
@@ -1371,15 +1403,17 @@
   return true;
 }
 
-bool Dependencies::is_concrete_method(Method* m) {
-  // Statics are irrelevant to virtual call sites.
-  if (m->is_static())  return false;
-
-  // We could also return false if m does not yet appear to be
-  // executed, if the VM version supports this distinction also.
-  // Default methods are considered "concrete" as well.
-  return !m->is_abstract() &&
-         !m->is_overpass(); // error functions aren't concrete
+bool Dependencies::is_concrete_method(Method* m, Klass * k) {
+  // NULL is not a concrete method,
+  // statics are irrelevant to virtual call sites,
+  // abstract methods are not concrete,
+  // overpass (error) methods are not concrete if k is abstract
+  //
+  // note "true" is conservative answer --
+  //     overpass clause is false if k == NULL, implies return true if
+  //     answer depends on overpass clause.
+  return ! ( m == NULL || m -> is_static() || m -> is_abstract() ||
+             m->is_overpass() && k != NULL && k -> is_abstract() );
 }
 
 
@@ -1404,16 +1438,6 @@
   return true;
 }
 
-bool Dependencies::is_concrete_method(ciMethod* m) {
-  // Statics are irrelevant to virtual call sites.
-  if (m->is_static())  return false;
-
-  // We could also return false if m does not yet appear to be
-  // executed, if the VM version supports this distinction also.
-  return !m->is_abstract();
-}
-
-
 bool Dependencies::has_finalizable_subclass(ciInstanceKlass* k) {
   return k->has_finalizable_subclass();
 }
@@ -1627,7 +1651,7 @@
   Klass* wit = wf.find_witness_definer(ctxk);
   if (wit != NULL)  return NULL;  // Too many witnesses.
   Method* fm = wf.found_method(0);  // Will be NULL if num_parts == 0.
-  if (Dependencies::is_concrete_method(m)) {
+  if (Dependencies::is_concrete_method(m, ctxk)) {
     if (fm == NULL) {
       // It turns out that m was always the only implementation.
       fm = m;
@@ -1657,61 +1681,6 @@
   return wf.find_witness_definer(ctxk, changes);
 }
 
-// Find the set of all non-abstract methods under ctxk that match m[0].
-// (The method m[0] must be defined or inherited in ctxk.)
-// Include m itself in the set, unless it is abstract.
-// Fill the given array m[0..(mlen-1)] with this set, and return the length.
-// (The length may be zero if no concrete methods are found anywhere.)
-// If there are too many concrete methods to fit in marray, return -1.
-int Dependencies::find_exclusive_concrete_methods(Klass* ctxk,
-                                                  int mlen,
-                                                  Method* marray[]) {
-  Method* m0 = marray[0];
-  ClassHierarchyWalker wf(m0);
-  assert(wf.check_method_context(ctxk, m0), "proper context");
-  wf.record_witnesses(mlen);
-  bool participants_hide_witnesses = true;
-  Klass* wit = wf.find_witness_definer(ctxk);
-  if (wit != NULL)  return -1;  // Too many witnesses.
-  int num = wf.num_participants();
-  assert(num <= mlen, "oob");
-  // Keep track of whether m is also part of the result set.
-  int mfill = 0;
-  assert(marray[mfill] == m0, "sanity");
-  if (Dependencies::is_concrete_method(m0))
-    mfill++;  // keep m0 as marray[0], the first result
-  for (int i = 0; i < num; i++) {
-    Method* fm = wf.found_method(i);
-    if (fm == m0)  continue;  // Already put this guy in the list.
-    if (mfill == mlen) {
-      return -1;              // Oops.  Too many methods after all!
-    }
-    marray[mfill++] = fm;
-  }
-#ifndef PRODUCT
-  // Make sure the dependency mechanism will pass this discovery:
-  if (VerifyDependencies) {
-    // Turn off dependency tracing while actually testing deps.
-    FlagSetting fs(TraceDependencies, false);
-    switch (mfill) {
-    case 1:
-      guarantee(NULL == (void *)check_unique_concrete_method(ctxk, marray[0]),
-                "verify dep.");
-      break;
-    case 2:
-      guarantee(NULL == (void *)
-                check_exclusive_concrete_methods(ctxk, marray[0], marray[1]),
-                "verify dep.");
-      break;
-    default:
-      ShouldNotReachHere();  // mlen > 2 yet supported
-    }
-  }
-#endif //PRODUCT
-  return mfill;
-}
-
-
 Klass* Dependencies::check_has_no_finalizable_subclasses(Klass* ctxk, KlassDepChange* changes) {
   Klass* search_at = ctxk;
   if (changes != NULL)
@@ -1719,7 +1688,6 @@
   return find_finalizable_subclass(search_at);
 }
 
-
 Klass* Dependencies::check_call_site_target_value(oop call_site, oop method_handle, CallSiteDepChange* changes) {
   assert(call_site    ->is_a(SystemDictionary::CallSite_klass()),     "sanity");
   assert(method_handle->is_a(SystemDictionary::MethodHandle_klass()), "sanity");
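
The two-argument is_concrete_method() introduced above treats NULL, static and abstract methods as non-concrete, and treats an overpass method as non-concrete only when the holder k is known to be abstract; with k == NULL it stays conservative. A standalone sketch of just that boolean, with flat hypothetical stand-ins for Method and Klass:

    #include <cassert>

    // Hypothetical flat stand-ins for Method* / Klass* flags; only the boolean
    // logic of the new is_concrete_method(Method*, Klass*) above is modeled.
    struct M { bool is_static; bool is_abstract; bool is_overpass; };
    struct K { bool is_abstract; };

    static bool is_concrete_method(const M* m, const K* k) {
      return !(m == nullptr || m->is_static || m->is_abstract ||
               (m->is_overpass && k != nullptr && k->is_abstract));
    }

    int main() {
      M plain      = {false, false, false};
      M overpass   = {false, false, true};
      K abstract_k = {true};
      K concrete_k = {false};

      assert(is_concrete_method(&plain, &concrete_k));       // ordinary virtual method
      assert(!is_concrete_method(nullptr, &concrete_k));     // NULL is never concrete
      assert(!is_concrete_method(&overpass, &abstract_k));   // overpass in abstract holder
      assert(is_concrete_method(&overpass, nullptr));        // unknown holder: stay conservative
      return 0;
    }
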
--- a/src/share/vm/code/dependencies.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/code/dependencies.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -377,7 +377,7 @@
   // In that case, there would be a middle ground between concrete
   // and abstract (as defined by the Java language and VM).
   static bool is_concrete_klass(Klass* k);    // k is instantiable
-  static bool is_concrete_method(Method* m);  // m is invocable
+  static bool is_concrete_method(Method* m, Klass* k);  // m is invocable
   static Klass* find_finalizable_subclass(Klass* k);
 
   // These versions of the concreteness queries work through the CI.
@@ -391,7 +391,6 @@
   // not go back into the VM to get their value; they must cache the
   // bit in the CI, either eagerly or lazily.)
   static bool is_concrete_klass(ciInstanceKlass* k); // k appears instantiable
-  static bool is_concrete_method(ciMethod* m);       // m appears invocable
   static bool has_finalizable_subclass(ciInstanceKlass* k);
 
   // As a general rule, it is OK to compile under the assumption that
@@ -438,7 +437,6 @@
   static Klass*    find_unique_concrete_subtype(Klass* ctxk);
   static Method*   find_unique_concrete_method(Klass* ctxk, Method* m);
   static int       find_exclusive_concrete_subtypes(Klass* ctxk, int klen, Klass* k[]);
-  static int       find_exclusive_concrete_methods(Klass* ctxk, int mlen, Method* m[]);
 
   // Create the encoding which will be stored in an nmethod.
   void encode_content_bytes();
@@ -458,20 +456,36 @@
   void copy_to(nmethod* nm);
 
   void log_all_dependencies();
-  void log_dependency(DepType dept, int nargs, ciBaseObject* args[]) {
-    write_dependency_to(log(), dept, nargs, args);
+
+  void log_dependency(DepType dept, GrowableArray<ciBaseObject*>* args) {
+    ResourceMark rm;
+    int argslen = args->length();
+    write_dependency_to(log(), dept, args);
+    guarantee(argslen == args->length(),
+              "args array cannot grow inside nested ResoureMark scope");
   }
+
   void log_dependency(DepType dept,
                       ciBaseObject* x0,
                       ciBaseObject* x1 = NULL,
                       ciBaseObject* x2 = NULL) {
-    if (log() == NULL)  return;
-    ciBaseObject* args[max_arg_count];
-    args[0] = x0;
-    args[1] = x1;
-    args[2] = x2;
-    assert(2 < max_arg_count, "");
-    log_dependency(dept, dep_args(dept), args);
+    if (log() == NULL) {
+      return;
+    }
+    ResourceMark rm;
+    GrowableArray<ciBaseObject*>* ciargs =
+                new GrowableArray<ciBaseObject*>(dep_args(dept));
+    assert (x0 != NULL, "no log x0");
+    ciargs->push(x0);
+
+    if (x1 != NULL) {
+      ciargs->push(x1);
+    }
+    if (x2 != NULL) {
+      ciargs->push(x2);
+    }
+    assert(ciargs->length() == dep_args(dept), "");
+    log_dependency(dept, ciargs);
   }
 
   class DepArgument : public ResourceObj {
@@ -494,20 +508,8 @@
     Metadata* metadata_value() const { assert(!_is_oop && _valid, "must be"); return (Metadata*) _value; }
   };
 
-  static void write_dependency_to(CompileLog* log,
-                                  DepType dept,
-                                  int nargs, ciBaseObject* args[],
-                                  Klass* witness = NULL);
-  static void write_dependency_to(CompileLog* log,
-                                  DepType dept,
-                                  int nargs, DepArgument args[],
-                                  Klass* witness = NULL);
-  static void write_dependency_to(xmlStream* xtty,
-                                  DepType dept,
-                                  int nargs, DepArgument args[],
-                                  Klass* witness = NULL);
   static void print_dependency(DepType dept,
-                               int nargs, DepArgument args[],
+                               GrowableArray<DepArgument>* args,
                                Klass* witness = NULL, outputStream* st = tty);
 
  private:
@@ -516,6 +518,18 @@
 
   static Klass* ctxk_encoded_as_null(DepType dept, Metadata* x);
 
+  static void write_dependency_to(CompileLog* log,
+                                  DepType dept,
+                                  GrowableArray<ciBaseObject*>* args,
+                                  Klass* witness = NULL);
+  static void write_dependency_to(CompileLog* log,
+                                  DepType dept,
+                                  GrowableArray<DepArgument>* args,
+                                  Klass* witness = NULL);
+  static void write_dependency_to(xmlStream* xtty,
+                                  DepType dept,
+                                  GrowableArray<DepArgument>* args,
+                                  Klass* witness = NULL);
  public:
   // Use this to iterate over an nmethod's dependency set.
   // Works on new and old dependency sets.
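
The header change above replaces the fixed ciBaseObject* args[max_arg_count] buffer with a resource-allocated GrowableArray that pushes x0 and then x1/x2 only when non-NULL, then asserts the final length equals dep_args(dept). A loose sketch of that shape using std::vector in place of GrowableArray and ignoring ResourceMark semantics; dep_args_for and its arity values are illustrative only:

    #include <cassert>
    #include <string>
    #include <vector>

    // Hypothetical stand-ins: 'Obj' plays the role of ciBaseObject*, the
    // vector plays the role of the GrowableArray built in log_dependency above.
    using Obj = const char*;

    static int dep_args_for(const std::string& dept) {
      return dept == "unique_concrete_method" ? 2 : 1;   // illustrative arity table
    }

    static std::vector<Obj> collect_args(Obj x0, Obj x1 = nullptr, Obj x2 = nullptr) {
      std::vector<Obj> args;
      assert(x0 != nullptr && "no log x0");
      args.push_back(x0);
      if (x1 != nullptr) args.push_back(x1);
      if (x2 != nullptr) args.push_back(x2);
      return args;
    }

    int main() {
      std::vector<Obj> args = collect_args("ctxk", "method");
      assert((int)args.size() == dep_args_for("unique_concrete_method"));
      return 0;
    }
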
--- a/src/share/vm/code/nmethod.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/code/nmethod.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -37,6 +37,7 @@
 #include "oops/methodData.hpp"
 #include "prims/jvmtiRedefineClassesTrace.hpp"
 #include "prims/jvmtiImpl.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/sweeper.hpp"
 #include "utilities/dtrace.hpp"
@@ -51,6 +52,8 @@
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
+unsigned char nmethod::_global_unloading_clock = 0;
+
 #ifdef DTRACE_ENABLED
 
 // Only bother with this argument setup if dtrace is available
@@ -445,27 +448,30 @@
   set_exception_cache(new_entry);
 }
 
-void nmethod::remove_from_exception_cache(ExceptionCache* ec) {
+void nmethod::clean_exception_cache(BoolObjectClosure* is_alive) {
   ExceptionCache* prev = NULL;
   ExceptionCache* curr = exception_cache();
-  assert(curr != NULL, "nothing to remove");
-  // find the previous and next entry of ec
-  while (curr != ec) {
-    prev = curr;
-    curr = curr->next();
-    assert(curr != NULL, "ExceptionCache not found");
+
+  while (curr != NULL) {
+    ExceptionCache* next = curr->next();
+
+    Klass* ex_klass = curr->exception_type();
+    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
+      if (prev == NULL) {
+        set_exception_cache(next);
+      } else {
+        prev->set_next(next);
+      }
+      delete curr;
+      // prev stays the same.
+    } else {
+      prev = curr;
+    }
+
+    curr = next;
   }
-  // now: curr == ec
-  ExceptionCache* next = curr->next();
-  if (prev == NULL) {
-    set_exception_cache(next);
-  } else {
-    prev->set_next(next);
-  }
-  delete curr;
 }
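The rewritten clean_exception_cache() above walks the singly linked exception cache once, unlinking every entry whose exception klass's class loader is no longer alive; a trailing prev pointer lets the head case and the interior case share the same unlink code, and prev deliberately stays put after a deletion. A small, self-contained sketch of the same unlink-while-walking shape (generic C++; the names are illustrative, not HotSpot API):

    #include <cassert>

    struct Entry {
      Entry* next;
      bool   dead;   // stand-in for "exception_type()'s class loader is not alive"
    };

    // Remove every dead entry in one forward pass, as clean_exception_cache() does.
    static Entry* clean_list(Entry* head) {
      Entry* prev = nullptr;
      Entry* curr = head;
      while (curr != nullptr) {
        Entry* next = curr->next;
        if (curr->dead) {
          if (prev == nullptr) head = next;   // unlink the head entry
          else prev->next = next;             // unlink an interior entry
          delete curr;                        // prev stays the same
        } else {
          prev = curr;
        }
        curr = next;
      }
      return head;
    }

    int main() {
      Entry* head = new Entry{ new Entry{ nullptr, false }, true };
      head = clean_list(head);
      assert(head != nullptr && !head->dead && head->next == nullptr);
      delete head;
      return 0;
    }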
 
-
 // public method for accessing the exception cache
 // These are the public access methods.
 address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
@@ -524,6 +530,7 @@
 // Fill in default values for various flag fields
 void nmethod::init_defaults() {
   _state                      = in_use;
+  _unloading_clock            = 0;
   _marked_for_reclamation     = 0;
   _has_flushed_dependencies   = 0;
   _has_unsafe_access          = 0;
@@ -542,7 +549,11 @@
   _oops_do_mark_link       = NULL;
   _jmethod_id              = NULL;
   _osr_link                = NULL;
-  _scavenge_root_link      = NULL;
+  if (UseG1GC) {
+    _unloading_next        = NULL;
+  } else {
+    _scavenge_root_link    = NULL;
+  }
   _scavenge_root_state     = 0;
   _compiler                = NULL;
 #if INCLUDE_RTM_OPT
@@ -763,8 +774,10 @@
     _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 
     code_buffer->copy_values_to(this);
-    if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
-      CodeCache::add_scavenge_root_nmethod(this);
+    if (ScavengeRootsInCode) {
+      if (detect_scavenge_root_oops()) {
+        CodeCache::add_scavenge_root_nmethod(this);
+      }
       Universe::heap()->register_nmethod(this);
     }
     debug_only(verify_scavenge_root_oops());
@@ -848,8 +861,10 @@
     _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 
     code_buffer->copy_values_to(this);
-    if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
-      CodeCache::add_scavenge_root_nmethod(this);
+    if (ScavengeRootsInCode) {
+      if (detect_scavenge_root_oops()) {
+        CodeCache::add_scavenge_root_nmethod(this);
+      }
       Universe::heap()->register_nmethod(this);
     }
     DEBUG_ONLY(verify_scavenge_root_oops();)
@@ -996,8 +1011,10 @@
     code_buffer->copy_values_to(this);
     debug_info->copy_to(this);
     dependencies->copy_to(this);
-    if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
-      CodeCache::add_scavenge_root_nmethod(this);
+    if (ScavengeRootsInCode) {
+      if (detect_scavenge_root_oops()) {
+        CodeCache::add_scavenge_root_nmethod(this);
+      }
       Universe::heap()->register_nmethod(this);
     }
     debug_only(verify_scavenge_root_oops());
@@ -1264,7 +1281,7 @@
     switch(iter.type()) {
       case relocInfo::virtual_call_type:
       case relocInfo::opt_virtual_call_type: {
-        CompiledIC *ic = CompiledIC_at(iter.reloc());
+        CompiledIC *ic = CompiledIC_at(&iter);
         // Ok, to lookup references to zombies here
         CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
         if( cb != NULL && cb->is_nmethod() ) {
@@ -1288,6 +1305,77 @@
   }
 }
 
+void nmethod::verify_clean_inline_caches() {
+  assert_locked_or_safepoint(CompiledIC_lock);
+
+  // If the method is not entrant or zombie then a JMP is plastered over the
+  // first few bytes.  If an oop in the old code was there, that oop
+  // should not get GC'd.  Skip the first few bytes of oops on
+  // not-entrant methods.
+  address low_boundary = verified_entry_point();
+  if (!is_in_use()) {
+    low_boundary += NativeJump::instruction_size;
+    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
+    // This means that the low_boundary is going to be a little too high.
+    // This shouldn't matter, since oops of non-entrant methods are never used.
+    // In fact, why are we bothering to look at oops in a non-entrant method??
+  }
+
+  ResourceMark rm;
+  RelocIterator iter(this, low_boundary);
+  while(iter.next()) {
+    switch(iter.type()) {
+      case relocInfo::virtual_call_type:
+      case relocInfo::opt_virtual_call_type: {
+        CompiledIC *ic = CompiledIC_at(&iter);
+        // Ok, to lookup references to zombies here
+        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
+        if( cb != NULL && cb->is_nmethod() ) {
+          nmethod* nm = (nmethod*)cb;
+          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
+          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
+            assert(ic->is_clean(), "IC should be clean");
+          }
+        }
+        break;
+      }
+      case relocInfo::static_call_type: {
+        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
+        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
+        if( cb != NULL && cb->is_nmethod() ) {
+          nmethod* nm = (nmethod*)cb;
+          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
+          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
+            assert(csc->is_clean(), "IC should be clean");
+          }
+        }
+        break;
+      }
+    }
+  }
+}
+
+int nmethod::verify_icholder_relocations() {
+  int count = 0;
+
+  RelocIterator iter(this);
+  while(iter.next()) {
+    if (iter.type() == relocInfo::virtual_call_type) {
+      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
+        CompiledIC *ic = CompiledIC_at(&iter);
+        if (TraceCompiledIC) {
+          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
+          ic->print();
+        }
+        assert(ic->cached_icholder() != NULL, "must be non-NULL");
+        count++;
+      }
+    }
+  }
+
+  return count;
+}
+
 // This is a private interface with the sweeper.
 void nmethod::mark_as_seen_on_stack() {
   assert(is_alive(), "Must be an alive method");
@@ -1320,6 +1408,23 @@
   mdo->inc_decompile_count();
 }
 
+void nmethod::increase_unloading_clock() {
+  _global_unloading_clock++;
+  if (_global_unloading_clock == 0) {
+    // _nmethods are allocated with _unloading_clock == 0,
+    // so 0 is never used as a clock value.
+    _global_unloading_clock = 1;
+  }
+}
+
+void nmethod::set_unloading_clock(unsigned char unloading_clock) {
+  OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
+}
+
+unsigned char nmethod::unloading_clock() {
+  return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
+}
+
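The pair of clocks above lets a concurrent unloading pass tell whether an nmethod has already been visited in the current cycle: the single global clock is bumped once per cycle (skipping the reserved value 0), and each nmethod records the clock value it was processed under. The release-store/load-acquire pair makes all cleaning done before the per-nmethod clock is published visible to any thread that later observes the new value. A minimal, self-contained sketch of the same pattern using standard C++ atomics rather than HotSpot's OrderAccess (all names below are illustrative):

    #include <atomic>
    #include <cassert>

    // Global epoch, bumped once per unloading cycle by a single thread;
    // 0 is reserved to mean "never processed", as in increase_unloading_clock().
    static unsigned char global_clock = 0;

    struct FakeNMethod {
      std::atomic<unsigned char> clock{0};        // analogue of _unloading_clock
    };

    static void increase_global_clock() {
      if (++global_clock == 0) global_clock = 1;  // skip the reserved value 0
    }

    static bool already_processed(const FakeNMethod& nm) {
      // acquire: observe all cleaning done before the matching release store
      return nm.clock.load(std::memory_order_acquire) == global_clock;
    }

    static void mark_processed(FakeNMethod& nm) {
      // release: publish the cleaning work before the new clock value
      nm.clock.store(global_clock, std::memory_order_release);
    }

    int main() {
      FakeNMethod nm;
      increase_global_clock();
      assert(!already_processed(nm));
      mark_processed(nm);
      assert(already_processed(nm));
      return 0;
    }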
 void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
 
   post_compiled_method_unload();
@@ -1376,6 +1481,10 @@
     // for later on.
     CodeCache::set_needs_cache_clean(true);
   }
+
+  // Unregister must be done before the state change
+  Universe::heap()->unregister_nmethod(this);
+
   _state = unloaded;
 
   // Log the unloading.
@@ -1737,6 +1846,45 @@
   set_unload_reported();
 }
 
+static void clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive, bool mark_on_stack) {
+  if (ic->is_icholder_call()) {
+    // The only exception is compiledICHolder oops which may
+    // yet be marked below. (We check this further below).
+    CompiledICHolder* cichk_oop = ic->cached_icholder();
+
+    if (mark_on_stack) {
+      Metadata::mark_on_stack(cichk_oop->holder_method());
+      Metadata::mark_on_stack(cichk_oop->holder_klass());
+    }
+
+    if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
+        cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
+      return;
+    }
+  } else {
+    Metadata* ic_oop = ic->cached_metadata();
+    if (ic_oop != NULL) {
+      if (mark_on_stack) {
+        Metadata::mark_on_stack(ic_oop);
+      }
+
+      if (ic_oop->is_klass()) {
+        if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
+          return;
+        }
+      } else if (ic_oop->is_method()) {
+        if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
+          return;
+        }
+      } else {
+        ShouldNotReachHere();
+      }
+    }
+  }
+
+  ic->set_to_clean();
+}
+
 // This is called at the end of the strong tracing/marking phase of a
 // GC to unload an nmethod if it contains otherwise unreachable
 // oops.
@@ -1790,15 +1938,7 @@
 #endif
 
   // Exception cache
-  ExceptionCache* ec = exception_cache();
-  while (ec != NULL) {
-    Klass* ex_klass = ec->exception_type();
-    ExceptionCache* next_ec = ec->next();
-    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
-      remove_from_exception_cache(ec);
-    }
-    ec = next_ec;
-  }
+  clean_exception_cache(is_alive);
 
   // If class unloading occurred we first iterate over all inline caches and
   // clear ICs where the cached oop is referring to an unloaded klass or method.
@@ -1808,32 +1948,8 @@
     RelocIterator iter(this, low_boundary);
     while(iter.next()) {
       if (iter.type() == relocInfo::virtual_call_type) {
-        CompiledIC *ic = CompiledIC_at(iter.reloc());
-        if (ic->is_icholder_call()) {
-          // The only exception is compiledICHolder oops which may
-          // yet be marked below. (We check this further below).
-          CompiledICHolder* cichk_oop = ic->cached_icholder();
-          if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
-              cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
-            continue;
-          }
-        } else {
-          Metadata* ic_oop = ic->cached_metadata();
-          if (ic_oop != NULL) {
-            if (ic_oop->is_klass()) {
-              if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
-                continue;
-              }
-            } else if (ic_oop->is_method()) {
-              if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
-                continue;
-              }
-            } else {
-              ShouldNotReachHere();
-            }
-          }
-        }
-        ic->set_to_clean();
+        CompiledIC *ic = CompiledIC_at(&iter);
+        clean_ic_if_metadata_is_dead(ic, is_alive, false);
       }
     }
   }
@@ -1871,6 +1987,245 @@
   verify_metadata_loaders(low_boundary, is_alive);
 }
 
+template <class CompiledICorStaticCall>
+static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, nmethod* from) {
+  // Ok, to lookup references to zombies here
+  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
+  if (cb != NULL && cb->is_nmethod()) {
+    nmethod* nm = (nmethod*)cb;
+
+    if (nm->unloading_clock() != nmethod::global_unloading_clock()) {
+      // The nmethod has not been processed yet.
+      return true;
+    }
+
+    // Clean inline caches pointing to both zombie and not_entrant methods
+    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
+      ic->set_to_clean();
+      assert(ic->is_clean(), err_msg("nmethod " PTR_FORMAT " not clean %s", from, from->method()->name_and_sig_as_C_string()));
+    }
+  }
+
+  return false;
+}
+
+static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, nmethod* from) {
+  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
+}
+
+static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, nmethod* from) {
+  return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
+}
+
+bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive, bool unloading_occurred) {
+  assert(iter_at_oop->type() == relocInfo::oop_type, "Wrong relocation type");
+
+  oop_Relocation* r = iter_at_oop->oop_reloc();
+  // Traverse those oops directly embedded in the code.
+  // Other oops (oop_index>0) are seen as part of scopes_oops.
+  assert(1 == (r->oop_is_immediate()) +
+         (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
+         "oop must be found in exactly one place");
+  if (r->oop_is_immediate() && r->oop_value() != NULL) {
+    // Unload this nmethod if the oop is dead.
+    if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+void nmethod::mark_metadata_on_stack_at(RelocIterator* iter_at_metadata) {
+  assert(iter_at_metadata->type() == relocInfo::metadata_type, "Wrong relocation type");
+
+  metadata_Relocation* r = iter_at_metadata->metadata_reloc();
+  // In this metadata, we must only follow those metadatas directly embedded in
+  // the code.  Other metadatas (oop_index>0) are seen as part of
+  // the metadata section below.
+  assert(1 == (r->metadata_is_immediate()) +
+         (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
+         "metadata must be found in exactly one place");
+  if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
+    Metadata* md = r->metadata_value();
+    if (md != _method) Metadata::mark_on_stack(md);
+  }
+}
+
+void nmethod::mark_metadata_on_stack_non_relocs() {
+    // Visit the metadata section
+    for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
+      if (*p == Universe::non_oop_word() || *p == NULL)  continue;  // skip non-oops
+      Metadata* md = *p;
+      Metadata::mark_on_stack(md);
+    }
+
+    // Visit metadata not embedded in the other places.
+    if (_method != NULL) Metadata::mark_on_stack(_method);
+}
+
+bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
+  ResourceMark rm;
+
+  // Make sure the oop's ready to receive visitors
+  assert(!is_zombie() && !is_unloaded(),
+         "should not call follow on zombie or unloaded nmethod");
+
+  // If the method is not entrant then a JMP is plastered over the
+  // first few bytes.  If an oop in the old code was there, that oop
+  // should not get GC'd.  Skip the first few bytes of oops on
+  // not-entrant methods.
+  address low_boundary = verified_entry_point();
+  if (is_not_entrant()) {
+    low_boundary += NativeJump::instruction_size;
+    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
+    // (See comment above.)
+  }
+
+  // The RedefineClasses() API can cause the class unloading invariant
+  // to no longer be true. See jvmtiExport.hpp for details.
+  // Also, leave a debugging breadcrumb in local flag.
+  bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
+  if (a_class_was_redefined) {
+    // This set of the unloading_occurred flag is done before the
+    // call to post_compiled_method_unload() so that the unloading
+    // of this nmethod is reported.
+    unloading_occurred = true;
+  }
+
+#ifdef GRAAL
+  // Follow Graal method
+  if (_graal_installed_code != NULL) {
+    if (_graal_installed_code->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(_graal_installed_code)) {
+      if (!is_alive->do_object_b(_graal_installed_code)) {
+        _graal_installed_code = NULL;
+      }
+    } else {
+      if (can_unload(is_alive, (oop*)&_graal_installed_code, unloading_occurred)) {
+        return false;
+      }
+    }
+  }
+
+  if (_speculation_log != NULL) {
+    if (!is_alive->do_object_b(_speculation_log)) {
+      _speculation_log = NULL;
+    }
+  }
+#endif
+
+  // When class redefinition is used all metadata in the CodeCache has to be recorded,
+  // so that unused "previous versions" can be purged. Since walking the CodeCache can
+  // be expensive, the "mark on stack" is piggy-backed on this parallel unloading code.
+  bool mark_metadata_on_stack = a_class_was_redefined;
+
+  // Exception cache
+  clean_exception_cache(is_alive);
+
+  bool is_unloaded = false;
+  bool postponed = false;
+
+  RelocIterator iter(this, low_boundary);
+  while(iter.next()) {
+
+    switch (iter.type()) {
+
+    case relocInfo::virtual_call_type:
+      if (unloading_occurred) {
+        // If class unloading occurred we first iterate over all inline caches and
+        // clear ICs where the cached oop is referring to an unloaded klass or method.
+        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive, mark_metadata_on_stack);
+      }
+
+      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
+      break;
+
+    case relocInfo::opt_virtual_call_type:
+      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
+      break;
+
+    case relocInfo::static_call_type:
+      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
+      break;
+
+    case relocInfo::oop_type:
+      if (!is_unloaded) {
+        is_unloaded = unload_if_dead_at(&iter, is_alive, unloading_occurred);
+      }
+      break;
+
+    case relocInfo::metadata_type:
+      if (mark_metadata_on_stack) {
+        mark_metadata_on_stack_at(&iter);
+      }
+    }
+  }
+
+  if (mark_metadata_on_stack) {
+    mark_metadata_on_stack_non_relocs();
+  }
+
+  if (is_unloaded) {
+    return postponed;
+  }
+
+  // Scopes
+  for (oop* p = oops_begin(); p < oops_end(); p++) {
+    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
+    if (can_unload(is_alive, p, unloading_occurred)) {
+      is_unloaded = true;
+      break;
+    }
+  }
+
+  if (is_unloaded) {
+    return postponed;
+  }
+
+  // Ensure that all metadata is still alive
+  verify_metadata_loaders(low_boundary, is_alive);
+
+  return postponed;
+}
+
+void nmethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
+  ResourceMark rm;
+
+  // Make sure the oop's ready to receive visitors
+  assert(!is_zombie(),
+         "should not call follow on zombie nmethod");
+
+  // If the method is not entrant then a JMP is plastered over the
+  // first few bytes.  If an oop in the old code was there, that oop
+  // should not get GC'd.  Skip the first few bytes of oops on
+  // not-entrant methods.
+  address low_boundary = verified_entry_point();
+  if (is_not_entrant()) {
+    low_boundary += NativeJump::instruction_size;
+    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
+    // (See comment above.)
+  }
+
+  RelocIterator iter(this, low_boundary);
+  while(iter.next()) {
+
+    switch (iter.type()) {
+
+    case relocInfo::virtual_call_type:
+      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
+      break;
+
+    case relocInfo::opt_virtual_call_type:
+      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
+      break;
+
+    case relocInfo::static_call_type:
+      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
+      break;
+    }
+  }
+}
+
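Together with clean_if_nmethod_is_unloaded() above, these two entry points form a two-pass protocol: in the first pass a GC worker postpones any call site whose target nmethod has not yet been stamped with the current unloading clock, and once every nmethod has been processed the postponed ones are revisited by do_unloading_parallel_postponed(). A rough, single-threaded model of that control flow (plain C++; hypothetical names, not the HotSpot types):

    #include <cstdio>
    #include <vector>

    struct Target   { bool processed; bool dead; };
    struct CallSite { Target* target; bool cleaned; };

    // First-pass analogue of clean_if_nmethod_is_unloaded(): returns true when the
    // decision has to be postponed because the target has not been processed yet.
    static bool clean_or_postpone(CallSite& cs) {
      if (!cs.target->processed) return true;      // postpone to the second pass
      if (cs.target->dead) cs.cleaned = true;      // clean the stale call site
      return false;
    }

    int main() {
      Target dead_target = { false, true };        // not processed yet, and dead
      CallSite site      = { &dead_target, false };
      std::vector<CallSite*> postponed;

      // Pass 1 (per GC worker): some targets may not have been visited yet.
      if (clean_or_postpone(site)) postponed.push_back(&site);

      dead_target.processed = true;                // by now every target is processed

      // Pass 2 (do_unloading_parallel_postponed analogue): only the postponed work.
      for (CallSite* cs : postponed) clean_or_postpone(*cs);

      std::printf("site cleaned: %s\n", site.cleaned ? "yes" : "no");
      return 0;
    }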
 #ifdef ASSERT
 
 class CheckClass : AllStatic {
@@ -1917,7 +2272,7 @@
     // compiled code is maintaining a link to dead metadata.
     address static_call_addr = NULL;
     if (iter.type() == relocInfo::opt_virtual_call_type) {
-      CompiledIC* cic = CompiledIC_at(iter.reloc());
+      CompiledIC* cic = CompiledIC_at(&iter);
       if (!cic->is_call_to_interpreted()) {
         static_call_addr = iter.addr();
       }
@@ -1957,7 +2312,7 @@
     while (iter.next()) {
       if (iter.type() == relocInfo::metadata_type ) {
         metadata_Relocation* r = iter.metadata_reloc();
-        // In this lmetadata, we must only follow those metadatas directly embedded in
+        // In this metadata, we must only follow those metadatas directly embedded in
         // the code.  Other metadatas (oop_index>0) are seen as part of
         // the metadata section below.
         assert(1 == (r->metadata_is_immediate()) +
@@ -1969,7 +2324,7 @@
         }
       } else if (iter.type() == relocInfo::virtual_call_type) {
         // Check compiledIC holders associated with this nmethod
-        CompiledIC *ic = CompiledIC_at(iter.reloc());
+        CompiledIC *ic = CompiledIC_at(&iter);
         if (ic->is_icholder_call()) {
           CompiledICHolder* cichk = ic->cached_icholder();
           f(cichk->holder_method());
@@ -1991,7 +2346,7 @@
     f(md);
   }
 
-  // Call function Method*, not embedded in these other places.
+  // Visit metadata not embedded in the other places.
   if (_method != NULL) f(_method);
 }
 
@@ -2096,7 +2451,7 @@
     assert(cur != NULL, "not NULL-terminated");
     nmethod* next = cur->_oops_do_mark_link;
     cur->_oops_do_mark_link = NULL;
-    cur->fix_oop_relocations();
+    cur->verify_oop_relocations();
     NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark"));
     cur = next;
   }
@@ -2638,6 +2993,10 @@
 };
 
 void nmethod::verify_scavenge_root_oops() {
+  if (UseG1GC) {
+    return;
+  }
+
   if (!on_scavenge_root_list()) {
     // Actually look inside, to verify the claim that it's clean.
     DebugScavengeRoot debug_scavenge_root(this);
@@ -3086,7 +3445,7 @@
     case relocInfo::virtual_call_type:
     case relocInfo::opt_virtual_call_type: {
       VerifyMutexLocker mc(CompiledIC_lock);
-      CompiledIC_at(iter.reloc())->print();
+      CompiledIC_at(&iter)->print();
       break;
     }
     case relocInfo::static_call_type:
--- a/src/share/vm/code/nmethod.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/code/nmethod.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -69,7 +69,12 @@
   friend class VMStructs;
  private:
   enum { cache_size = 4 };
-  PcDesc* _pc_descs[cache_size]; // last cache_size pc_descs found
+  // The array elements MUST be volatile! Several threads may modify
+  // and read from the cache concurrently; without volatile,
+  // find_pc_desc_internal has returned wrong results because the
+  // C++ compiler (namely xlC12) may duplicate C++ field accesses.
+  typedef PcDesc* PcDescPtr;
+  volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
  public:
   PcDescCache() { debug_only(_pc_descs[0] = NULL); }
   void    reset_to(PcDesc* initial_pc_desc);
@@ -111,6 +116,11 @@
   friend class NMethodSweeper;
   friend class CodeCache;  // scavengable oops
  private:
+
+  // GC support to help figure out if an nmethod has been
+  // cleaned/unloaded by the current GC.
+  static unsigned char _global_unloading_clock;
+
   // Shared fields for all nmethod's
   Method*   _method;
   int       _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method
@@ -124,7 +134,13 @@
 
   // To support simple linked-list chaining of nmethods:
   nmethod*  _osr_link;         // from InstanceKlass::osr_nmethods_head
-  nmethod*  _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
+
+  union {
+    // Used by G1 to chain nmethods.
+    nmethod* _unloading_next;
+    // Used by non-G1 GCs to chain nmethods.
+    nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
+  };
 
   static nmethod* volatile _oops_do_mark_nmethods;
   nmethod*        volatile _oops_do_mark_link;
@@ -186,6 +202,8 @@
   // Protected by Patching_lock
   volatile unsigned char _state;             // {alive, not_entrant, zombie, unloaded}
 
+  volatile unsigned char _unloading_clock;   // Incremented after GC unloaded/cleaned the nmethod
+
 #ifdef ASSERT
   bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
 #endif
@@ -447,13 +465,25 @@
   // alive.  It is used when an uncommon trap happens.  Returns true
   // if this thread changed the state of the nmethod or false if
   // another thread performed the transition.
-  bool  make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
+  bool  make_not_entrant() {
+    assert(!method()->is_method_handle_intrinsic(), "Cannot make MH intrinsic not entrant");
+    return make_not_entrant_or_zombie(not_entrant);
+  }
   bool  make_zombie()      { return make_not_entrant_or_zombie(zombie); }
 
   // used by jvmti to track if the unload event has been reported
   bool  unload_reported()                         { return _unload_reported; }
   void  set_unload_reported()                     { _unload_reported = true; }
 
+  void set_unloading_next(nmethod* next)          { _unloading_next = next; }
+  nmethod* unloading_next()                       { return _unloading_next; }
+
+  static unsigned char global_unloading_clock()   { return _global_unloading_clock; }
+  static void increase_unloading_clock();
+
+  void set_unloading_clock(unsigned char unloading_clock);
+  unsigned char unloading_clock();
+
   bool  is_marked_for_deoptimization() const      { return _marked_for_deoptimization; }
   void  mark_for_deoptimization()                 { _marked_for_deoptimization = true; }
 
@@ -546,7 +576,7 @@
   void set_exception_cache(ExceptionCache *ec)    { _exception_cache = ec; }
   address handler_for_exception_and_pc(Handle exception, address pc);
   void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
-  void remove_from_exception_cache(ExceptionCache* ec);
+  void clean_exception_cache(BoolObjectClosure* is_alive);
 
   // implicit exceptions support
   address continuation_for_implicit_exception(address pc);
@@ -569,6 +599,10 @@
     return (addr >= code_begin() && addr < verified_entry_point());
   }
 
+  // Verify calls to dead methods have been cleaned.
+  void verify_clean_inline_caches();
+  // Verify and count cached icholder relocations.
+  int  verify_icholder_relocations();
   // Check that all metadata is still alive
   void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);
 
@@ -601,8 +635,19 @@
 
   // GC support
   void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
+  //  The parallel versions are used by G1.
+  bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
+  void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
+
+ private:
+  //  Unload an nmethod if the object referenced by *root is dead.
   bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
+  bool unload_if_dead_at(RelocIterator *iter_at_oop, BoolObjectClosure* is_alive, bool unloading_occurred);
 
+  void mark_metadata_on_stack_at(RelocIterator* iter_at_metadata);
+  void mark_metadata_on_stack_non_relocs();
+
+ public:
   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
                                      OopClosure* f);
   void oops_do(OopClosure* f) { oops_do(f, false); }
--- a/src/share/vm/code/relocInfo.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/code/relocInfo.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -902,11 +902,7 @@
 void internal_word_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
   address target = _target;
   if (target == NULL) {
-    if (addr_in_const()) {
-      target = new_addr_for(*(address*)addr(), src, dest);
-    } else {
-      target = new_addr_for(pd_get_address_from_code(), src, dest);
-    }
+    target = new_addr_for(this->target(), src, dest);
   }
   set_value(target);
 }
@@ -915,7 +911,11 @@
 address internal_word_Relocation::target() {
   address target = _target;
   if (target == NULL) {
-    target = pd_get_address_from_code();
+    if (addr_in_const()) {
+      target = *(address*)addr();
+    } else {
+      target = pd_get_address_from_code();
+    }
   }
   return target;
 }
--- a/src/share/vm/compiler/compileBroker.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/compiler/compileBroker.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -190,9 +190,8 @@
 
 long CompileBroker::_peak_compilation_time       = 0;
 
-CompileQueue* CompileBroker::_c2_method_queue    = NULL;
-CompileQueue* CompileBroker::_c1_method_queue    = NULL;
-CompileTask*  CompileBroker::_task_free_list     = NULL;
+CompileQueue* CompileBroker::_c2_compile_queue   = NULL;
+CompileQueue* CompileBroker::_c1_compile_queue   = NULL;
 
 GrowableArray<CompilerThread*>* CompileBroker::_compiler_threads = NULL;
 
@@ -260,13 +259,56 @@
 
     // By convention, the compiling thread is responsible for
     // recycling a non-blocking CompileTask.
-    CompileBroker::free_task(task);
+    CompileTask::free(task);
   }
 }
 
 
-// ------------------------------------------------------------------
-// CompileTask::initialize
+CompileTask*  CompileTask::_task_free_list = NULL;
+#ifdef ASSERT
+int CompileTask::_num_allocated_tasks = 0;
+#endif
+/**
+ * Allocate a CompileTask, from the free list if possible.
+ */
+CompileTask* CompileTask::allocate() {
+  MutexLocker locker(CompileTaskAlloc_lock);
+  CompileTask* task = NULL;
+
+  if (_task_free_list != NULL) {
+    task = _task_free_list;
+    _task_free_list = task->next();
+    task->set_next(NULL);
+  } else {
+    task = new CompileTask();
+    DEBUG_ONLY(_num_allocated_tasks++;)
+    assert (_num_allocated_tasks < 10000, "Leaking compilation tasks?");
+    task->set_next(NULL);
+    task->set_is_free(true);
+  }
+  assert(task->is_free(), "Task must be free.");
+  task->set_is_free(false);
+  return task;
+}
+
+
+/**
+ * Add a task to the free list.
+ */
+void CompileTask::free(CompileTask* task) {
+  MutexLocker locker(CompileTaskAlloc_lock);
+  if (!task->is_free()) {
+    task->set_code(NULL);
+    assert(!task->lock()->is_locked(), "Should not be locked when freed");
+    JNIHandles::destroy_global(task->_method_holder);
+    JNIHandles::destroy_global(task->_hot_method_holder);
+
+    task->set_is_free(true);
+    task->set_next(_task_free_list);
+    _task_free_list = task;
+  }
+}
+
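CompileTask::allocate() and CompileTask::free() move task recycling into the class itself: a LIFO free list guarded by CompileTaskAlloc_lock, with the new _is_free flag catching double frees and use of a live task. A minimal, self-contained sketch of that free-list pattern in portable C++ (std::mutex stands in for the HotSpot MutexLocker; the names are hypothetical):

    #include <cassert>
    #include <mutex>

    struct Task {
      Task* next = nullptr;
      bool  is_free = true;             // analogue of CompileTask::_is_free
    };

    static std::mutex free_list_lock;   // analogue of CompileTaskAlloc_lock
    static Task* free_list = nullptr;   // LIFO list of recycled tasks

    static Task* allocate_task() {
      std::lock_guard<std::mutex> guard(free_list_lock);
      Task* t;
      if (free_list != nullptr) {       // reuse a recycled task if possible
        t = free_list;
        free_list = t->next;
        t->next = nullptr;
      } else {
        t = new Task();
      }
      assert(t->is_free);
      t->is_free = false;
      return t;
    }

    static void free_task(Task* t) {
      std::lock_guard<std::mutex> guard(free_list_lock);
      if (!t->is_free) {                // ignore double frees, as free() does
        t->is_free = true;
        t->next = free_list;            // push back onto the LIFO free list
        free_list = t;
      }
    }

    int main() {
      Task* a = allocate_task();
      free_task(a);
      Task* b = allocate_task();        // recycles the same object
      assert(a == b);
      free_task(b);
      return 0;
    }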
 void CompileTask::initialize(int compile_id,
                              methodHandle method,
                              int osr_bci,
@@ -294,6 +336,7 @@
   _hot_count = hot_count;
   _time_queued = 0;  // tidy
   _comment = comment;
+  _failure_reason = NULL;
 
   if (LogCompilation) {
     _time_queued = os::elapsed_counter();
@@ -324,15 +367,6 @@
   if (nm == NULL)  _code_handle = NULL;  // drop the handle also
 }
 
-// ------------------------------------------------------------------
-// CompileTask::free
-void CompileTask::free() {
-  set_code(NULL);
-  assert(!_lock->is_locked(), "Should not be locked when freed");
-  JNIHandles::destroy_global(_method_holder);
-  JNIHandles::destroy_global(_hot_method_holder);
-}
-
 
 void CompileTask::mark_on_stack() {
   // Mark these methods as something redefine classes cannot remove.
@@ -591,6 +625,11 @@
   methodHandle method(thread, this->method());
   ResourceMark rm(thread);
 
+  if (!_is_success) {
+    const char* reason = _failure_reason != NULL ? _failure_reason : "unknown";
+    log->elem("failure reason='%s'", reason);
+  }
+
   // <task_done ... stamp='1.234'>  </task>
   nmethod* nm = code();
   log->begin_elem("task_done success='%d' nmsize='%d' count='%d'",
@@ -614,9 +653,12 @@
 
 
 
-// Add a CompileTask to a CompileQueue
+/**
+ * Add a CompileTask to a CompileQueue
+ */
 void CompileQueue::add(CompileTask* task) {
   assert(lock()->owned_by_self(), "must own lock");
+  assert(!CompileBroker::is_compilation_disabled_forever(), "Do not add task if compilation is turned off forever");
 
   task->set_next(NULL);
   task->set_prev(NULL);
@@ -638,9 +680,7 @@
   // Mark the method as being in the compile queue.
   task->method()->set_queued_for_compilation();
 
-  if (CIPrintCompileQueue) {
-    print();
-  }
+  NOT_PRODUCT(print();)
 
   if (LogCompilation && xtty != NULL) {
     task->log_task_queued();
@@ -650,14 +690,32 @@
   lock()->notify_all();
 }
 
-void CompileQueue::delete_all() {
-  assert(lock()->owned_by_self(), "must own lock");
-  if (_first != NULL) {
-    for (CompileTask* task = _first; task != NULL; task = task->next()) {
-      delete task;
+/**
+ * Empties compilation queue by putting all compilation tasks onto
+ * a freelist. Furthermore, the method wakes up all threads that are
+ * waiting on a compilation task to finish. This can happen if background
+ * compilation is disabled.
+ */
+void CompileQueue::free_all() {
+  MutexLocker mu(lock());
+  CompileTask* next = _first;
+
+  // Iterate over all tasks in the compile queue
+  while (next != NULL) {
+    CompileTask* current = next;
+    next = current->next();
+    {
+      // Wake up thread that blocks on the compile task.
+      MutexLocker ct_lock(current->lock());
+      current->lock()->notify();
     }
-    _first = NULL;
+    // Put the task back on the freelist.
+    CompileTask::free(current);
   }
+  _first = NULL;
+
+  // Wake up all threads that block on the queue.
+  lock()->notify_all();
 }
 
 // ------------------------------------------------------------------
@@ -714,13 +772,40 @@
     return NULL;
   }
 
-  CompileTask* task = CompilationPolicy::policy()->select_task(this);
+  CompileTask* task;
+  {
+    No_Safepoint_Verifier nsv;
+    task = CompilationPolicy::policy()->select_task(this);
+  }
   remove(task);
+  purge_stale_tasks(); // may temporarily release MCQ lock
   return task;
 }
 
-void CompileQueue::remove(CompileTask* task)
-{
+// Clean & deallocate stale compile tasks.
+// Temporarily releases MethodCompileQueue lock.
+void CompileQueue::purge_stale_tasks() {
+  assert(lock()->owned_by_self(), "must own lock");
+  if (_first_stale != NULL) {
+    // Stale tasks are purged when MCQ lock is released,
+    // but _first_stale updates are protected by MCQ lock.
+    // Once task processing starts and MCQ lock is released,
+    // other compiler threads can reuse _first_stale.
+    CompileTask* head = _first_stale;
+    _first_stale = NULL;
+    {
+      MutexUnlocker ul(lock());
+      for (CompileTask* task = head; task != NULL; ) {
+        CompileTask* next_task = task->next();
+        CompileTaskWrapper ctw(task); // Frees the task
+        task->set_failure_reason("stale task");
+        task = next_task;
+      }
+    }
+  }
+}
+
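purge_stale_tasks() uses a detach-then-release idiom: the whole stale list is unhooked while the MethodCompileQueue lock is held, and the lock is then dropped (MutexUnlocker) for the slower per-task cleanup so other threads can keep using the queue; only _first_stale itself is ever touched under the lock. The same shape as a small, self-contained sketch (std::mutex instead of HotSpot's MutexUnlocker; the names are hypothetical):

    #include <cstdio>
    #include <mutex>

    struct Node { Node* next; };

    static std::mutex queue_lock;        // analogue of MethodCompileQueue_lock
    static Node* first_stale = nullptr;  // analogue of _first_stale

    // Caller already holds queue_lock, mirroring purge_stale_tasks().
    static void purge_stale(std::unique_lock<std::mutex>& held) {
      if (first_stale == nullptr) return;
      Node* head = first_stale;          // detach the list under the lock
      first_stale = nullptr;

      held.unlock();                     // analogue of MutexUnlocker ul(lock())
      for (Node* n = head; n != nullptr; ) {
        Node* next = n->next;
        delete n;                        // slow cleanup without the lock held
        n = next;
      }
      held.lock();                       // re-acquire before returning
    }

    int main() {
      first_stale = new Node{ new Node{ nullptr } };
      std::unique_lock<std::mutex> held(queue_lock);
      purge_stale(held);
      std::printf("stale list empty: %s\n", first_stale == nullptr ? "yes" : "no");
      return 0;
    }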
+void CompileQueue::remove(CompileTask* task) {
    assert(lock()->owned_by_self(), "must own lock");
   if (task->prev() != NULL) {
     task->prev()->set_next(task->next());
@@ -740,6 +825,16 @@
   --_size;
 }
 
+void CompileQueue::remove_and_mark_stale(CompileTask* task) {
+  assert(lock()->owned_by_self(), "must own lock");
+  remove(task);
+
+  // Enqueue the task for reclamation (should be done outside MCQ lock)
+  task->set_next(_first_stale);
+  task->set_prev(NULL);
+  _first_stale = task;
+}
+
 // methods in the compile queue need to be marked as used on the stack
 // so that they don't get reclaimed by Redefine Classes
 void CompileQueue::mark_on_stack() {
@@ -750,18 +845,24 @@
   }
 }
 
-// ------------------------------------------------------------------
-// CompileQueue::print
+#ifndef PRODUCT
+/**
+ * Print entire compilation queue.
+ */
 void CompileQueue::print() {
-  tty->print_cr("Contents of %s", name());
-  tty->print_cr("----------------------");
-  CompileTask* task = _first;
-  while (task != NULL) {
-    task->print_line();
-    task = task->next();
+  if (CIPrintCompileQueue) {
+    ttyLocker ttyl;
+    tty->print_cr("Contents of %s", name());
+    tty->print_cr("----------------------");
+    CompileTask* task = _first;
+    while (task != NULL) {
+      task->print_line();
+      task = task->next();
+    }
+    tty->print_cr("----------------------");
   }
-  tty->print_cr("----------------------");
 }
+#endif // PRODUCT
 
 CompilerCounters::CompilerCounters(const char* thread_name, int instance, TRAPS) {
 
@@ -854,9 +955,6 @@
   _compilers[1] = new SharkCompiler();
 #endif // SHARK
 
-  // Initialize the CompileTask free list
-  _task_free_list = NULL;
-
   // Start the CompilerThreads
   init_compiler_threads(c1_count, c2_count);
   // totalTime performance counter is always created as it is required
@@ -1049,11 +1147,11 @@
 #endif // !ZERO && !SHARK && !COMPILERGRAAL
   // Initialize the compilation queue
   if (c2_compiler_count > 0) {
-    _c2_method_queue  = new CompileQueue("C2MethodQueue",  MethodCompileQueue_lock);
+    _c2_compile_queue  = new CompileQueue("C2 CompileQueue",  MethodCompileQueue_lock);
     _compilers[1]->set_num_compiler_threads(c2_compiler_count);
   }
   if (c1_compiler_count > 0) {
-    _c1_method_queue  = new CompileQueue("C1MethodQueue",  MethodCompileQueue_lock);
+    _c1_compile_queue  = new CompileQueue("C1 CompileQueue",  MethodCompileQueue_lock);
     _compilers[0]->set_num_compiler_threads(c1_compiler_count);
   }
 
@@ -1068,7 +1166,7 @@
     sprintf(name_buffer, "%s CompilerThread%d", _compilers[1]->name(), i);
     CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
     // Shark and C2
-    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, _compilers[1], CHECK);
+    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_compile_queue, counters, _compilers[1], CHECK);
     _compiler_threads->append(new_thread);
   }
 
@@ -1077,7 +1175,7 @@
     sprintf(name_buffer, "C1 CompilerThread%d", i);
     CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
     // C1
-    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, _compilers[0], CHECK);
+    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_compile_queue, counters, _compilers[0], CHECK);
     _compiler_threads->append(new_thread);
   }
 
@@ -1087,14 +1185,19 @@
 }
 
 
-// Set the methods on the stack as on_stack so that redefine classes doesn't
-// reclaim them
+/**
+ * Set the methods on the stack as on_stack so that redefine classes doesn't
+ * reclaim them. This method is executed at a safepoint.
+ */
 void CompileBroker::mark_on_stack() {
-  if (_c2_method_queue != NULL) {
-    _c2_method_queue->mark_on_stack();
+  assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
+  // Since we are at a safepoint, we do not need a lock to access
+  // the compile queues.
+  if (_c2_compile_queue != NULL) {
+    _c2_compile_queue->mark_on_stack();
   }
-  if (_c1_method_queue != NULL) {
-    _c1_method_queue->mark_on_stack();
+  if (_c1_compile_queue != NULL) {
+    _c1_compile_queue->mark_on_stack();
   }
 }
 
@@ -1110,7 +1213,7 @@
                                         const char* comment,
                                         Thread* thread) {
   // do nothing if compiler thread(s) is not available
-  if (!_initialized ) {
+  if (!_initialized) {
     return;
   }
 
@@ -1157,7 +1260,7 @@
 
   // If this method is already in the compile queue, then
   // we do not block the current thread.
-  if (compilation_is_in_queue(method, osr_bci)) {
+  if (compilation_is_in_queue(method)) {
     // We may want to decay our counter a bit here to prevent
     // multiple denied requests for compilation.  This is an
     // open compilation policy issue. Note: The other possibility,
@@ -1178,6 +1281,12 @@
     return;
   }
 
+  if (TieredCompilation) {
+    // Tiered policy requires MethodCounters to exist before adding a method to
+    // the queue. Create if we don't have them yet.
+    method->get_method_counters(thread);
+  }
+
   // Outputs from the following MutexLocker block:
   CompileTask* task     = NULL;
   bool         blocking = false;
@@ -1190,7 +1299,7 @@
     // Make sure the method has not slipped into the queues since
     // last we checked; note that those checks were "fast bail-outs".
     // Here we need to be more careful, see 14012000 below.
-    if (compilation_is_in_queue(method, osr_bci)) {
+    if (compilation_is_in_queue(method)) {
       return;
     }
 
@@ -1211,7 +1320,7 @@
     }
 
     // Should this thread wait for completion of the compile?
-    blocking = is_compile_blocking(method, osr_bci);
+    blocking = is_compile_blocking();
 
 #ifdef COMPILERGRAAL
     if (blocking) {
@@ -1437,19 +1546,17 @@
 }
 
 
-// ------------------------------------------------------------------
-// CompileBroker::compilation_is_in_queue
-//
-// See if this compilation is already requested.
-//
-// Implementation note: there is only a single "is in queue" bit
-// for each method.  This means that the check below is overly
-// conservative in the sense that an osr compilation in the queue
-// will block a normal compilation from entering the queue (and vice
-// versa).  This can be remedied by a full queue search to disambiguate
-// cases.  If it is deemed profitible, this may be done.
-bool CompileBroker::compilation_is_in_queue(methodHandle method,
-                                            int          osr_bci) {
+/**
+ * See if this compilation is already requested.
+ *
+ * Implementation note: there is only a single "is in queue" bit
+ * for each method.  This means that the check below is overly
+ * conservative in the sense that an osr compilation in the queue
+ * will block a normal compilation from entering the queue (and vice
+ * versa).  This can be remedied by a full queue search to disambiguate
+ * cases.  If it is deemed profitable, this may be done.
+ */
+bool CompileBroker::compilation_is_in_queue(methodHandle method) {
   return method->queued_for_compilation();
 }
 
@@ -1539,13 +1646,11 @@
   return assign_compile_id(method, osr_bci);
 }
 
-
-// ------------------------------------------------------------------
-// CompileBroker::is_compile_blocking
-//
-// Should the current thread be blocked until this compilation request
-// has been fulfilled?
-bool CompileBroker::is_compile_blocking(methodHandle method, int osr_bci) {
+/**
+ * Should the current thread block until this compilation request
+ * has been fulfilled?
+ */
+bool CompileBroker::is_compile_blocking() {
   assert(!InstanceRefKlass::owns_pending_list_lock(JavaThread::current()), "possible deadlock");
   return !BackgroundCompilation;
 }
@@ -1573,7 +1678,7 @@
                                               int           hot_count,
                                               const char*   comment,
                                               bool          blocking) {
-  CompileTask* new_task = allocate_task();
+  CompileTask* new_task = CompileTask::allocate();
   new_task->initialize(compile_id, method, osr_bci, comp_level,
                        hot_method, hot_count, comment,
                        blocking);
@@ -1582,75 +1687,52 @@
 }
 
 
-// ------------------------------------------------------------------
-// CompileBroker::allocate_task
-//
-// Allocate a CompileTask, from the free list if possible.
-CompileTask* CompileBroker::allocate_task() {
-  MutexLocker locker(CompileTaskAlloc_lock);
-  CompileTask* task = NULL;
-  if (_task_free_list != NULL) {
-    task = _task_free_list;
-    _task_free_list = task->next();
-    task->set_next(NULL);
-  } else {
-    task = new CompileTask();
-    task->set_next(NULL);
-  }
-  return task;
-}
-
-
-// ------------------------------------------------------------------
-// CompileBroker::free_task
-//
-// Add a task to the free list.
-void CompileBroker::free_task(CompileTask* task) {
-  MutexLocker locker(CompileTaskAlloc_lock);
-  task->free();
-  task->set_next(_task_free_list);
-  _task_free_list = task;
-}
-
-
-// ------------------------------------------------------------------
-// CompileBroker::wait_for_completion
-//
-// Wait for the given method CompileTask to complete.
+/**
+ *  Wait for the compilation task to complete.
+ */
 void CompileBroker::wait_for_completion(CompileTask* task) {
   if (CIPrintCompileQueue) {
+    ttyLocker ttyl;
     tty->print_cr("BLOCKING FOR COMPILE");
   }
 
   assert(task->is_blocking(), "can only wait on blocking task");
 
-  JavaThread *thread = JavaThread::current();
+  JavaThread* thread = JavaThread::current();
   thread->set_blocked_on_compilation(true);
 
   methodHandle method(thread, task->method());
   {
     MutexLocker waiter(task->lock(), thread);
 
-    while (!task->is_complete())
+    while (!task->is_complete() && !is_compilation_disabled_forever()) {
       task->lock()->wait();
+    }
   }
+
+  thread->set_blocked_on_compilation(false);
+  if (is_compilation_disabled_forever()) {
+    CompileTask::free(task);
+    return;
+  }
+
   // It is harmless to check this status without the lock, because
   // completion is a stable property (until the task object is recycled).
   assert(task->is_complete(), "Compilation should have completed");
   assert(task->code_handle() == NULL, "must be reset");
 
-  thread->set_blocked_on_compilation(false);
-
   // By convention, the waiter is responsible for recycling a
   // blocking CompileTask. Since there is only one waiter ever
   // waiting on a CompileTask, we know that no one else will
   // be using this CompileTask; we can free it.
-  free_task(task);
+  CompileTask::free(task);
 }
 
-// Initialize compiler thread(s) + compiler object(s). The postcondition
-// of this function is that the compiler runtimes are initialized and that
-//compiler threads can start compiling.
+/**
+ * Initialize compiler thread(s) + compiler object(s). The postcondition
+ * of this function is that the compiler runtimes are initialized and that
+ * compiler threads can start compiling.
+ */
 bool CompileBroker::init_compiler_runtime() {
   CompilerThread* thread = CompilerThread::current();
   AbstractCompiler* comp = thread->compiler();
@@ -1687,7 +1769,6 @@
     disable_compilation_forever();
     // If compiler initialization failed, no compiler thread that is specific to a
     // particular compiler runtime will ever start to compile methods.
-
     shutdown_compiler_runtime(comp, thread);
     return false;
   }
@@ -1701,9 +1782,11 @@
   return true;
 }
 
-// If C1 and/or C2 initialization failed, we shut down all compilation.
-// We do this to keep things simple. This can be changed if it ever turns out to be
-// a problem.
+/**
+ * If C1 and/or C2 initialization failed, we shut down all compilation.
+ * We do this to keep things simple. This can be changed if it ever turns
+ * out to be a problem.
+ */
 void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) {
   // Free buffer blob, if allocated
   if (thread->get_buffer_blob() != NULL) {
@@ -1715,28 +1798,25 @@
     // There are two reasons for shutting down the compiler
     // 1) compiler runtime initialization failed
     // 2) The code cache is full and the following flag is set: -XX:-UseCodeCacheFlushing
-    warning("Shutting down compiler %s (no space to run compilers)", comp->name());
+    warning("%s initialization failed. Shutting down all compilers", comp->name());
 
     // Only one thread per compiler runtime object enters here
     // Set state to shut down
     comp->set_shut_down();
 
-    MutexLocker mu(MethodCompileQueue_lock, thread);
-    CompileQueue* queue;
-    if (_c1_method_queue != NULL) {
-      _c1_method_queue->delete_all();
-      queue = _c1_method_queue;
-      _c1_method_queue = NULL;
-      delete _c1_method_queue;
+    // Delete all queued compilation tasks to make compiler threads exit faster.
+    if (_c1_compile_queue != NULL) {
+      _c1_compile_queue->free_all();
     }
 
-    if (_c2_method_queue != NULL) {
-      _c2_method_queue->delete_all();
-      queue = _c2_method_queue;
-      _c2_method_queue = NULL;
-      delete _c2_method_queue;
+    if (_c2_compile_queue != NULL) {
+      _c2_compile_queue->free_all();
     }
 
+    // Set flags so that we continue execution using the interpreter only.
+    UseCompiler    = false;
+    UseInterpreter = true;
+
     // We could delete compiler runtimes also. However, there are references to
     // the compiler runtime(s) (e.g.,  nmethod::is_compiled_by_c1()) which then
     // fail. This can be done later if necessary.
@@ -1822,26 +1902,11 @@
     if (method()->number_of_breakpoints() == 0) {
       // Compile the method.
       if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
-#ifdef COMPILER1
-        // Allow repeating compilations for the purpose of benchmarking
-        // compile speed. This is not useful for customers.
-        if (CompilationRepeat != 0) {
-          int compile_count = CompilationRepeat;
-          while (compile_count > 0) {
-            invoke_compiler_on_method(task);
-            nmethod* nm = method->code();
-            if (nm != NULL) {
-              nm->make_zombie();
-              method->clear_code();
-            }
-            compile_count--;
-          }
-        }
-#endif /* COMPILER1 */
         invoke_compiler_on_method(task);
       } else {
         // After compilation is disabled, remove remaining methods from queue
         method->clear_queued_for_compilation();
+        task->set_failure_reason("compilation is disabled");
       }
     }
   }
@@ -1870,7 +1935,7 @@
                      os::file_separator(), thread_id, os::current_process_id());
       }
 
-      fp = fopen(file_name, "at");
+      fp = fopen(file_name, "wt");
       if (fp != NULL) {
         if (LogCompilation && Verbose) {
           tty->print_cr("Opening compilation log %s", file_name);
@@ -2072,6 +2137,7 @@
     compilable = ci_env.compilable();
 
     if (ci_env.failing()) {
+      task->set_failure_reason(ci_env.failure_reason());
       const char* retry_message = ci_env.retry_message();
       if (_compilation_log != NULL) {
         _compilation_log->log_failure(thread, task, ci_env.failure_reason(), retry_message);
@@ -2125,7 +2191,7 @@
 
   // Note that the queued_for_compilation bits are cleared without
   // protection of a mutex. [They were set by the requester thread,
-  // when adding the task to the complie queue -- at which time the
+  // when adding the task to the compile queue -- at which time the
   // compile queue lock was held. Subsequently, we acquired the compile
   // queue lock to get this task off the compile queue; thus (to belabour
   // the point somewhat) our clearing of the bits must be occurring
--- a/src/share/vm/compiler/compileBroker.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/compiler/compileBroker.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -41,6 +41,11 @@
   friend class VMStructs;
 
  private:
+  static CompileTask* _task_free_list;
+#ifdef ASSERT
+  static int          _num_allocated_tasks;
+#endif
+
   Monitor*     _lock;
   uint         _compile_id;
   Method*      _method;
@@ -53,13 +58,14 @@
   int          _num_inlined_bytecodes;
   nmethodLocker* _code_handle;  // holder of eventual result
   CompileTask* _next, *_prev;
-
+  bool         _is_free;
   // Fields used for logging why the compilation was initiated:
   jlong        _time_queued;  // in units of os::elapsed_counter()
   Method*      _hot_method;   // which method actually triggered this task
   jobject      _hot_method_holder;
   int          _hot_count;    // information about its invocation counter
   const char*  _comment;      // more info about the task
+  const char*  _failure_reason;
 
  public:
   CompileTask() {
@@ -70,7 +76,8 @@
                   methodHandle hot_method, int hot_count, const char* comment,
                   bool is_blocking);
 
-  void free();
+  static CompileTask* allocate();
+  static void         free(CompileTask* task);
 
   int          compile_id() const                { return _compile_id; }
   Method*      method() const                    { return _method; }
@@ -99,6 +106,8 @@
   void         set_next(CompileTask* next)       { _next = next; }
   CompileTask* prev() const                      { return _prev; }
   void         set_prev(CompileTask* prev)       { _prev = prev; }
+  bool         is_free() const                   { return _is_free; }
+  void         set_is_free(bool val)             { _is_free = val; }
 
 private:
   static void  print_compilation_impl(outputStream* st, Method* method, int compile_id, int comp_level,
@@ -132,6 +141,10 @@
   void         log_task_dequeued(const char* comment);
   void         log_task_start(CompileLog* log);
   void         log_task_done(CompileLog* log);
+
+  void         set_failure_reason(const char* reason) {
+    _failure_reason = reason;
+  }
 };
 
 // CompilerCounters
@@ -190,7 +203,11 @@
   CompileTask* _first;
   CompileTask* _last;
 
+  CompileTask* _first_stale;
+
   int _size;
+
+  void purge_stale_tasks();
  public:
   CompileQueue(const char* name, Monitor* lock) {
     _name = name;
@@ -198,6 +215,7 @@
     _first = NULL;
     _last = NULL;
     _size = 0;
+    _first_stale = NULL;
   }
 
   const char*  name() const                      { return _name; }
@@ -205,6 +223,7 @@
 
   void         add(CompileTask* task);
   void         remove(CompileTask* task);
+  void         remove_and_mark_stale(CompileTask* task);
   CompileTask* first()                           { return _first; }
   CompileTask* last()                            { return _last;  }
 
@@ -213,10 +232,11 @@
   bool         is_empty() const                  { return _first == NULL; }
   int          size()     const                  { return _size;          }
 
+
   // Redefine Classes support
   void mark_on_stack();
-  void delete_all();
-  void         print();
+  void free_all();
+  NOT_PRODUCT (void print();)
 
   ~CompileQueue() {
     assert (is_empty(), " Compile Queue must be empty");
@@ -269,9 +289,8 @@
   static int  _last_compile_level;
   static char _last_method_compiled[name_buffer_length];
 
-  static CompileQueue* _c2_method_queue;
-  static CompileQueue* _c1_method_queue;
-  static CompileTask* _task_free_list;
+  static CompileQueue* _c2_compile_queue;
+  static CompileQueue* _c1_compile_queue;
 
   static GrowableArray<CompilerThread*>* _compiler_threads;
 
@@ -324,7 +343,7 @@
   static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
   static bool compilation_is_complete  (methodHandle method, int osr_bci, int comp_level);
   static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level);
-  static bool is_compile_blocking      (methodHandle method, int osr_bci);
+  static bool is_compile_blocking      ();
   static void preload_classes          (methodHandle method, TRAPS);
 
   static CompileTask* create_compile_task(CompileQueue* queue,
@@ -336,8 +355,6 @@
                                           int           hot_count,
                                           const char*   comment,
                                           bool          blocking);
-  static CompileTask* allocate_task();
-  static void free_task(CompileTask* task);
   static void wait_for_completion(CompileTask* task);
 
   static void invoke_compiler_on_method(CompileTask* task);
@@ -356,8 +373,8 @@
                                   const char* comment,
                                   Thread* thread);
   static CompileQueue* compile_queue(int comp_level) {
-    if (is_c2_compile(comp_level)) return _c2_method_queue;
-    if (is_c1_compile(comp_level)) return _c1_method_queue;
+    if (is_c2_compile(comp_level)) return _c2_compile_queue;
+    if (is_c1_compile(comp_level)) return _c1_compile_queue;
     return NULL;
   }
   static bool init_compiler_runtime();
@@ -375,7 +392,7 @@
     return NULL;
   }
 
-  static bool compilation_is_in_queue(methodHandle method, int osr_bci);
+  static bool compilation_is_in_queue(methodHandle method);
   static int queue_size(int comp_level) {
     CompileQueue *q = compile_queue(comp_level);
     return q != NULL ? q->size() : 0;
--- a/src/share/vm/compiler/compileLog.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/compiler/compileLog.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -55,8 +55,10 @@
 }
 
 CompileLog::~CompileLog() {
-  delete _out;
+  delete _out; // Close fd in fileStream::~fileStream()
   _out = NULL;
+  // Remove partial file after merging in CompileLog::finish_log_on_error
+  unlink(_file);
   FREE_C_HEAP_ARRAY(char, _identities, mtCompiler);
   FREE_C_HEAP_ARRAY(char, _file, mtCompiler);
 }
@@ -268,10 +270,9 @@
       }
       file->print_raw_cr("</compilation_log>");
       close(partial_fd);
-      unlink(partial_file);
     }
     CompileLog* next_log = log->_next;
-    delete log;
+    delete log; // Removes partial file
     log = next_log;
   }
   _first = NULL;
--- a/src/share/vm/compiler/compilerOracle.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/compiler/compilerOracle.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -167,44 +167,134 @@
   }
 }
 
+enum OptionType {
+  IntxType,
+  UintxType,
+  BoolType,
+  CcstrType,
+  UnknownType
+};
 
-class MethodOptionMatcher: public MethodMatcher {
-  const char * option;
- public:
-  MethodOptionMatcher(Symbol* class_name, Mode class_mode,
-                             Symbol* method_name, Mode method_mode,
-                             Symbol* signature, const char * opt, MethodMatcher* next):
-    MethodMatcher(class_name, class_mode, method_name, method_mode, signature, next) {
-    option = opt;
+/* Methods to map real type names to OptionType */
+template<typename T>
+static OptionType get_type_for() {
+  return UnknownType;
+};
+
+template<> OptionType get_type_for<intx>() {
+  return IntxType;
+}
+
+template<> OptionType get_type_for<uintx>() {
+  return UintxType;
+}
+
+template<> OptionType get_type_for<bool>() {
+  return BoolType;
+}
+
+template<> OptionType get_type_for<ccstr>() {
+  return CcstrType;
+}
+
+template<typename T>
+static const T copy_value(const T value) {
+  return value;
+}
+
+template<> const ccstr copy_value<ccstr>(const ccstr value) {
+  return (const ccstr)strdup(value);
+}
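+// Note: ccstr values are strdup'ed because the parsed string typically lives in a
+// temporary (resource area) buffer, whereas scalar values are simply copied.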
+
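+// A MethodMatcher that additionally records one named option and its typed value
+// for the methods it matches.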
+template <typename T>
+class TypedMethodOptionMatcher : public MethodMatcher {
+  const char* _option;
+  OptionType _type;
+  const T _value;
+
+public:
+  TypedMethodOptionMatcher(Symbol* class_name, Mode class_mode,
+                           Symbol* method_name, Mode method_mode,
+                           Symbol* signature, const char* opt,
+                           const T value,  MethodMatcher* next) :
+    MethodMatcher(class_name, class_mode, method_name, method_mode, signature, next),
+                  _type(get_type_for<T>()), _value(copy_value<T>(value)) {
+    _option = strdup(opt);
   }
 
-  bool match(methodHandle method, const char* opt) {
-    MethodOptionMatcher* current = this;
+  ~TypedMethodOptionMatcher() {
+    free((void*)_option);
+  }
+
+  TypedMethodOptionMatcher* match(methodHandle method, const char* opt) {
+    TypedMethodOptionMatcher* current = this;
     while (current != NULL) {
-      current = (MethodOptionMatcher*)current->find(method);
+      current = (TypedMethodOptionMatcher*)current->find(method);
       if (current == NULL) {
-        return false;
+        return NULL;
       }
-      if (strcmp(current->option, opt) == 0) {
-        return true;
+      if (strcmp(current->_option, opt) == 0) {
+        return current;
       }
       current = current->next();
     }
-    return false;
+    return NULL;
+  }
+
+  TypedMethodOptionMatcher* next() {
+    return (TypedMethodOptionMatcher*)_next;
   }
 
-  MethodOptionMatcher* next() {
-    return (MethodOptionMatcher*)_next;
-  }
+  OptionType get_type(void) {
+    return _type;
+  }
+
+  T value() { return _value; }
 
-  virtual void print() {
+  void print() {
+    ttyLocker ttyl;
     print_base();
-    tty->print(" %s", option);
+    tty->print(" %s", _option);
+    tty->print(" <unknown option type>");
     tty->cr();
   }
 };
 
+template<>
+void TypedMethodOptionMatcher<intx>::print() {
+  ttyLocker ttyl;
+  print_base();
+  tty->print(" intx %s", _option);
+  tty->print(" = " INTX_FORMAT, _value);
+  tty->cr();
+};
 
+template<>
+void TypedMethodOptionMatcher<uintx>::print() {
+  ttyLocker ttyl;
+  print_base();
+  tty->print(" uintx %s", _option);
+  tty->print(" = " UINTX_FORMAT, _value);
+  tty->cr();
+};
+
+template<>
+void TypedMethodOptionMatcher<bool>::print() {
+  ttyLocker ttyl;
+  print_base();
+  tty->print(" bool %s", _option);
+  tty->print(" = %s", _value ? "true" : "false");
+  tty->cr();
+};
+
+template<>
+void TypedMethodOptionMatcher<ccstr>::print() {
+  ttyLocker ttyl;
+  print_base();
+  tty->print(" const char* %s", _option);
+  tty->print(" = '%s'", _value);
+  tty->cr();
+};
 
 // this must parallel the command_names below
 enum OracleCommand {
@@ -259,23 +349,46 @@
   return lists[command];
 }
 
-
-
+template<typename T>
 static MethodMatcher* add_option_string(Symbol* class_name, MethodMatcher::Mode c_mode,
                                         Symbol* method_name, MethodMatcher::Mode m_mode,
                                         Symbol* signature,
-                                        const char* option) {
-  lists[OptionCommand] = new MethodOptionMatcher(class_name, c_mode, method_name, m_mode,
-                                                 signature, option, lists[OptionCommand]);
+                                        const char* option,
+                                        T value) {
+  lists[OptionCommand] = new TypedMethodOptionMatcher<T>(class_name, c_mode, method_name, m_mode,
+                                                         signature, option, value, lists[OptionCommand]);
   return lists[OptionCommand];
 }
 
+template<typename T>
+static bool get_option_value(methodHandle method, const char* option, T& value) {
+   TypedMethodOptionMatcher<T>* m;
+   if (lists[OptionCommand] != NULL
+       && (m = ((TypedMethodOptionMatcher<T>*)lists[OptionCommand])->match(method, option)) != NULL
+       && m->get_type() == get_type_for<T>()) {
+       value = m->value();
+       return true;
+   } else {
+     return false;
+   }
+}
 
 bool CompilerOracle::has_option_string(methodHandle method, const char* option) {
-  return lists[OptionCommand] != NULL &&
-    ((MethodOptionMatcher*)lists[OptionCommand])->match(method, option);
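+  // Type (1) options (see parse_from_line) are stored as bool options with value true,
+  // so looking up the bool value answers "is this flag set for the method?".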
+  bool value = false;
+  get_option_value(method, option, value);
+  return value;
 }
 
+template<typename T>
+bool CompilerOracle::has_option_value(methodHandle method, const char* option, T& value) {
+  return ::get_option_value(method, option, value);
+}
+
+// Explicit instantiation for all OptionTypes supported.
+template bool CompilerOracle::has_option_value<intx>(methodHandle method, const char* option, intx& value);
+template bool CompilerOracle::has_option_value<uintx>(methodHandle method, const char* option, uintx& value);
+template bool CompilerOracle::has_option_value<bool>(methodHandle method, const char* option, bool& value);
+template bool CompilerOracle::has_option_value<ccstr>(methodHandle method, const char* option, ccstr& value);
 
 bool CompilerOracle::should_exclude(methodHandle method, bool& quietly) {
   quietly = true;
@@ -434,6 +547,94 @@
 
 
 
+// Scan next flag and value in line, return MethodMatcher object on success, NULL on failure.
+// On failure, error_msg contains a description of the first error.
+// For future extensions: set error_msg on first error.
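+// Example (illustrative flag name): for type "intx", a remaining fragment such as
+// " MyIntFlag 42" yields a matcher for flag "MyIntFlag" with intx value 42.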
+static MethodMatcher* scan_flag_and_value(const char* type, const char* line, int& total_bytes_read,
+                                          Symbol* c_name, MethodMatcher::Mode c_match,
+                                          Symbol* m_name, MethodMatcher::Mode m_match,
+                                          Symbol* signature,
+                                          char* errorbuf, const int buf_size) {
+  total_bytes_read = 0;
+  int bytes_read = 0;
+  char flag[256];
+
+  // Read flag name.
+  if (sscanf(line, "%*[ \t]%255[a-zA-Z0-9]%n", flag, &bytes_read) == 1) {
+    line += bytes_read;
+    total_bytes_read += bytes_read;
+
+    // Read value.
+    if (strcmp(type, "intx") == 0) {
+      intx value;
+      if (sscanf(line, "%*[ \t]" INTX_FORMAT "%n", &value, &bytes_read) == 1) {
+        total_bytes_read += bytes_read;
+        return add_option_string(c_name, c_match, m_name, m_match, signature, flag, value);
+      } else {
+        jio_snprintf(errorbuf, buf_size, "  Value cannot be read for flag %s of type %s ", flag, type);
+      }
+    } else if (strcmp(type, "uintx") == 0) {
+      uintx value;
+      if (sscanf(line, "%*[ \t]" UINTX_FORMAT "%n", &value, &bytes_read) == 1) {
+        total_bytes_read += bytes_read;
+        return add_option_string(c_name, c_match, m_name, m_match, signature, flag, value);
+      } else {
+        jio_snprintf(errorbuf, buf_size, "  Value cannot be read for flag %s of type %s", flag, type);
+      }
+    } else if (strcmp(type, "ccstr") == 0) {
+      ResourceMark rm;
+      char* value = NEW_RESOURCE_ARRAY(char, strlen(line) + 1);
+      if (sscanf(line, "%*[ \t]%255[_a-zA-Z0-9]%n", value, &bytes_read) == 1) {
+        total_bytes_read += bytes_read;
+        return add_option_string(c_name, c_match, m_name, m_match, signature, flag, (ccstr)value);
+      } else {
+        jio_snprintf(errorbuf, buf_size, "  Value cannot be read for flag %s of type %s", flag, type);
+      }
+    } else if (strcmp(type, "ccstrlist") == 0) {
+      // Accumulates several strings into one. The internal type is ccstr.
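+      // For example, the fragment " foo bar baz" is stored as the single ccstr "foo bar baz".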
+      ResourceMark rm;
+      char* value = NEW_RESOURCE_ARRAY(char, strlen(line) + 1);
+      char* next_value = value;
+      if (sscanf(line, "%*[ \t]%255[_a-zA-Z0-9]%n", next_value, &bytes_read) == 1) {
+        total_bytes_read += bytes_read;
+        line += bytes_read;
+        next_value += bytes_read;
+        char* end_value = next_value-1;
+        while (sscanf(line, "%*[ \t]%255[_a-zA-Z0-9]%n", next_value, &bytes_read) == 1) {
+          total_bytes_read += bytes_read;
+          line += bytes_read;
+          *end_value = ' '; // overwrite the previous '\0'
+          next_value += bytes_read;
+          end_value = next_value-1;
+        }
+        return add_option_string(c_name, c_match, m_name, m_match, signature, flag, (ccstr)value);
+      } else {
+        jio_snprintf(errorbuf, buf_size, "  Value cannot be read for flag %s of type %s", flag, type);
+      }
+    } else if (strcmp(type, "bool") == 0) {
+      char value[256];
+      if (sscanf(line, "%*[ \t]%255[a-zA-Z]%n", value, &bytes_read) == 1) {
+        if (strcmp(value, "true") == 0) {
+          total_bytes_read += bytes_read;
+          return add_option_string(c_name, c_match, m_name, m_match, signature, flag, true);
+        } else if (strcmp(value, "false") == 0) {
+          total_bytes_read += bytes_read;
+          return add_option_string(c_name, c_match, m_name, m_match, signature, flag, false);
+        } else {
+          jio_snprintf(errorbuf, buf_size, "  Value cannot be read for flag %s of type %s", flag, type);
+        }
+      } else {
+        jio_snprintf(errorbuf, buf_size, "  Value cannot be read for flag %s of type %s", flag, type);
+      }
+    } else {
+      jio_snprintf(errorbuf, buf_size, "  Type %s not supported ", type);
+    }
+  } else {
+    jio_snprintf(errorbuf, buf_size, "  Flag name for type %s should be alphanumeric ", type);
+  }
+  return NULL;
+}
+
 void CompilerOracle::parse_from_line(char* line) {
   if (line[0] == '\0') return;
   if (line[0] == '#')  return;
@@ -463,8 +664,10 @@
   int bytes_read;
   OracleCommand command = parse_command_name(line, &bytes_read);
   line += bytes_read;
+  ResourceMark rm;
 
   if (command == UnknownCommand) {
+    ttyLocker ttyl;
     tty->print_cr("CompilerOracle: unrecognized line");
     tty->print_cr("  \"%s\"", original_line);
     return;
@@ -486,7 +689,7 @@
   char method_name[256];
   char sig[1024];
   char errorbuf[1024];
-  const char* error_msg = NULL;
+  const char* error_msg = NULL; // description of first error that appears
   MethodMatcher* match = NULL;
 
   if (scan_line(line, class_name, &c_match, method_name, &m_match, &bytes_read, error_msg)) {
@@ -505,43 +708,77 @@
     }
 
     if (command == OptionCommand) {
-      // Look for trailing options to support
-      // ciMethod::has_option("string") to control features in the
-      // compiler.  Multiple options may follow the method name.
-      char option[256];
+      // Look for trailing options.
+      //
+      // Two types of trailing options are
+      // supported:
+      //
+      // (1) CompileCommand=option,Klass::method,flag
+      // (2) CompileCommand=option,Klass::method,type,flag,value
+      //
+      // Type (1) is used to support ciMethod::has_option("someflag")
+      // (i.e., to check if a flag "someflag" is enabled for a method).
+      //
+      // Type (2) is used to support options with a value. Values can have
+      // the following types: intx, uintx, bool, ccstr, and ccstrlist.
+      //
+      // For future extensions: extend scan_flag_and_value()
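+      //
+      // For example (flag names below are illustrative only):
+      //   -XX:CompileCommand=option,MyClass::myMethod,MyBoolFlag
+      //   -XX:CompileCommand=option,MyClass::myMethod,intx,MyIntFlag,10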
+      char option[256]; // stores flag for Type (1) and type of Type (2)
       while (sscanf(line, "%*[ \t]%255[a-zA-Z0-9]%n", option, &bytes_read) == 1) {
         if (match != NULL && !_quiet) {
           // Print out the last match added
+          ttyLocker ttyl;
           tty->print("CompilerOracle: %s ", command_names[command]);
           match->print();
         }
-        match = add_option_string(c_name, c_match, m_name, m_match, signature, strdup(option));
         line += bytes_read;
-      }
+
+        if (strcmp(option, "intx") == 0
+            || strcmp(option, "uintx") == 0
+            || strcmp(option, "bool") == 0
+            || strcmp(option, "ccstr") == 0
+            || strcmp(option, "ccstrlist") == 0
+            ) {
+
+          // Type (2) option: parse flag name and value.
+          match = scan_flag_and_value(option, line, bytes_read,
+                                      c_name, c_match, m_name, m_match, signature,
+                                      errorbuf, sizeof(errorbuf));
+          if (match == NULL) {
+            error_msg = errorbuf;
+            break;
+          }
+          line += bytes_read;
+        } else {
+          // Type (1) option
+          match = add_option_string(c_name, c_match, m_name, m_match, signature, option, true);
+        }
+      } // end of while loop over trailing options
     } else {
-      bytes_read = 0;
-      sscanf(line, "%*[ \t]%n", &bytes_read);
-      if (line[bytes_read] != '\0') {
-        jio_snprintf(errorbuf, sizeof(errorbuf), "  Unrecognized text after command: %s", line);
-        error_msg = errorbuf;
-      } else {
-        match = add_predicate(command, c_name, c_match, m_name, m_match, signature);
-      }
+      match = add_predicate(command, c_name, c_match, m_name, m_match, signature);
     }
   }
 
-  if (match != NULL) {
-    if (!_quiet) {
-      ResourceMark rm;
-      tty->print("CompilerOracle: %s ", command_names[command]);
-      match->print();
-    }
-  } else {
+  ttyLocker ttyl;
+  if (error_msg != NULL) {
+    // an error has happened
     tty->print_cr("CompilerOracle: unrecognized line");
     tty->print_cr("  \"%s\"", original_line);
     if (error_msg != NULL) {
       tty->print_cr("%s", error_msg);
     }
+  } else {
+    // check for remaining characters
+    bytes_read = 0;
+    sscanf(line, "%*[ \t]%n", &bytes_read);
+    if (line[bytes_read] != '\0') {
+      tty->print_cr("CompilerOracle: unrecognized line");
+      tty->print_cr("  \"%s\"", original_line);
+      tty->print_cr("  Unrecognized text %s after command ", line);
+    } else if (match != NULL && !_quiet) {
+      tty->print("CompilerOracle: %s ", command_names[command]);
+      match->print();
+    }
   }
 }
 
--- a/src/share/vm/compiler/compilerOracle.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/compiler/compilerOracle.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -64,6 +64,11 @@
   // Check to see if this method has option set for it
   static bool has_option_string(methodHandle method, const char * option);
 
+  // Check if method has option and value set. If yes, overwrite value and return true,
+  // otherwise leave value unchanged and return false.
+  template<typename T>
+  static bool has_option_value(methodHandle method, const char* option, T& value);
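+  //
+  // Usage sketch (illustrative flag name):
+  //   intx count = 0;
+  //   if (CompilerOracle::has_option_value(method, "MyIntFlag", count)) { /* use count */ }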
+
   // Reads from string instead of file
   static void parse_from_string(const char* command_string, void (*parser)(char*));
 
--- a/src/share/vm/compiler/disassembler.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/compiler/disassembler.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -245,12 +245,12 @@
 };
 
 decode_env::decode_env(CodeBlob* code, outputStream* output, CodeStrings c) {
-  memset(this, 0, sizeof(*this));
+  memset(this, 0, sizeof(*this)); // Beware, this zeroes bits of fields.
   _output = output ? output : tty;
   _code = code;
   if (code != NULL && code->is_nmethod())
     _nm = (nmethod*) code;
-  _strings.assign(c);
+  _strings.copy(c);
 
   // by default, output pc but not bytes:
   _print_pc       = true;
--- a/src/share/vm/compiler/methodLiveness.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/compiler/methodLiveness.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -475,7 +475,7 @@
     bci = 0;
   }
 
-  MethodLivenessResult answer((uintptr_t*)NULL,0);
+  MethodLivenessResult answer((BitMap::bm_word_t*)NULL,0);
 
   if (_block_count > 0) {
     if (TimeLivenessAnalysis) _time_total.start();
@@ -1000,7 +1000,7 @@
 }
 
 MethodLivenessResult MethodLiveness::BasicBlock::get_liveness_at(ciMethod* method, int bci) {
-  MethodLivenessResult answer(NEW_RESOURCE_ARRAY(uintptr_t, _analyzer->bit_map_size_words()),
+  MethodLivenessResult answer(NEW_RESOURCE_ARRAY(BitMap::bm_word_t, _analyzer->bit_map_size_words()),
                 _analyzer->bit_map_size_bits());
   answer.set_is_valid();
 
--- a/src/share/vm/compiler/oopMap.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/compiler/oopMap.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -463,7 +463,6 @@
   assert(cb != NULL, "no codeblob");
 
   // Any reg might be saved by a safepoint handler (see generate_handler_blob).
-  const int max_saved_on_entry_reg_count = ConcreteRegisterImpl::number_of_registers;
   assert( reg_map->_update_for_id == NULL || fr->is_older(reg_map->_update_for_id),
          "already updated this map; do not 'update' it twice!" );
   debug_only(reg_map->_update_for_id = fr->id());
@@ -473,27 +472,20 @@
           !cb->caller_must_gc_arguments(reg_map->thread())),
          "include_argument_oops should already be set");
 
-  int nof_callee = 0;
-  oop*        locs[2*max_saved_on_entry_reg_count+1];
-  VMReg regs[2*max_saved_on_entry_reg_count+1];
-  // ("+1" because max_saved_on_entry_reg_count might be zero)
-
   // Scan through oopmap and find location of all callee-saved registers
   // (we do not do update in place, since info could be overwritten)
 
   address pc = fr->pc();
-
   OopMap* map  = cb->oop_map_for_return_address(pc);
-
-  assert(map != NULL, " no ptr map found");
+  assert(map != NULL, "no ptr map found");
+  DEBUG_ONLY(int nof_callee = 0;)
 
-  OopMapValue omv;
-  for(OopMapStream oms(map,OopMapValue::callee_saved_value); !oms.is_done(); oms.next()) {
-    omv = oms.current();
-    assert(nof_callee < 2*max_saved_on_entry_reg_count, "overflow");
-    regs[nof_callee] = omv.content_reg();
-    locs[nof_callee] = fr->oopmapreg_to_location(omv.reg(),reg_map);
-    nof_callee++;
+  for (OopMapStream oms(map, OopMapValue::callee_saved_value); !oms.is_done(); oms.next()) {
+    OopMapValue omv = oms.current();
+    VMReg reg = omv.content_reg();
+    oop* loc = fr->oopmapreg_to_location(omv.reg(), reg_map);
+    reg_map->set_location(reg, (address) loc);
+    DEBUG_ONLY(nof_callee++;)
   }
 
   // Check that runtime stubs save all callee-saved registers
@@ -502,11 +494,6 @@
          (nof_callee >= SAVED_ON_ENTRY_REG_COUNT || nof_callee >= C_SAVED_ON_ENTRY_REG_COUNT),
          "must save all");
 #endif // COMPILER2
-
-  // Copy found callee-saved register to reg_map
-  for(int i = 0; i < nof_callee; i++) {
-    reg_map->set_location(regs[i], (address)locs[i]);
-  }
 }
 
 //=============================================================================
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -158,7 +158,7 @@
                  " coal_deaths(" SIZE_FORMAT ")"
                  " + count(" SSIZE_FORMAT ")",
                  p2i(this), size(), _allocation_stats.prev_sweep(), _allocation_stats.split_births(),
-                 _allocation_stats.split_births(), _allocation_stats.split_deaths(),
+                 _allocation_stats.coal_births(), _allocation_stats.split_deaths(),
                  _allocation_stats.coal_deaths(), count()));
 }
 #endif
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,8 @@
 }
 
 void ConcurrentMarkSweepPolicy::initialize_generations() {
-  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
+  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC,
+    CURRENT_PC, AllocFailStrategy::RETURN_NULL);
   if (_generations == NULL)
     vm_exit_during_initialization("Unable to allocate gen spec");
 
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP
 
 #include "memory/genOopClosures.hpp"
+#include "memory/iterator.hpp"
 
 /////////////////////////////////////////////////////////////////
 // Closures used by ConcurrentMarkSweepGeneration's collector
@@ -48,33 +49,13 @@
     }                                                     \
   }
 
-// Applies the given oop closure to all oops in all klasses visited.
-class CMKlassClosure : public KlassClosure {
-  friend class CMSOopClosure;
-  friend class CMSOopsInGenClosure;
-
-  OopClosure* _oop_closure;
-
-  // Used when _oop_closure couldn't be set in an initialization list.
-  void initialize(OopClosure* oop_closure) {
-    assert(_oop_closure == NULL, "Should only be called once");
-    _oop_closure = oop_closure;
-  }
+// TODO: This duplication of the MetadataAwareOopClosure class is only needed
+//       because some CMS OopClosures derive from OopsInGenClosure. It would be
+//       good to get rid of them completely.
+class MetadataAwareOopsInGenClosure: public OopsInGenClosure {
+  KlassToOopClosure _klass_closure;
  public:
-  CMKlassClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) { }
-
-  void do_klass(Klass* k);
-};
-
-// The base class for all CMS marking closures.
-// It's used to proxy through the metadata to the oops defined in them.
-class CMSOopClosure: public ExtendedOopClosure {
-  CMKlassClosure      _klass_closure;
- public:
-  CMSOopClosure() : ExtendedOopClosure() {
-    _klass_closure.initialize(this);
-  }
-  CMSOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) {
+  MetadataAwareOopsInGenClosure() {
     _klass_closure.initialize(this);
   }
 
@@ -87,26 +68,7 @@
   virtual void do_class_loader_data(ClassLoaderData* cld);
 };
 
-// TODO: This duplication of the CMSOopClosure class is only needed because
-//       some CMS OopClosures derive from OopsInGenClosure. It would be good
-//       to get rid of them completely.
-class CMSOopsInGenClosure: public OopsInGenClosure {
-  CMKlassClosure _klass_closure;
- public:
-  CMSOopsInGenClosure() {
-    _klass_closure.initialize(this);
-  }
-
-  virtual bool do_metadata()    { return do_metadata_nv(); }
-  inline  bool do_metadata_nv() { return true; }
-
-  virtual void do_klass(Klass* k);
-  void do_klass_nv(Klass* k);
-
-  virtual void do_class_loader_data(ClassLoaderData* cld);
-};
-
-class MarkRefsIntoClosure: public CMSOopsInGenClosure {
+class MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
  private:
   const MemRegion _span;
   CMSBitMap*      _bitMap;
@@ -122,7 +84,7 @@
   }
 };
 
-class Par_MarkRefsIntoClosure: public CMSOopsInGenClosure {
+class Par_MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
  private:
   const MemRegion _span;
   CMSBitMap*      _bitMap;
@@ -140,7 +102,7 @@
 
 // A variant of the above used in certain kinds of CMS
 // marking verification.
-class MarkRefsIntoVerifyClosure: public CMSOopsInGenClosure {
+class MarkRefsIntoVerifyClosure: public MetadataAwareOopsInGenClosure {
  private:
   const MemRegion _span;
   CMSBitMap*      _verification_bm;
@@ -159,7 +121,7 @@
 };
 
 // The non-parallel version (the parallel version appears further below).
-class PushAndMarkClosure: public CMSOopClosure {
+class PushAndMarkClosure: public MetadataAwareOopClosure {
  private:
   CMSCollector* _collector;
   MemRegion     _span;
@@ -193,7 +155,7 @@
 // synchronization (for instance, via CAS). The marking stack
 // used in the non-parallel case above is here replaced with
 // an OopTaskQueue structure to allow efficient work stealing.
-class Par_PushAndMarkClosure: public CMSOopClosure {
+class Par_PushAndMarkClosure: public MetadataAwareOopClosure {
  private:
   CMSCollector* _collector;
   MemRegion     _span;
@@ -218,7 +180,7 @@
 };
 
 // The non-parallel version (the parallel version appears further below).
-class MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
+class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
  private:
   MemRegion          _span;
   CMSBitMap*         _bit_map;
@@ -262,7 +224,7 @@
 // stack and the bitMap are shared, so access needs to be suitably
 // synchronized. An OopTaskQueue structure, supporting efficient
 // workstealing, replaces a CMSMarkStack for storing grey objects.
-class Par_MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
+class Par_MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
  private:
   MemRegion              _span;
   CMSBitMap*             _bit_map;
@@ -291,7 +253,7 @@
 // This closure is used during the concurrent marking phase
 // following the first checkpoint. Its use is buried in
 // the closure MarkFromRootsClosure.
-class PushOrMarkClosure: public CMSOopClosure {
+class PushOrMarkClosure: public MetadataAwareOopClosure {
  private:
   CMSCollector*   _collector;
   MemRegion       _span;
@@ -324,7 +286,7 @@
 // This closure is used during the concurrent marking phase
 // following the first checkpoint. Its use is buried in
 // the closure Par_MarkFromRootsClosure.
-class Par_PushOrMarkClosure: public CMSOopClosure {
+class Par_PushOrMarkClosure: public MetadataAwareOopClosure {
  private:
   CMSCollector*    _collector;
   MemRegion        _whole_span;
@@ -364,7 +326,7 @@
 // processing phase of the CMS final checkpoint step, as
 // well as during the concurrent precleaning of the discovered
 // reference lists.
-class CMSKeepAliveClosure: public CMSOopClosure {
+class CMSKeepAliveClosure: public MetadataAwareOopClosure {
  private:
   CMSCollector* _collector;
   const MemRegion _span;
@@ -384,7 +346,7 @@
   inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
 };
 
-class CMSInnerParMarkAndPushClosure: public CMSOopClosure {
+class CMSInnerParMarkAndPushClosure: public MetadataAwareOopClosure {
  private:
   CMSCollector* _collector;
   MemRegion     _span;
@@ -405,7 +367,7 @@
 // A parallel (MT) version of the above, used when
 // reference processing is parallel; the only difference
 // is in the do_oop method.
-class CMSParKeepAliveClosure: public CMSOopClosure {
+class CMSParKeepAliveClosure: public MetadataAwareOopClosure {
  private:
   MemRegion     _span;
   OopTaskQueue* _work_queue;
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -44,33 +44,20 @@
   }
 }
 
-// CMSOopClosure and CMSoopsInGenClosure are duplicated,
+// MetadataAwareOopClosure and MetadataAwareOopsInGenClosure are duplicated,
 // until we get rid of OopsInGenClosure.
 
-inline void CMSOopClosure::do_klass(Klass* k)       { do_klass_nv(k); }
-inline void CMSOopsInGenClosure::do_klass(Klass* k) { do_klass_nv(k); }
-
-inline void CMSOopClosure::do_klass_nv(Klass* k) {
+inline void MetadataAwareOopsInGenClosure::do_klass_nv(Klass* k) {
   ClassLoaderData* cld = k->class_loader_data();
   do_class_loader_data(cld);
 }
-inline void CMSOopsInGenClosure::do_klass_nv(Klass* k) {
-  ClassLoaderData* cld = k->class_loader_data();
-  do_class_loader_data(cld);
-}
+inline void MetadataAwareOopsInGenClosure::do_klass(Klass* k) { do_klass_nv(k); }
 
-inline void CMSOopClosure::do_class_loader_data(ClassLoaderData* cld) {
-  assert(_klass_closure._oop_closure == this, "Must be");
-
-  bool claim = true;  // Must claim the class loader data before processing.
-  cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim);
-}
-inline void CMSOopsInGenClosure::do_class_loader_data(ClassLoaderData* cld) {
+inline void MetadataAwareOopsInGenClosure::do_class_loader_data(ClassLoaderData* cld) {
   assert(_klass_closure._oop_closure == this, "Must be");
 
   bool claim = true;  // Must claim the class loader data before processing.
   cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim);
 }
 
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_INLINE_HPP
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -33,12 +33,14 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/blockOffsetTable.inline.hpp"
 #include "memory/resourceArea.hpp"
+#include "memory/space.inline.hpp"
 #include "memory/universe.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/java.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/copy.hpp"
 
@@ -793,53 +795,6 @@
   }
 }
 
-// Apply the given closure to each oop in the space \intersect memory region.
-void CompactibleFreeListSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
-  assert_lock_strong(freelistLock());
-  if (is_empty()) {
-    return;
-  }
-  MemRegion cur = MemRegion(bottom(), end());
-  mr = mr.intersection(cur);
-  if (mr.is_empty()) {
-    return;
-  }
-  if (mr.equals(cur)) {
-    oop_iterate(cl);
-    return;
-  }
-  assert(mr.end() <= end(), "just took an intersection above");
-  HeapWord* obj_addr = block_start(mr.start());
-  HeapWord* t = mr.end();
-
-  SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
-  if (block_is_obj(obj_addr)) {
-    // Handle first object specially.
-    oop obj = oop(obj_addr);
-    obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
-  } else {
-    FreeChunk* fc = (FreeChunk*)obj_addr;
-    obj_addr += fc->size();
-  }
-  while (obj_addr < t) {
-    HeapWord* obj = obj_addr;
-    obj_addr += block_size(obj_addr);
-    // If "obj_addr" is not greater than top, then the
-    // entire object "obj" is within the region.
-    if (obj_addr <= t) {
-      if (block_is_obj(obj)) {
-        oop(obj)->oop_iterate(cl);
-      }
-    } else {
-      // "obj" extends beyond end of region
-      if (block_is_obj(obj)) {
-        oop(obj)->oop_iterate(&smr_blk);
-      }
-      break;
-    }
-  }
-}
-
 // NOTE: In the following methods, in order to safely be able to
 // apply the closure to an object, we need to be sure that the
 // object has been initialized. We are guaranteed that an object
@@ -898,42 +853,60 @@
                                                   UpwardsObjectClosure* cl) {
   assert_locked(freelistLock());
   NOT_PRODUCT(verify_objects_initialized());
-  Space::object_iterate_mem(mr, cl);
+  assert(!mr.is_empty(), "Should be non-empty");
+  // We use MemRegion(bottom(), end()) rather than used_region() below
+  // because the two are not necessarily equal for some kinds of
+  // spaces, in particular, certain kinds of free list spaces.
+  // We could use the more complicated but more precise:
+  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
+  // but the slight imprecision seems acceptable in the assertion check.
+  assert(MemRegion(bottom(), end()).contains(mr),
+         "Should be within used space");
+  HeapWord* prev = cl->previous();   // max address from last time
+  if (prev >= mr.end()) { // nothing to do
+    return;
+  }
+  // This assert will not work when we go from cms space to perm
+  // space, and use same closure. Easy fix deferred for later. XXX YSR
+  // assert(prev == NULL || contains(prev), "Should be within space");
+
+  bool last_was_obj_array = false;
+  HeapWord *blk_start_addr, *region_start_addr;
+  if (prev > mr.start()) {
+    region_start_addr = prev;
+    blk_start_addr    = prev;
+    // The previous invocation may have pushed "prev" beyond the
+    // last allocated block yet there may be still be blocks
+    // in this region due to a particular coalescing policy.
+    // Relax the assertion so that the case where the unallocated
+    // block is maintained and "prev" is beyond the unallocated
+    // block does not cause the assertion to fire.
+    assert((BlockOffsetArrayUseUnallocatedBlock &&
+            (!is_in(prev))) ||
+           (blk_start_addr == block_start(region_start_addr)), "invariant");
+  } else {
+    region_start_addr = mr.start();
+    blk_start_addr    = block_start(region_start_addr);
+  }
+  HeapWord* region_end_addr = mr.end();
+  MemRegion derived_mr(region_start_addr, region_end_addr);
+  while (blk_start_addr < region_end_addr) {
+    const size_t size = block_size(blk_start_addr);
+    if (block_is_obj(blk_start_addr)) {
+      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
+    } else {
+      last_was_obj_array = false;
+    }
+    blk_start_addr += size;
+  }
+  if (!last_was_obj_array) {
+    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
+           "Should be within (closed) used space");
+    assert(blk_start_addr > prev, "Invariant");
+    cl->set_previous(blk_start_addr); // min address for next time
+  }
 }
 
-// Callers of this iterator beware: The closure application should
-// be robust in the face of uninitialized objects and should (always)
-// return a correct size so that the next addr + size below gives us a
-// valid block boundary. [See for instance,
-// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
-// in ConcurrentMarkSweepGeneration.cpp.]
-HeapWord*
-CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
-  assert_lock_strong(freelistLock());
-  HeapWord *addr, *last;
-  size_t size;
-  for (addr = bottom(), last  = end();
-       addr < last; addr += size) {
-    FreeChunk* fc = (FreeChunk*)addr;
-    if (fc->is_free()) {
-      // Since we hold the free list lock, which protects direct
-      // allocation in this generation by mutators, a free object
-      // will remain free throughout this iteration code.
-      size = fc->size();
-    } else {
-      // Note that the object need not necessarily be initialized,
-      // because (for instance) the free list lock does NOT protect
-      // object initialization. The closure application below must
-      // therefore be correct in the face of uninitialized objects.
-      size = cl->do_object_careful(oop(addr));
-      if (size == 0) {
-        // An unparsable object found. Signal early termination.
-        return addr;
-      }
-    }
-  }
-  return NULL;
-}
 
 // Callers of this iterator beware: The closure application should
 // be robust in the face of uninitialized objects and should (always)
@@ -2668,7 +2641,7 @@
   // Get the #blocks we want to claim
   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
   assert(n_blks > 0, "Error");
-  assert(ResizePLAB || n_blks == OldPLABSize, "Error");
+  assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
   // In some cases, when the application has a phase change,
   // there may be a sudden and sharp shift in the object survival
   // profile, and updating the counts at the end of a scavenge
@@ -2760,10 +2733,12 @@
   }
 }
 
-void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
-  assert(fl->count() == 0, "Precondition.");
-  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
-         "Precondition");
+// Used by par_get_chunk_of_blocks() for the chunks from the
+// indexed_free_lists.  Looks for a chunk with size that is a multiple
+// of "word_sz" and if found, splits it into "word_sz" chunks and add
+// to the free list "fl".  "n" is the maximum number of chunks to
+// be added to "fl".
+bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
 
   // We'll try all multiples of word_sz in the indexed set, starting with
   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
@@ -2844,11 +2819,15 @@
                         Mutex::_no_safepoint_check_flag);
         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
         _indexedFreeList[word_sz].set_split_births(births);
-        return;
+        return true;
       }
     }
+    return found;
   }
-  // Otherwise, we'll split a block from the dictionary.
+}
+
+FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
+
   FreeChunk* fc = NULL;
   FreeChunk* rem_fc = NULL;
   size_t rem;
@@ -2859,16 +2838,12 @@
       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
                                   FreeBlockDictionary<FreeChunk>::atLeast);
       if (fc != NULL) {
-        _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
-        dictionary()->dict_census_update(fc->size(),
-                                       true /*split*/,
-                                       false /*birth*/);
         break;
       } else {
         n--;
       }
     }
-    if (fc == NULL) return;
+    if (fc == NULL) return NULL;
     // Otherwise, split up that block.
     assert((ssize_t)n >= 1, "Control point invariant");
     assert(fc->is_free(), "Error: should be a free block");
@@ -2890,10 +2865,14 @@
     // dictionary and return, leaving "fl" empty.
     if (n == 0) {
       returnChunkToDictionary(fc);
-      assert(fl->count() == 0, "We never allocated any blocks");
-      return;
+      return NULL;
     }
 
+    _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
+    dictionary()->dict_census_update(fc->size(),
+                                     true /*split*/,
+                                     false /*birth*/);
+
     // First return the remainder, if any.
     // Note that we hold the lock until we decide if we're going to give
     // back the remainder to the dictionary, since a concurrent allocation
@@ -2926,7 +2905,24 @@
     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
     smallSplitBirth(rem);
   }
-  assert((ssize_t)n > 0 && fc != NULL, "Consistency");
+  assert(n * word_sz == fc->size(),
+    err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable by "
+    SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
+    fc->size(), n, word_sz));
+  return fc;
+}
+
+void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targeted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
+
+  FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targeted_number_of_chunks);
+
+  if (fc == NULL) {
+    return;
+  }
+
+  size_t n = fc->size() / word_sz;
+
+  assert((ssize_t)n > 0, "Consistency");
   // Now do the splitting up.
   // Must do this in reverse order, so that anybody attempting to
   // access the main chunk sees it as a single free block until we
@@ -2974,6 +2970,20 @@
   assert(fl->tail()->next() == NULL, "List invariant.");
 }
 
+void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
+  assert(fl->count() == 0, "Precondition.");
+  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
+         "Precondition");
+
+  if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
+    // Got it
+    return;
+  }
+
+  // Otherwise, we'll split a block from the dictionary.
+  par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
+}
+
 // Set up the space's par_seq_tasks structure for work claiming
 // for parallel rescan. See CMSParRemarkTask where this is currently used.
 // XXX Need to suitably abstract and generalize this and the next
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -172,6 +172,20 @@
   // list of size "word_sz", and must now be decremented.
   void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
 
+  // Used by par_get_chunk_of_blocks() for the chunks from the
+  // indexed_free_lists.
+  bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
+
+  // Used by par_get_chunk_of_blocks_dictionary() to get a chunk
+  // evenly splittable into "n" "word_sz" chunks.  Returns that
+  // evenly splittable chunk.  May split a larger chunk to get the
+  // evenly splittable chunk.
+  FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n);
+
+  // Used by par_get_chunk_of_blocks() for the chunks from the
+  // dictionary.
+  void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
+
   // Allocation helper functions
   // Allocate using a strategy that takes from the indexed free lists
   // first.  This allocation strategy assumes a companion sweeping
@@ -337,10 +351,6 @@
                      unallocated_block() : end());
   }
 
-  bool is_in(const void* p) const {
-    return used_region().contains(p);
-  }
-
   virtual bool is_free_block(const HeapWord* p) const;
 
   // Resizing support
@@ -350,7 +360,6 @@
   Mutex* freelistLock() const { return &_freelistLock; }
 
   // Iteration support
-  void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
   void oop_iterate(ExtendedOopClosure* cl);
 
   void object_iterate(ObjectClosure* blk);
@@ -363,6 +372,12 @@
   // obj_is_alive() to determine whether it is safe to iterate over
   // an object.
   void safe_object_iterate(ObjectClosure* blk);
+
+  // Iterate over all objects that intersect with mr, calling "cl->do_object"
+  // on each.  There is an exception to this: if this closure has already
+  // been invoked on an object, it may skip such objects in some cases.  This is
+  // most likely to happen in an "upwards" (ascending address) iteration of
+  // MemRegions.
   void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
 
   // Requires that "mr" be entirely within the space.
@@ -371,11 +386,8 @@
   // terminate the iteration and return the address of the start of the
   // subregion that isn't done.  Return of "NULL" indicates that the
   // iteration completed.
-  virtual HeapWord*
-       object_iterate_careful_m(MemRegion mr,
-                                ObjectClosureCareful* cl);
-  virtual HeapWord*
-       object_iterate_careful(ObjectClosureCareful* cl);
+ HeapWord* object_iterate_careful_m(MemRegion mr,
+                                    ObjectClosureCareful* cl);
 
   // Override: provides a DCTO_CL specific to this kind of space.
   DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -49,7 +49,7 @@
 #include "memory/genCollectedHeap.hpp"
 #include "memory/genMarkSweep.hpp"
 #include "memory/genOopClosures.inline.hpp"
-#include "memory/iterator.hpp"
+#include "memory/iterator.inline.hpp"
 #include "memory/padded.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/resourceArea.hpp"
@@ -59,6 +59,7 @@
 #include "runtime/globals_extension.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "services/memoryService.hpp"
 #include "services/runtimeService.hpp"
@@ -737,7 +738,7 @@
   // Support for parallelizing survivor space rescan
   if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
     const size_t max_plab_samples =
-      ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;
+      ((DefNewGeneration*)_young_gen)->max_survivor_size() / plab_sample_minimum_size();
 
     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
@@ -795,6 +796,12 @@
   _inter_sweep_timer.start();  // start of time
 }
 
+size_t CMSCollector::plab_sample_minimum_size() {
+  // The default value of MinTLABSize is 2k, but there is
+  // no way to get the default value if the flag has been overridden.
+  return MAX2(ThreadLocalAllocBuffer::min_size() * HeapWordSize, 2 * K);
+}
+
 const char* ConcurrentMarkSweepGeneration::name() const {
   return "concurrent mark-sweep generation";
 }
@@ -1513,6 +1520,8 @@
     gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
     gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
     gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
+    gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
+    gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end());
     gclog_or_tty->print_cr("metadata initialized %d",
       MetaspaceGC::should_concurrent_collect());
   }
@@ -1569,11 +1578,33 @@
   }
 
   if (MetaspaceGC::should_concurrent_collect()) {
+    if (Verbose && PrintGCDetails) {
+      gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
+    }
+    return true;
+  }
+
+  // CMSTriggerInterval starts a CMS cycle if enough time has passed.
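+  // The flag value is in milliseconds (note the MILLIUNITS conversion below); for example,
+  // -XX:CMSTriggerInterval=600000 requests that a cycle start at least every ten minutes.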
+  if (CMSTriggerInterval >= 0) {
+    if (CMSTriggerInterval == 0) {
+      // Trigger always
+      return true;
+    }
+
+    // Check the CMS time since begin (we do not check the stats validity
+    // as we want to be able to trigger the first CMS cycle as well)
+    if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
       if (Verbose && PrintGCDetails) {
-      gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
+        if (stats().valid()) {
+          gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
+                                 stats().cms_time_since_begin());
+        } else {
+          gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (first collection)");
+        }
       }
       return true;
     }
+  }
 
   return false;
 }
@@ -1999,7 +2030,7 @@
   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
 
-  GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
+  GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());
   if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
     gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
       "collections passed to foreground collector", _full_gcs_since_conc_gc);
@@ -2509,8 +2540,10 @@
   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
          "VM thread should have CMS token");
 
+  // The gc id is created in register_foreground_gc_start if this collection is synchronous
+  const GCId gc_id = _collectorState == InitialMarking ? GCId::peek() : _gc_tracer_cm->gc_id();
   NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
-    true, NULL);)
+    true, NULL, gc_id);)
   if (UseAdaptiveSizePolicy) {
     size_policy()->ms_collection_begin();
   }
@@ -3025,22 +3058,21 @@
   HandleMark  hm;
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
-  // Get a clear set of claim bits for the strong roots processing to work with.
+  // Get a clear set of claim bits for the roots processing to work with.
   ClassLoaderDataGraph::clear_claimed_marks();
 
   // Mark from roots one level into CMS
   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
 
-  gch->gen_process_strong_roots(_cmsGen->level(),
-                                true,   // younger gens are roots
-                                true,   // activate StrongRootsScope
-                                false,  // not scavenging
-                                SharedHeap::ScanningOption(roots_scanning_options()),
-                                &notOlder,
-                                true,   // walk code active on stacks
-                                NULL,
-                                NULL); // SSS: Provide correct closure
+  gch->gen_process_roots(_cmsGen->level(),
+                         true,   // younger gens are roots
+                         true,   // activate StrongRootsScope
+                         SharedHeap::ScanningOption(roots_scanning_options()),
+                         should_unload_classes(),
+                         &notOlder,
+                         NULL,
+                         NULL);  // SSS: Provide correct closure
 
   // Now mark from the roots
   MarkFromRootsClosure markFromRootsClosure(this, _span,
@@ -3091,24 +3123,24 @@
   HandleMark  hm;
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
-  // Get a clear set of claim bits for the strong roots processing to work with.
+  // Get a clear set of claim bits for the roots processing to work with.
   ClassLoaderDataGraph::clear_claimed_marks();
 
   // Mark from roots one level into CMS
   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
                                      markBitMap());
-  CMKlassClosure klass_closure(&notOlder);
+  CLDToOopClosure cld_closure(&notOlder, true);
 
   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
-  gch->gen_process_strong_roots(_cmsGen->level(),
-                                true,   // younger gens are roots
-                                true,   // activate StrongRootsScope
-                                false,  // not scavenging
-                                SharedHeap::ScanningOption(roots_scanning_options()),
-                                &notOlder,
-                                true,   // walk code active on stacks
-                                NULL,
-                                &klass_closure);
+
+  gch->gen_process_roots(_cmsGen->level(),
+                         true,   // younger gens are roots
+                         true,   // activate StrongRootsScope
+                         SharedHeap::ScanningOption(roots_scanning_options()),
+                         should_unload_classes(),
+                         &notOlder,
+                         NULL,
+                         &cld_closure);
 
   // Now mark from the roots
   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
@@ -3169,16 +3201,6 @@
 }
 
 void
-ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
-  if (freelistLock()->owned_by_self()) {
-    Generation::oop_iterate(mr, cl);
-  } else {
-    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
-    Generation::oop_iterate(mr, cl);
-  }
-}
-
-void
 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
   if (freelistLock()->owned_by_self()) {
     Generation::oop_iterate(cl);
@@ -3305,12 +3327,10 @@
 void CMSCollector::setup_cms_unloading_and_verification_state() {
   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
                              || VerifyBeforeExit;
-  const  int  rso           =   SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
+  const  int  rso           =   SharedHeap::SO_AllCodeCache;
 
   // We set the proper root for this CMS cycle here.
   if (should_unload_classes()) {   // Should unload classes this cycle
-    remove_root_scanning_option(SharedHeap::SO_AllClasses);
-    add_root_scanning_option(SharedHeap::SO_SystemClasses);
     remove_root_scanning_option(rso);  // Shrink the root set appropriately
     set_verifying(should_verify);    // Set verification state for this cycle
     return;                            // Nothing else needs to be done at this time
@@ -3318,8 +3338,6 @@
 
   // Not unloading classes this cycle
   assert(!should_unload_classes(), "Inconsistency!");
-  remove_root_scanning_option(SharedHeap::SO_SystemClasses);
-  add_root_scanning_option(SharedHeap::SO_AllClasses);
 
   if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
     // Include symbols, strings and code cache elements to prevent their resurrection.
@@ -3527,6 +3545,7 @@
  public:
   CMSPhaseAccounting(CMSCollector *collector,
                      const char *phase,
+                     const GCId gc_id,
                      bool print_cr = true);
   ~CMSPhaseAccounting();
 
@@ -3535,6 +3554,7 @@
   const char *_phase;
   elapsedTimer _wallclock;
   bool _print_cr;
+  const GCId _gc_id;
 
  public:
   // Not MT-safe; so do not pass around these StackObj's
@@ -3550,15 +3570,15 @@
 
 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
                                        const char *phase,
+                                       const GCId gc_id,
                                        bool print_cr) :
-  _collector(collector), _phase(phase), _print_cr(print_cr) {
+  _collector(collector), _phase(phase), _print_cr(print_cr), _gc_id(gc_id) {
 
   if (PrintCMSStatistics != 0) {
     _collector->resetYields();
   }
   if (PrintGCDetails) {
-    gclog_or_tty->date_stamp(PrintGCDateStamps);
-    gclog_or_tty->stamp(PrintGCTimeStamps);
+    gclog_or_tty->gclog_stamp(_gc_id);
     gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
       _collector->cmsGen()->short_name(), _phase);
   }
@@ -3572,8 +3592,7 @@
   _collector->stopTimer();
   _wallclock.stop();
   if (PrintGCDetails) {
-    gclog_or_tty->date_stamp(PrintGCDateStamps);
-    gclog_or_tty->stamp(PrintGCTimeStamps);
+    gclog_or_tty->gclog_stamp(_gc_id);
     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
                  _collector->cmsGen()->short_name(),
                  _phase, _collector->timerValue(), _wallclock.seconds());
@@ -3671,7 +3690,7 @@
   setup_cms_unloading_and_verification_state();
 
   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
-    PrintGCDetails && Verbose, true, _gc_timer_cm);)
+    PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
   if (UseAdaptiveSizePolicy) {
     size_policy()->checkpoint_roots_initial_begin();
   }
@@ -3684,12 +3703,6 @@
   ResourceMark rm;
   HandleMark  hm;
 
-  FalseClosure falseClosure;
-  // In the case of a synchronous collection, we will elide the
-  // remark step, so it's important to catch all the nmethod oops
-  // in this step.
-  // The final 'true' flag to gen_process_strong_roots will ensure this.
-  // If 'async' is true, we can relax the nmethod tracing.
   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
@@ -3735,17 +3748,16 @@
       gch->set_par_threads(0);
     } else {
       // The serial version.
-      CMKlassClosure klass_closure(&notOlder);
+      CLDToOopClosure cld_closure(&notOlder, true);
       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
-      gch->gen_process_strong_roots(_cmsGen->level(),
-                                    true,   // younger gens are roots
-                                    true,   // activate StrongRootsScope
-                                    false,  // not scavenging
-                                    SharedHeap::ScanningOption(roots_scanning_options()),
-                                    &notOlder,
-                                    true,   // walk all of code cache if (so & SO_CodeCache)
-                                    NULL,
-                                    &klass_closure);
+      gch->gen_process_roots(_cmsGen->level(),
+                             true,   // younger gens are roots
+                             true,   // activate StrongRootsScope
+                             SharedHeap::ScanningOption(roots_scanning_options()),
+                             should_unload_classes(),
+                             &notOlder,
+                             NULL,
+                             &cld_closure);
     }
   }
 
@@ -3796,7 +3808,7 @@
 
     CMSTokenSyncWithLocks ts(true, bitMapLock());
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
+    CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
     res = markFromRootsWork(asynch);
     if (res) {
       _collectorState = Precleaning;
@@ -4199,7 +4211,7 @@
   pst->all_tasks_completed();
 }
 
-class Par_ConcMarkingClosure: public CMSOopClosure {
+class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
  private:
   CMSCollector* _collector;
   CMSConcMarkingTask* _task;
@@ -4212,7 +4224,7 @@
  public:
   Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
                          CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
-    CMSOopClosure(collector->ref_processor()),
+    MetadataAwareOopClosure(collector->ref_processor()),
     _collector(collector),
     _task(task),
     _span(collector->_span),
@@ -4519,7 +4531,7 @@
       _start_sampling = false;
     }
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
+    CMSPhaseAccounting pa(this, "preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
   }
   CMSTokenSync x(true); // is cms thread
@@ -4548,7 +4560,7 @@
   // we will never do an actual abortable preclean cycle.
   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
+    CMSPhaseAccounting pa(this, "abortable-preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
     // We need more smarts in the abortable preclean
     // loop below to deal with cases where allocation
     // in young gen is very very slow, and our precleaning
@@ -4693,7 +4705,7 @@
     GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
     rp->preclean_discovered_references(
           rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
-          gc_timer);
+          gc_timer, _gc_tracer_cm->gc_id());
   }
 
   if (clean_survivor) {  // preclean the active survivor space(s)
@@ -4983,7 +4995,7 @@
 }
 
 class PrecleanKlassClosure : public KlassClosure {
-  CMKlassClosure _cm_klass_closure;
+  KlassToOopClosure _cm_klass_closure;
  public:
   PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
   void do_klass(Klass* k) {
@@ -5036,7 +5048,7 @@
       // expect it to be false and set to true
       FlagSetting fl(gch->_is_gc_active, false);
       NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
-        PrintGCDetails && Verbose, true, _gc_timer_cm);)
+        PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
       int level = _cmsGen->level() - 1;
       if (level >= 0) {
         gch->do_collection(true,        // full (i.e. force, see below)
@@ -5065,7 +5077,7 @@
 void CMSCollector::checkpointRootsFinalWork(bool asynch,
   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
 
-  NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)
+  NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
 
   assert(haveFreelistLocks(), "must have free list locks");
   assert_lock_strong(bitMapLock());
@@ -5120,11 +5132,11 @@
       // the most recent young generation GC, minus those cleaned up by the
       // concurrent precleaning.
       if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
-        GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
+        GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
         do_remark_parallel();
       } else {
         GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
-                    _gc_timer_cm);
+                    _gc_timer_cm, _gc_tracer_cm->gc_id());
         do_remark_non_parallel();
       }
     }
@@ -5137,7 +5149,7 @@
   verify_overflow_empty();
 
   {
-    NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
+    NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
     refProcessingWork(asynch, clear_all_soft_refs);
   }
   verify_work_stacks_empty();
@@ -5221,7 +5233,6 @@
   _timer.start();
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
-  CMKlassClosure klass_closure(&par_mri_cl);
 
   // ---------- young gen roots --------------
   {
@@ -5237,17 +5248,19 @@
   // ---------- remaining roots --------------
   _timer.reset();
   _timer.start();
-  gch->gen_process_strong_roots(_collector->_cmsGen->level(),
-                                false,     // yg was scanned above
-                                false,     // this is parallel code
-                                false,     // not scavenging
-                                SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
-                                &par_mri_cl,
-                                true,   // walk all of code cache if (so & SO_CodeCache)
-                                NULL,
-                                &klass_closure);
+
+  CLDToOopClosure cld_closure(&par_mri_cl, true);
+
+  gch->gen_process_roots(_collector->_cmsGen->level(),
+                         false,     // yg was scanned above
+                         false,     // this is parallel code
+                         SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
+                         _collector->should_unload_classes(),
+                         &par_mri_cl,
+                         NULL,
+                         &cld_closure);
   assert(_collector->should_unload_classes()
-         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
+         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
   _timer.stop();
   if (PrintCMSStatistics != 0) {
@@ -5297,7 +5310,7 @@
 };
 
 class RemarkKlassClosure : public KlassClosure {
-  CMKlassClosure _cm_klass_closure;
+  KlassToOopClosure _cm_klass_closure;
  public:
   RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
   void do_klass(Klass* k) {
@@ -5374,17 +5387,17 @@
   // ---------- remaining roots --------------
   _timer.reset();
   _timer.start();
-  gch->gen_process_strong_roots(_collector->_cmsGen->level(),
-                                false,     // yg was scanned above
-                                false,     // this is parallel code
-                                false,     // not scavenging
-                                SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
-                                &par_mrias_cl,
-                                true,   // walk all of code cache if (so & SO_CodeCache)
-                                NULL,
-                                NULL);     // The dirty klasses will be handled below
+  gch->gen_process_roots(_collector->_cmsGen->level(),
+                         false,     // yg was scanned above
+                         false,     // this is parallel code
+                         SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
+                         _collector->should_unload_classes(),
+                         &par_mrias_cl,
+                         NULL,
+                         NULL);     // The dirty klasses will be handled below
+
   assert(_collector->should_unload_classes()
-         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
+         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
   _timer.stop();
   if (PrintCMSStatistics != 0) {
@@ -5437,7 +5450,7 @@
   // We might have added oops to ClassLoaderData::_handles during the
   // concurrent marking phase. These oops point to newly allocated objects
   // that are guaranteed to be kept alive. Either by the direct allocation
-  // code, or when the young collector processes the strong roots. Hence,
+  // code, or when the young collector processes the roots. Hence,
   // we don't have to revisit the _handles block during the remark phase.
 
   // ---------- rescan dirty cards ------------
@@ -5859,7 +5872,7 @@
     cms_space,
     n_workers, workers, task_queues());
 
-  // Set up for parallel process_strong_roots work.
+  // Set up for parallel process_roots work.
   gch->set_par_threads(n_workers);
   // We won't be iterating over the cards in the card table updating
   // the younger_gen cards, so we shouldn't call the following else
@@ -5868,7 +5881,7 @@
   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
 
   // The young gen rescan work will not be done as part of
-  // process_strong_roots (which currently doesn't knw how to
+  // process_roots (which currently doesn't know how to
   // parallelize such a scan), but rather will be broken up into
   // a set of parallel tasks (via the sampling that the [abortable]
   // preclean phase did of EdenSpace, plus the [two] tasks of
@@ -5922,7 +5935,7 @@
                               NULL,  // space is set further below
                               &_markBitMap, &_markStack, &mrias_cl);
   {
-    GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
+    GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
     // Iterate over the dirty cards, setting the corresponding bits in the
     // mod union table.
     {
@@ -5959,29 +5972,29 @@
     Universe::verify();
   }
   {
-    GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);
+    GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
 
     verify_work_stacks_empty();
 
     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
     GenCollectedHeap::StrongRootsScope srs(gch);
-    gch->gen_process_strong_roots(_cmsGen->level(),
-                                  true,  // younger gens as roots
-                                  false, // use the local StrongRootsScope
-                                  false, // not scavenging
-                                  SharedHeap::ScanningOption(roots_scanning_options()),
-                                  &mrias_cl,
-                                  true,   // walk code active on stacks
-                                  NULL,
-                                  NULL);  // The dirty klasses will be handled below
+
+    gch->gen_process_roots(_cmsGen->level(),
+                           true,  // younger gens as roots
+                           false, // use the local StrongRootsScope
+                           SharedHeap::ScanningOption(roots_scanning_options()),
+                           should_unload_classes(),
+                           &mrias_cl,
+                           NULL,
+                           NULL); // The dirty klasses will be handled below
 
     assert(should_unload_classes()
-           || (roots_scanning_options() & SharedHeap::SO_CodeCache),
+           || (roots_scanning_options() & SharedHeap::SO_AllCodeCache),
            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
   }
 
   {
-    GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm);
+    GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
 
     verify_work_stacks_empty();
 
@@ -6000,7 +6013,7 @@
   }
 
   {
-    GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm);
+    GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
 
     verify_work_stacks_empty();
 
@@ -6013,7 +6026,7 @@
   // We might have added oops to ClassLoaderData::_handles during the
   // concurrent marking phase. These oops point to newly allocated objects
   // that are guaranteed to be kept alive. Either by the direct allocation
-  // code, or when the young collector processes the strong roots. Hence,
+  // code, or when the young collector processes the roots. Hence,
   // we don't have to revisit the _handles block during the remark phase.
 
   verify_work_stacks_empty();
@@ -6068,6 +6081,8 @@
 };
 
 void CMSRefProcTaskProxy::work(uint worker_id) {
+  ResourceMark rm;
+  HandleMark hm;
   assert(_collector->_span.equals(_span), "Inconsistency in _span");
   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
                                         _mark_bit_map,
@@ -6202,7 +6217,7 @@
                                 _span, &_markBitMap, &_markStack,
                                 &cmsKeepAliveClosure, false /* !preclean */);
   {
-    GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
+    GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
 
     ReferenceProcessorStats stats;
     if (rp->processing_is_mt()) {
@@ -6227,13 +6242,15 @@
                                         &cmsKeepAliveClosure,
                                         &cmsDrainMarkingStackClosure,
                                         &task_executor,
-                                        _gc_timer_cm);
+                                        _gc_timer_cm,
+                                        _gc_tracer_cm->gc_id());
     } else {
       stats = rp->process_discovered_references(&_is_alive_closure,
                                         &cmsKeepAliveClosure,
                                         &cmsDrainMarkingStackClosure,
                                         NULL,
-                                        _gc_timer_cm);
+                                        _gc_timer_cm,
+                                        _gc_tracer_cm->gc_id());
     }
     _gc_tracer_cm->report_gc_reference_stats(stats);
 
@@ -6244,7 +6261,7 @@
 
   if (should_unload_classes()) {
     {
-      GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
+      GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
 
       // Unload classes and purge the SystemDictionary.
       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
@@ -6257,19 +6274,18 @@
     }
 
     {
-      GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
+      GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
       // Clean up unreferenced symbols in symbol table.
       SymbolTable::unlink();
     }
-  }
-
-  // CMS doesn't use the StringTable as hard roots when class unloading is turned off.
-  // Need to check if we really scanned the StringTable.
-  if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
-    GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
-    // Delete entries for dead interned strings.
-    StringTable::unlink(&_is_alive_closure);
-  }
+
+    {
+      GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+      // Delete entries for dead interned strings.
+      StringTable::unlink(&_is_alive_closure);
+    }
+  }
+
 
   // Restore any preserved marks as a result of mark stack or
   // work queue overflow
@@ -6333,7 +6349,7 @@
   _intra_sweep_timer.start();
   if (asynch) {
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
+    CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
     // First sweep the old gen
     {
       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
@@ -6554,7 +6570,7 @@
     // Clear the mark bitmap (no grey objects to start with)
     // for the next cycle.
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
+    CMSPhaseAccounting cmspa(this, "reset", _gc_tracer_cm->gc_id(), !PrintGCDetails);
 
     HeapWord* curAddr = _markBitMap.startWord();
     while (curAddr < _markBitMap.endWord()) {
@@ -6620,7 +6636,7 @@
 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
   gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-  GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
+  GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
   TraceCollectorStats tcs(counters());
 
   switch (op) {
@@ -7738,7 +7754,7 @@
   CMSCollector* collector, MemRegion span,
   CMSBitMap* verification_bm, CMSBitMap* cms_bm,
   CMSMarkStack*  mark_stack):
-  CMSOopClosure(collector->ref_processor()),
+  MetadataAwareOopClosure(collector->ref_processor()),
   _collector(collector),
   _span(span),
   _verification_bm(verification_bm),
@@ -7791,7 +7807,7 @@
                      MemRegion span,
                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
                      HeapWord* finger, MarkFromRootsClosure* parent) :
-  CMSOopClosure(collector->ref_processor()),
+  MetadataAwareOopClosure(collector->ref_processor()),
   _collector(collector),
   _span(span),
   _bitMap(bitMap),
@@ -7808,7 +7824,7 @@
                      HeapWord* finger,
                      HeapWord** global_finger_addr,
                      Par_MarkFromRootsClosure* parent) :
-  CMSOopClosure(collector->ref_processor()),
+  MetadataAwareOopClosure(collector->ref_processor()),
   _collector(collector),
   _whole_span(collector->_span),
   _span(span),
@@ -7857,11 +7873,6 @@
   _overflow_stack->expand(); // expand the stack if possible
 }
 
-void CMKlassClosure::do_klass(Klass* k) {
-  assert(_oop_closure != NULL, "Not initialized?");
-  k->oops_do(_oop_closure);
-}
-
 void PushOrMarkClosure::do_oop(oop obj) {
   // Ignore mark word because we are running concurrent with mutators.
   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
@@ -7959,7 +7970,7 @@
                                        CMSBitMap* mod_union_table,
                                        CMSMarkStack*  mark_stack,
                                        bool           concurrent_precleaning):
-  CMSOopClosure(rp),
+  MetadataAwareOopClosure(rp),
   _collector(collector),
   _span(span),
   _bit_map(bit_map),
@@ -8032,7 +8043,7 @@
                                                ReferenceProcessor* rp,
                                                CMSBitMap* bit_map,
                                                OopTaskQueue* work_queue):
-  CMSOopClosure(rp),
+  MetadataAwareOopClosure(rp),
   _collector(collector),
   _span(span),
   _bit_map(bit_map),
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -32,6 +32,7 @@
 #include "gc_implementation/shared/generationCounters.hpp"
 #include "memory/freeBlockDictionary.hpp"
 #include "memory/generation.hpp"
+#include "memory/iterator.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/virtualspace.hpp"
 #include "services/memoryService.hpp"
@@ -763,6 +764,10 @@
   size_t*    _cursor;
   ChunkArray* _survivor_plab_array;
 
+  // Returns a bounded minimum size for PLAB samples; it should not return values that
+  // are too small, since this affects the size of the data structures used for parallel young gen rescan.
+  size_t plab_sample_minimum_size();
+
   // Support for marking stack overflow handling
   bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
   bool par_take_from_overflow_list(size_t num,
@@ -1285,7 +1290,6 @@
   void save_sweep_limit();
 
   // More iteration support
-  virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
   virtual void oop_iterate(ExtendedOopClosure* cl);
   virtual void safe_object_iterate(ObjectClosure* cl);
   virtual void object_iterate(ObjectClosure* cl);
@@ -1383,13 +1387,6 @@
 // Closures of various sorts used by CMS to accomplish its work
 //
 
-// This closure is used to check that a certain set of oops is empty.
-class FalseClosure: public OopClosure {
- public:
-  void do_oop(oop* p)       { guarantee(false, "Should be an empty set"); }
-  void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); }
-};
-
 // This closure is used to do concurrent marking from the roots
 // following the first checkpoint.
 class MarkFromRootsClosure: public BitMapClosure {
@@ -1454,7 +1451,7 @@
 
 // The following closures are used to do certain kinds of verification of
 // CMS marking.
-class PushAndMarkVerifyClosure: public CMSOopClosure {
+class PushAndMarkVerifyClosure: public MetadataAwareOopClosure {
   CMSCollector*    _collector;
   MemRegion        _span;
   CMSBitMap*       _verification_bm;
@@ -1507,6 +1504,19 @@
   }
 };
 
+// A version of ObjectClosure with "memory" (see _previous_address below)
+class UpwardsObjectClosure: public BoolObjectClosure {
+  HeapWord* _previous_address;
+ public:
+  UpwardsObjectClosure() : _previous_address(NULL) { }
+  void set_previous(HeapWord* addr) { _previous_address = addr; }
+  HeapWord* previous()              { return _previous_address; }
+  // A return value of "true" can be used by the caller to decide
+  // if this object's end should *NOT* be recorded in
+  // _previous_address above.
+  virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
+};
+
 // This closure is used during the second checkpointing phase
 // to rescan the marked objects on the dirty cards in the mod
 // union table and the card table proper. It's invoked via
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -50,8 +50,12 @@
 void VM_CMS_Operation::acquire_pending_list_lock() {
   // The caller may block while communicating
   // with the SLT thread in order to acquire/release the PLL.
-  ConcurrentMarkSweepThread::slt()->
-    manipulatePLL(SurrogateLockerThread::acquirePLL);
+  SurrogateLockerThread* slt = ConcurrentMarkSweepThread::slt();
+  if (slt != NULL) {
+    slt->manipulatePLL(SurrogateLockerThread::acquirePLL);
+  } else {
+    SurrogateLockerThread::report_missing_slt();
+  }
 }
 
 void VM_CMS_Operation::release_and_notify_pending_list_lock() {
@@ -64,7 +68,7 @@
 void VM_CMS_Operation::verify_before_gc() {
   if (VerifyBeforeGC &&
       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm);
+    GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm, _collector->_gc_tracer_cm->gc_id());
     HandleMark hm;
     FreelistLocker x(_collector);
     MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
@@ -76,7 +80,7 @@
 void VM_CMS_Operation::verify_after_gc() {
   if (VerifyAfterGC &&
       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm);
+    GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm, _collector->_gc_tracer_cm->gc_id());
     HandleMark hm;
     FreelistLocker x(_collector);
     MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/bufferingOopClosure.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/bufferingOopClosure.hpp"
+#include "memory/iterator.hpp"
+#include "utilities/debug.hpp"
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+class TestBufferingOopClosure {
+
+  // Helper class to fake a set of oop*s and narrowOop*s.
+  class FakeRoots {
+   public:
+    // Used for sanity checking of the values passed to the do_oops functions in the test.
+    static const uintptr_t NarrowOopMarker = uintptr_t(1) << (BitsPerWord -1);
+
+    int    _num_narrow;
+    int    _num_full;
+    void** _narrow;
+    void** _full;
+
+    FakeRoots(int num_narrow, int num_full) :
+        _num_narrow(num_narrow),
+        _num_full(num_full),
+        _narrow((void**)::malloc(sizeof(void*) * num_narrow)),
+        _full((void**)::malloc(sizeof(void*) * num_full)) {
+
+      for (int i = 0; i < num_narrow; i++) {
+        _narrow[i] = (void*)(NarrowOopMarker + (uintptr_t)i);
+      }
+      for (int i = 0; i < num_full; i++) {
+        _full[i] = (void*)(uintptr_t)i;
+      }
+    }
+
+    ~FakeRoots() {
+      ::free(_narrow);
+      ::free(_full);
+    }
+
+    void oops_do_narrow_then_full(OopClosure* cl) {
+      for (int i = 0; i < _num_narrow; i++) {
+        cl->do_oop((narrowOop*)_narrow[i]);
+      }
+      for (int i = 0; i < _num_full; i++) {
+        cl->do_oop((oop*)_full[i]);
+      }
+    }
+
+    void oops_do_full_then_narrow(OopClosure* cl) {
+      for (int i = 0; i < _num_full; i++) {
+        cl->do_oop((oop*)_full[i]);
+      }
+      for (int i = 0; i < _num_narrow; i++) {
+        cl->do_oop((narrowOop*)_narrow[i]);
+      }
+    }
+
+    void oops_do_mixed(OopClosure* cl) {
+      int i;
+      for (i = 0; i < _num_full && i < _num_narrow; i++) {
+        cl->do_oop((oop*)_full[i]);
+        cl->do_oop((narrowOop*)_narrow[i]);
+      }
+      for (int j = i; j < _num_full; j++) {
+        cl->do_oop((oop*)_full[j]);
+      }
+      for (int j = i; j < _num_narrow; j++) {
+        cl->do_oop((narrowOop*)_narrow[j]);
+      }
+    }
+
+    static const int MaxOrder = 2;
+
+    void oops_do(OopClosure* cl, int do_oop_order) {
+      switch(do_oop_order) {
+        case 0:
+          oops_do_narrow_then_full(cl);
+          break;
+        case 1:
+          oops_do_full_then_narrow(cl);
+          break;
+        case 2:
+          oops_do_mixed(cl);
+          break;
+        default:
+          oops_do_narrow_then_full(cl);
+          break;
+      }
+    }
+  };
+
+  class CountOopClosure : public OopClosure {
+    int _narrow_oop_count;
+    int _full_oop_count;
+   public:
+    CountOopClosure() : _narrow_oop_count(0), _full_oop_count(0) {}
+    void do_oop(narrowOop* p) {
+      assert((uintptr_t(p) & FakeRoots::NarrowOopMarker) != 0,
+          "The narrowOop was unexpectedly not marked with the NarrowOopMarker");
+      _narrow_oop_count++;
+    }
+
+    void do_oop(oop* p){
+      assert((uintptr_t(p) & FakeRoots::NarrowOopMarker) == 0,
+          "The oop was unexpectedly marked with the NarrowOopMarker");
+      _full_oop_count++;
+    }
+
+    int narrow_oop_count() { return _narrow_oop_count; }
+    int full_oop_count()   { return _full_oop_count; }
+    int all_oop_count()    { return _narrow_oop_count + _full_oop_count; }
+  };
+
+  class DoNothingOopClosure : public OopClosure {
+   public:
+    void do_oop(narrowOop* p) {}
+    void do_oop(oop* p)       {}
+  };
+
+  static void testCount(int num_narrow, int num_full, int do_oop_order) {
+    FakeRoots fr(num_narrow, num_full);
+
+    CountOopClosure coc;
+    BufferingOopClosure boc(&coc);
+
+    fr.oops_do(&boc, do_oop_order);
+
+    boc.done();
+
+    #define assert_testCount(got, expected)                                     \
+       assert((got) == (expected),                                              \
+           err_msg("Expected: %d, got: %d, when running testCount(%d, %d, %d)", \
+               (got), (expected), num_narrow, num_full, do_oop_order))
+
+    assert_testCount(num_narrow, coc.narrow_oop_count());
+    assert_testCount(num_full, coc.full_oop_count());
+    assert_testCount(num_narrow + num_full, coc.all_oop_count());
+  }
+
+  static void testCount() {
+    int buffer_length = BufferingOopClosure::BufferLength;
+
+    for (int order = 0; order < FakeRoots::MaxOrder; order++) {
+      testCount(0,                 0,                 order);
+      testCount(10,                0,                 order);
+      testCount(0,                 10,                order);
+      testCount(10,                10,                order);
+      testCount(buffer_length,     10,                order);
+      testCount(10,                buffer_length,     order);
+      testCount(buffer_length,     buffer_length,     order);
+      testCount(buffer_length + 1, 10,                order);
+      testCount(10,                buffer_length + 1, order);
+      testCount(buffer_length + 1, buffer_length,     order);
+      testCount(buffer_length,     buffer_length + 1, order);
+      testCount(buffer_length + 1, buffer_length + 1, order);
+    }
+  }
+
+  static void testIsBufferEmptyOrFull(int num_narrow, int num_full, bool expect_empty, bool expect_full) {
+    FakeRoots fr(num_narrow, num_full);
+
+    DoNothingOopClosure cl;
+    BufferingOopClosure boc(&cl);
+
+    fr.oops_do(&boc, 0);
+
+    #define assert_testIsBufferEmptyOrFull(got, expected)                             \
+        assert((got) == (expected),                                                   \
+            err_msg("Expected: %d, got: %d. testIsBufferEmptyOrFull(%d, %d, %s, %s)", \
+                (got), (expected), num_narrow, num_full,                              \
+                BOOL_TO_STR(expect_empty), BOOL_TO_STR(expect_full)))
+
+    assert_testIsBufferEmptyOrFull(expect_empty, boc.is_buffer_empty());
+    assert_testIsBufferEmptyOrFull(expect_full, boc.is_buffer_full());
+  }
+
+  static void testIsBufferEmptyOrFull() {
+    int bl = BufferingOopClosure::BufferLength;
+
+    testIsBufferEmptyOrFull(0,       0, true,  false);
+    testIsBufferEmptyOrFull(1,       0, false, false);
+    testIsBufferEmptyOrFull(0,       1, false, false);
+    testIsBufferEmptyOrFull(1,       1, false, false);
+    testIsBufferEmptyOrFull(10,      0, false, false);
+    testIsBufferEmptyOrFull(0,      10, false, false);
+    testIsBufferEmptyOrFull(10,     10, false, false);
+    testIsBufferEmptyOrFull(0,      bl, false, true);
+    testIsBufferEmptyOrFull(bl,      0, false, true);
+    testIsBufferEmptyOrFull(bl/2, bl/2, false, true);
+    testIsBufferEmptyOrFull(bl-1,    1, false, true);
+    testIsBufferEmptyOrFull(1,    bl-1, false, true);
+    // Processed
+    testIsBufferEmptyOrFull(bl+1,    0, false, false);
+    testIsBufferEmptyOrFull(bl*2,    0, false, true);
+  }
+
+  static void testEmptyAfterDone(int num_narrow, int num_full) {
+    FakeRoots fr(num_narrow, num_full);
+
+    DoNothingOopClosure cl;
+    BufferingOopClosure boc(&cl);
+
+    fr.oops_do(&boc, 0);
+
+    // Make sure all get processed.
+    boc.done();
+
+    assert(boc.is_buffer_empty(),
+        err_msg("Should be empty after call to done(). testEmptyAfterDone(%d, %d)",
+            num_narrow, num_full));
+  }
+
+  static void testEmptyAfterDone() {
+    int bl = BufferingOopClosure::BufferLength;
+
+    testEmptyAfterDone(0,       0);
+    testEmptyAfterDone(1,       0);
+    testEmptyAfterDone(0,       1);
+    testEmptyAfterDone(1,       1);
+    testEmptyAfterDone(10,      0);
+    testEmptyAfterDone(0,      10);
+    testEmptyAfterDone(10,     10);
+    testEmptyAfterDone(0,      bl);
+    testEmptyAfterDone(bl,      0);
+    testEmptyAfterDone(bl/2, bl/2);
+    testEmptyAfterDone(bl-1,    1);
+    testEmptyAfterDone(1,    bl-1);
+    // Processed
+    testEmptyAfterDone(bl+1,    0);
+    testEmptyAfterDone(bl*2,    0);
+  }
+
+  public:
+  static void test() {
+    testCount();
+    testIsBufferEmptyOrFull();
+    testEmptyAfterDone();
+  }
+};
+
+void TestBufferingOopClosure_test() {
+  TestBufferingOopClosure::test();
+}
+
+#endif
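A condensed usage fragment showing the pattern the testCount cases above exercise (illustrative only, and assuming HotSpot's oop type and assert macro are in scope; CountingSink is a hypothetical stand-in for the nested CountOopClosure, and the pointer values are fake and never dereferenced, exactly as in FakeRoots):

CountingSink sink;                  // counts do_oop(oop*) and do_oop(narrowOop*) calls
BufferingOopClosure buffered(&sink);

for (uintptr_t i = 0; i < 2000; i++) {
  buffered.do_oop((oop*)i);         // buffered; flushed whenever the buffer fills up
}
buffered.done();                    // flush whatever is still sitting in the buffer

assert(sink.full_oop_count() == 2000, "every root forwarded exactly once");

Since BufferLength is 1024, the loop above forces at least one intermediate process_buffer() call before done(), which is the situation the "buffer_length + 1" and "bl*2" cases in the tests are probing.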
--- a/src/share/vm/gc_implementation/g1/bufferingOopClosure.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/bufferingOopClosure.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,10 +25,10 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_BUFFERINGOOPCLOSURE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_BUFFERINGOOPCLOSURE_HPP
 
-#include "memory/genOopClosures.hpp"
-#include "memory/generation.hpp"
+#include "memory/iterator.hpp"
+#include "oops/oopsHierarchy.hpp"
 #include "runtime/os.hpp"
-#include "utilities/taskqueue.hpp"
+#include "utilities/debug.hpp"
 
 // A BufferingOopClosure tries to separate out the cost of finding roots
 // from the cost of applying closures to them.  It maintains an array of
@@ -41,60 +41,103 @@
 // The caller must be sure to call "done" to process any unprocessed
 // buffered entries.
 
-class Generation;
-class HeapRegion;
-
 class BufferingOopClosure: public OopClosure {
+  friend class TestBufferingOopClosure;
 protected:
-  enum PrivateConstants {
-    BufferLength = 1024
-  };
+  static const size_t BufferLength = 1024;
 
-  StarTask  _buffer[BufferLength];
-  StarTask* _buffer_top;
-  StarTask* _buffer_curr;
+  // We need to know if the buffered addresses contain oops or narrowOops.
+  // We can't tag the addresses the way StarTask does, because we need to
+  // be able to handle unaligned addresses coming from oops embedded in code.
+  //
+  // The addresses for the full-sized oops are filled in from the bottom,
+  // while the addresses for the narrowOops are filled in from the top.
+  OopOrNarrowOopStar  _buffer[BufferLength];
+  OopOrNarrowOopStar* _oop_top;
+  OopOrNarrowOopStar* _narrowOop_bottom;
 
   OopClosure* _oc;
   double      _closure_app_seconds;
 
-  void process_buffer () {
+
+  bool is_buffer_empty() {
+    return _oop_top == _buffer && _narrowOop_bottom == (_buffer + BufferLength - 1);
+  }
+
+  bool is_buffer_full() {
+    return _narrowOop_bottom < _oop_top;
+  }
+
+  // Process addresses containing full-sized oops.
+  void process_oops() {
+    for (OopOrNarrowOopStar* curr = _buffer; curr < _oop_top; ++curr) {
+      _oc->do_oop((oop*)(*curr));
+    }
+    _oop_top = _buffer;
+  }
+
+  // Process addresses containing narrow oops.
+  void process_narrowOops() {
+    for (OopOrNarrowOopStar* curr = _buffer + BufferLength - 1; curr > _narrowOop_bottom; --curr) {
+      _oc->do_oop((narrowOop*)(*curr));
+    }
+    _narrowOop_bottom = _buffer + BufferLength - 1;
+  }
+
+  // Apply the closure to all oops and clear the buffer.
+  // Accumulate the time it took.
+  void process_buffer() {
     double start = os::elapsedTime();
-    for (StarTask* curr = _buffer; curr < _buffer_curr; ++curr) {
-      if (curr->is_narrow()) {
-        assert(UseCompressedOops, "Error");
-        _oc->do_oop((narrowOop*)(*curr));
-      } else {
-        _oc->do_oop((oop*)(*curr));
-      }
-    }
-    _buffer_curr = _buffer;
+
+    process_oops();
+    process_narrowOops();
+
     _closure_app_seconds += (os::elapsedTime() - start);
   }
 
-  template <class T> inline void do_oop_work(T* p) {
-    if (_buffer_curr == _buffer_top) {
+  void process_buffer_if_full() {
+    if (is_buffer_full()) {
       process_buffer();
     }
-    StarTask new_ref(p);
-    *_buffer_curr = new_ref;
-    ++_buffer_curr;
+  }
+
+  void add_narrowOop(narrowOop* p) {
+    assert(!is_buffer_full(), "Buffer should not be full");
+    *_narrowOop_bottom = (OopOrNarrowOopStar)p;
+    _narrowOop_bottom--;
+  }
+
+  void add_oop(oop* p) {
+    assert(!is_buffer_full(), "Buffer should not be full");
+    *_oop_top = (OopOrNarrowOopStar)p;
+    _oop_top++;
   }
 
 public:
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) {
+    process_buffer_if_full();
+    add_narrowOop(p);
+  }
 
-  void done () {
-    if (_buffer_curr > _buffer) {
+  virtual void do_oop(oop* p)       {
+    process_buffer_if_full();
+    add_oop(p);
+  }
+
+  void done() {
+    if (!is_buffer_empty()) {
       process_buffer();
     }
   }
-  double closure_app_seconds () {
+
+  double closure_app_seconds() {
     return _closure_app_seconds;
   }
-  BufferingOopClosure (OopClosure *oc) :
+
+  BufferingOopClosure(OopClosure *oc) :
     _oc(oc),
-    _buffer_curr(_buffer), _buffer_top(_buffer + BufferLength),
+    _oop_top(_buffer),
+    _narrowOop_bottom(_buffer + BufferLength - 1),
     _closure_app_seconds(0.0) { }
 };
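The comments above describe the central trick of the rewritten closure: one array is filled with full-width oop* addresses from the bottom and narrowOop* addresses from the top, and the buffer counts as full once the two cursors cross. A small self-contained sketch of such a two-ended buffer over untyped void* slots (illustrative names, not the HotSpot types):

#include <cassert>
#include <cstddef>

template <size_t N>
class TwoEndedBuffer {
  void*     _slots[N];
  ptrdiff_t _low  = 0;       // next free slot for "full" entries, grows upward
  ptrdiff_t _high = N - 1;   // next free slot for "narrow" entries, grows downward
 public:
  bool is_empty() const { return _low == 0 && _high == (ptrdiff_t)N - 1; }
  bool is_full()  const { return _high < _low; }             // the cursors crossed
  void add_full(void* p)   { assert(!is_full()); _slots[_low++]  = p; }
  void add_narrow(void* p) { assert(!is_full()); _slots[_high--] = p; }

  // Drain both ends and reset to empty, as process_buffer() does above.
  template <typename FullFn, typename NarrowFn>
  void drain(FullFn on_full, NarrowFn on_narrow) {
    for (ptrdiff_t i = 0; i < _low; i++)                 on_full(_slots[i]);
    for (ptrdiff_t i = (ptrdiff_t)N - 1; i > _high; i--) on_narrow(_slots[i]);
    _low = 0; _high = N - 1;
  }
};

Unlike StarTask, nothing is encoded in the pointer bits, which is exactly why the real closure can accept the unaligned addresses of oops embedded in code.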
 
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -29,7 +29,7 @@
 #include "gc_implementation/g1/g1HotCardCache.hpp"
 #include "runtime/java.hpp"
 
-ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) :
+ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h, CardTableEntryClosure* refine_closure) :
   _threads(NULL), _n_threads(0),
   _hot_card_cache(g1h)
 {
@@ -61,7 +61,7 @@
 
   ConcurrentG1RefineThread *next = NULL;
   for (uint i = _n_threads - 1; i != UINT_MAX; i--) {
-    ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
+    ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, refine_closure, worker_id_offset, i);
     assert(t != NULL, "Conc refine should have been created");
     if (t->osthread() == NULL) {
         vm_shutdown_during_initialization("Could not create ConcurrentG1RefineThread");
@@ -81,8 +81,8 @@
   }
 }
 
-void ConcurrentG1Refine::init() {
-  _hot_card_cache.initialize();
+void ConcurrentG1Refine::init(G1RegionToSpaceMapper* card_counts_storage) {
+  _hot_card_cache.initialize(card_counts_storage);
 }
 
 void ConcurrentG1Refine::stop() {
@@ -128,9 +128,7 @@
 }
 
 uint ConcurrentG1Refine::thread_num() {
-  uint n_threads = (G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads
-                                                : ParallelGCThreads;
-  return MAX2<uint>(n_threads, 1);
+  return G1ConcRefinementThreads;
 }
 
 void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -34,6 +34,7 @@
 class ConcurrentG1RefineThread;
 class G1CollectedHeap;
 class G1HotCardCache;
+class G1RegionToSpaceMapper;
 class G1RemSet;
 class DirtyCardQueue;
 
@@ -71,10 +72,10 @@
   void reset_threshold_step();
 
  public:
-  ConcurrentG1Refine(G1CollectedHeap* g1h);
+  ConcurrentG1Refine(G1CollectedHeap* g1h, CardTableEntryClosure* refine_closure);
   ~ConcurrentG1Refine();
 
-  void init(); // Accomplish some initialization that has to wait.
+  void init(G1RegionToSpaceMapper* card_counts_storage);
   void stop();
 
   void reinitialize_threads();
--- a/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -33,8 +33,10 @@
 
 ConcurrentG1RefineThread::
 ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *next,
+                         CardTableEntryClosure* refine_closure,
                          uint worker_id_offset, uint worker_id) :
   ConcurrentGCThread(),
+  _refine_closure(refine_closure),
   _worker_id_offset(worker_id_offset),
   _worker_id(worker_id),
   _active(false),
@@ -71,6 +73,7 @@
 }
 
 void ConcurrentG1RefineThread::sample_young_list_rs_lengths() {
+  SuspendibleThreadSetJoiner sts;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   G1CollectorPolicy* g1p = g1h->g1_policy();
   if (g1p->adaptive_young_list_length()) {
@@ -82,8 +85,8 @@
 
       // we try to yield every time we visit 10 regions
       if (regions_visited == 10) {
-        if (_sts.should_yield()) {
-          _sts.yield("G1 refine");
+        if (sts.should_yield()) {
+          sts.yield();
           // we just abandon the iteration
           break;
         }
@@ -99,9 +102,7 @@
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   _vtime_start = os::elapsedVTime();
   while(!_should_terminate) {
-    _sts.join();
     sample_young_list_rs_lengths();
-    _sts.leave();
 
     if (os::supports_vtime()) {
       _vtime_accum = (os::elapsedVTime() - _vtime_start);
@@ -182,37 +183,37 @@
       break;
     }
 
-    _sts.join();
+    {
+      SuspendibleThreadSetJoiner sts;
+
+      do {
+        int curr_buffer_num = (int)dcqs.completed_buffers_num();
+        // If the number of the buffers falls down into the yellow zone,
+        // that means that the transition period after the evacuation pause has ended.
+        if (dcqs.completed_queue_padding() > 0 && curr_buffer_num <= cg1r()->yellow_zone()) {
+          dcqs.set_completed_queue_padding(0);
+        }
 
-    do {
-      int curr_buffer_num = (int)dcqs.completed_buffers_num();
-      // If the number of the buffers falls down into the yellow zone,
-      // that means that the transition period after the evacuation pause has ended.
-      if (dcqs.completed_queue_padding() > 0 && curr_buffer_num <= cg1r()->yellow_zone()) {
-        dcqs.set_completed_queue_padding(0);
-      }
+        if (_worker_id > 0 && curr_buffer_num <= _deactivation_threshold) {
+          // If the number of buffers has fallen below our threshold,
+          // we should deactivate. The predecessor will reactivate this
+          // thread should the number of buffers cross the threshold again.
+          deactivate();
+          break;
+        }
 
-      if (_worker_id > 0 && curr_buffer_num <= _deactivation_threshold) {
-        // If the number of the buffer has fallen below our threshold
-        // we should deactivate. The predecessor will reactivate this
-        // thread should the number of the buffers cross the threshold again.
+        // Check if we need to activate the next thread.
+        if (_next != NULL && !_next->is_active() && curr_buffer_num > _next->_threshold) {
+          _next->activate();
+        }
+      } while (dcqs.apply_closure_to_completed_buffer(_refine_closure, _worker_id + _worker_id_offset, cg1r()->green_zone()));
+
+      // We can exit the loop above while being active if there was a yield request.
+      if (is_active()) {
         deactivate();
-        break;
       }
-
-      // Check if we need to activate the next thread.
-      if (_next != NULL && !_next->is_active() && curr_buffer_num > _next->_threshold) {
-        _next->activate();
-      }
-    } while (dcqs.apply_closure_to_completed_buffer(_worker_id + _worker_id_offset, cg1r()->green_zone()));
-
-    // We can exit the loop above while being active if there was a yield request.
-    if (is_active()) {
-      deactivate();
     }
 
-    _sts.leave();
-
     if (os::supports_vtime()) {
       _vtime_accum = (os::elapsedVTime() - _vtime_start);
     } else {
@@ -223,17 +224,6 @@
   terminate();
 }
 
-
-void ConcurrentG1RefineThread::yield() {
-  if (G1TraceConcRefinement) {
-    gclog_or_tty->print_cr("G1-Refine-yield");
-  }
-  _sts.yield("G1 refine");
-  if (G1TraceConcRefinement) {
-    gclog_or_tty->print_cr("G1-Refine-yield-end");
-  }
-}
-
 void ConcurrentG1RefineThread::stop() {
   // it is ok to take late safepoints here, if needed
   {
--- a/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -28,6 +28,7 @@
 #include "gc_implementation/shared/concurrentGCThread.hpp"
 
 // Forward Decl.
+class CardTableEntryClosure;
 class ConcurrentG1Refine;
 
 // The G1 Concurrent Refinement Thread (could be several in the future).
@@ -49,6 +50,9 @@
   Monitor* _monitor;
   ConcurrentG1Refine* _cg1r;
 
+  // The closure applied to completed log buffers.
+  CardTableEntryClosure* _refine_closure;
+
   int _thread_threshold_step;
   // This thread activation threshold
   int _threshold;
@@ -64,13 +68,11 @@
   void activate();
   void deactivate();
 
-  // For use by G1CollectedHeap, which is a friend.
-  static SuspendibleThreadSet* sts() { return &_sts; }
-
 public:
   virtual void run();
   // Constructor
   ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread* next,
+                           CardTableEntryClosure* refine_closure,
                            uint worker_id_offset, uint worker_id);
 
   void initialize();
@@ -84,8 +86,6 @@
 
   ConcurrentG1Refine* cg1r() { return _cg1r;     }
 
-  // Yield for GC
-  void yield();
   // shutdown
   void stop();
 };
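In the two refinement-thread files above, the explicit _sts.join()/_sts.leave() pairs are replaced by a scoped SuspendibleThreadSetJoiner, so the leave happens on every exit path, including the early breaks and the deactivate() case inside the loop. A minimal sketch of that scoped join/leave idea, with illustrative names (not the HotSpot classes):

class ThreadSet {
 public:
  void join()  { /* register the current thread as suspendible */ }
  void leave() { /* deregister it again */ }
};

class ScopedJoiner {
  ThreadSet& _set;
 public:
  explicit ScopedJoiner(ThreadSet& set) : _set(set) { _set.join(); }
  ~ScopedJoiner() { _set.leave(); }   // runs on every exit path, including break and return
  ScopedJoiner(const ScopedJoiner&) = delete;
  ScopedJoiner& operator=(const ScopedJoiner&) = delete;
};

// Usage, mirroring the refinement loop above:
// { ScopedJoiner sts(suspendible_set); do { /* refine buffers */ } while (more_work()); }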
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -23,7 +23,9 @@
  */
 
 #include "precompiled.hpp"
+#include "classfile/metadataOnStackMark.hpp"
 #include "classfile/symbolTable.hpp"
+#include "code/codeCache.hpp"
 #include "gc_implementation/g1/concurrentMark.inline.hpp"
 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
@@ -33,18 +35,21 @@
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/heapRegion.inline.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
 #include "gc_implementation/shared/vmGCOperations.hpp"
 #include "gc_implementation/shared/gcTimer.hpp"
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/gcTraceTime.hpp"
+#include "memory/allocation.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
+#include "runtime/prefetch.inline.hpp"
 #include "services/memTracker.hpp"
 
 // Concurrent marking bit map wrapper
@@ -56,8 +61,8 @@
   _bmWordSize = 0;
 }
 
-HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
-                                               HeapWord* limit) const {
+HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
+                                               const HeapWord* limit) const {
   // First we must round addr *up* to a possible object boundary.
   addr = (HeapWord*)align_size_up((intptr_t)addr,
                                   HeapWordSize << _shifter);
@@ -74,8 +79,8 @@
   return nextAddr;
 }
 
-HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
-                                                 HeapWord* limit) const {
+HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
+                                                 const HeapWord* limit) const {
   size_t addrOffset = heapWordToOffset(addr);
   if (limit == NULL) {
     limit = _bmStartWord + _bmWordSize;
@@ -95,12 +100,12 @@
 }
 
 #ifndef PRODUCT
-bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
+bool CMBitMapRO::covers(MemRegion heap_rs) const {
   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
   assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
          "size inconsistency");
-  return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
-         _bmWordSize  == heap_rs.size()>>LogHeapWordSize;
+  return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
+         _bmWordSize  == heap_rs.word_size();
 }
 #endif
 
@@ -108,33 +113,76 @@
   _bm.print_on_error(st, prefix);
 }
 
-bool CMBitMap::allocate(ReservedSpace heap_rs) {
-  _bmStartWord = (HeapWord*)(heap_rs.base());
-  _bmWordSize  = heap_rs.size()/HeapWordSize;    // heap_rs.size() is in bytes
-  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
-                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
-  if (!brs.is_reserved()) {
-    warning("ConcurrentMark marking bit map allocation failure");
+size_t CMBitMap::compute_size(size_t heap_size) {
+  return heap_size / mark_distance();
+}
+
+size_t CMBitMap::mark_distance() {
+  return MinObjAlignmentInBytes * BitsPerByte;
+}
+
+void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
+  _bmStartWord = heap.start();
+  _bmWordSize = heap.word_size();
+
+  _bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
+  _bm.set_size(_bmWordSize >> _shifter);
+
+  storage->set_mapping_changed_listener(&_listener);
+}
+
+void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions, bool zero_filled) {
+  if (zero_filled) {
+    return;
+  }
+  // We need to clear the bitmap on commit, removing any existing information.
+  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
+  _bm->clearRange(mr);
+}
+
+// Closure used for clearing the given mark bitmap.
+class ClearBitmapHRClosure : public HeapRegionClosure {
+ private:
+  ConcurrentMark* _cm;
+  CMBitMap* _bitmap;
+  bool _may_yield;      // The closure may yield during iteration. If yielded, abort the iteration.
+ public:
+  ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
+    assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
+  }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    size_t const chunk_size_in_words = M / HeapWordSize;
+
+    HeapWord* cur = r->bottom();
+    HeapWord* const end = r->end();
+
+    while (cur < end) {
+      MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
+      _bitmap->clearRange(mr);
+
+      cur += chunk_size_in_words;
+
+      // Abort iteration if after yielding the marking has been aborted.
+      if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
+        return true;
+      }
+      // Repeat the asserts from before the start of the closure. We will do them
+      // as asserts here to minimize their overhead on the product. However, we
+      // will have them as guarantees at the beginning / end of the bitmap
+      // clearing to get some checking in the product.
+      assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
+      assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
+    }
+
     return false;
   }
-  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
-  // For now we'll just commit all of the bit map up front.
-  // Later on we'll try to be more parsimonious with swap.
-  if (!_virtual_space.initialize(brs, brs.size())) {
-    warning("ConcurrentMark marking bit map backing store failure");
-    return false;
-  }
-  assert(_virtual_space.committed_size() == brs.size(),
-         "didn't reserve backing store for all of concurrent marking bit map?");
-  _bm.set_map((uintptr_t*)_virtual_space.low());
-  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
-         _bmWordSize, "inconsistency in bit map sizing");
-  _bm.set_size(_bmWordSize >> _shifter);
-  return true;
-}
+};
 
 void CMBitMap::clearAll() {
-  _bm.clear();
+  ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
+  G1CollectedHeap::heap()->heap_region_iterate(&cl);
+  guarantee(cl.complete(), "Must have completed iteration.");
   return;
 }
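A worked example of compute_size() and mark_distance() above, assuming the usual 8-byte minimum object alignment on a 64-bit VM (so one mark bit covers 8 * 8 = 64 heap bytes):

#include <cstddef>

constexpr size_t kMinObjAlignmentInBytes = 8;   // assumed default, not taken from this changeset
constexpr size_t kBitsPerByte            = 8;
constexpr size_t kMarkDistance = kMinObjAlignmentInBytes * kBitsPerByte;   // 64 heap bytes per bit

constexpr size_t kHeapBytes   = 32ull * 1024 * 1024 * 1024;                // a 32 GB heap
constexpr size_t kBitmapBytes = kHeapBytes / kMarkDistance;                // 512 MB per bitmap

static_assert(kBitmapBytes == 512ull * 1024 * 1024, "32 GB heap -> 512 MB mark bitmap");

G1 keeps two such bitmaps (prev and next); with this change each is backed by its own G1RegionToSpaceMapper and is cleared region by region rather than in one flat _bm.clear() call.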
 
@@ -389,10 +437,6 @@
   }
 }
 
-bool ConcurrentMark::not_yet_marked(oop obj) const {
-  return _g1h->is_obj_ill(obj);
-}
-
 CMRootRegions::CMRootRegions() :
   _young_list(NULL), _cm(NULL), _scan_in_progress(false),
   _should_abort(false),  _next_survivor(NULL) { }
@@ -479,10 +523,10 @@
   return MAX2((n_par_threads + 2) / 4, 1U);
 }
 
-ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
+ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
   _g1h(g1h),
-  _markBitMap1(log2_intptr(MinObjAlignment)),
-  _markBitMap2(log2_intptr(MinObjAlignment)),
+  _markBitMap1(),
+  _markBitMap2(),
   _parallel_marking_threads(0),
   _max_parallel_marking_threads(0),
   _sleep_factor(0.0),
@@ -491,7 +535,7 @@
   _cleanup_task_overhead(1.0),
   _cleanup_list("Cleanup List"),
   _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
-  _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
+  _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
             CardTableModRefBS::card_shift,
             false /* in_resource_area*/),
 
@@ -510,6 +554,7 @@
   _has_overflown(false),
   _concurrent(false),
   _has_aborted(false),
+  _aborted_gc_id(GCId::undefined()),
   _restart_for_overflow(false),
   _concurrent_marking_in_progress(false),
 
@@ -540,14 +585,8 @@
                            "heap end = " INTPTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
   }
 
-  if (!_markBitMap1.allocate(heap_rs)) {
-    warning("Failed to allocate first CM bit map");
-    return;
-  }
-  if (!_markBitMap2.allocate(heap_rs)) {
-    warning("Failed to allocate second CM bit map");
-    return;
-  }
+  _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
+  _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
 
   // Create & start a ConcurrentMark thread.
   _cmThread = new ConcurrentMarkThread(this);
@@ -558,8 +597,8 @@
   }
 
   assert(CGC_lock != NULL, "Where's the CGC_lock?");
-  assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
-  assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");
+  assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
+  assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");
 
   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
   satb_qs.set_buffer_size(G1SATBBufferSize);
@@ -719,38 +758,17 @@
   clear_all_count_data();
 
   // so that the call below can read a sensible value
-  _heap_start = (HeapWord*) heap_rs.base();
+  _heap_start = g1h->reserved_region().start();
   set_non_marking_state();
   _completed_initialization = true;
 }
 
-void ConcurrentMark::update_g1_committed(bool force) {
-  // If concurrent marking is not in progress, then we do not need to
-  // update _heap_end.
-  if (!concurrent_marking_in_progress() && !force) return;
-
-  MemRegion committed = _g1h->g1_committed();
-  assert(committed.start() == _heap_start, "start shouldn't change");
-  HeapWord* new_end = committed.end();
-  if (new_end > _heap_end) {
-    // The heap has been expanded.
-
-    _heap_end = new_end;
-  }
-  // Notice that the heap can also shrink. However, this only happens
-  // during a Full GC (at least currently) and the entire marking
-  // phase will bail out and the task will not be restarted. So, let's
-  // do nothing.
-}
-
 void ConcurrentMark::reset() {
   // Starting values for these two. This should be called in a STW
-  // phase. CM will be notified of any future g1_committed expansions
-  // will be at the end of evacuation pauses, when tasks are
-  // inactive.
-  MemRegion committed = _g1h->g1_committed();
-  _heap_start = committed.start();
-  _heap_end   = committed.end();
+  // phase.
+  MemRegion reserved = _g1h->g1_reserved();
+  _heap_start = reserved.start();
+  _heap_end   = reserved.end();
 
   // Separated the asserts so that we know which one fires.
   assert(_heap_start != NULL, "heap bounds should look ok");
@@ -822,7 +840,6 @@
     assert(out_of_regions(),
            err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
                    p2i(_finger), p2i(_heap_end)));
-    update_g1_committed(true);
   }
 }
 
@@ -841,7 +858,6 @@
 
 void ConcurrentMark::clearNextBitmap() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1CollectorPolicy* g1p = g1h->g1_policy();
 
   // Make sure that the concurrent mark thread looks to still be in
   // the current cycle.
@@ -853,39 +869,47 @@
   // is the case.
   guarantee(!g1h->mark_in_progress(), "invariant");
 
-  // clear the mark bitmap (no grey objects to start with).
-  // We need to do this in chunks and offer to yield in between
-  // each chunk.
-  HeapWord* start  = _nextMarkBitMap->startWord();
-  HeapWord* end    = _nextMarkBitMap->endWord();
-  HeapWord* cur    = start;
-  size_t chunkSize = M;
-  while (cur < end) {
-    HeapWord* next = cur + chunkSize;
-    if (next > end) {
-      next = end;
-    }
-    MemRegion mr(cur,next);
-    _nextMarkBitMap->clearRange(mr);
-    cur = next;
-    do_yield_check();
-
-    // Repeat the asserts from above. We'll do them as asserts here to
-    // minimize their overhead on the product. However, we'll have
-    // them as guarantees at the beginning / end of the bitmap
-    // clearing to get some checking in the product.
-    assert(cmThread()->during_cycle(), "invariant");
-    assert(!g1h->mark_in_progress(), "invariant");
+  ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
+  g1h->heap_region_iterate(&cl);
+
+  // Clear the liveness counting data. If the marking has been aborted, the abort()
+  // call already did that.
+  if (cl.complete()) {
+    clear_all_count_data();
   }
 
-  // Clear the liveness counting data
-  clear_all_count_data();
-
   // Repeat the asserts from above.
   guarantee(cmThread()->during_cycle(), "invariant");
   guarantee(!g1h->mark_in_progress(), "invariant");
 }
 
+class CheckBitmapClearHRClosure : public HeapRegionClosure {
+  CMBitMap* _bitmap;
+  bool _error;
+ public:
+  CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
+  }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    // This closure can be called concurrently with the mutator, so we must make sure
+    // that the result of the getNextMarkedWordAddress() call is compared to the limit
+    // value passed to it in order to detect any marked bits.
+    // We can use the region's orig_end() for the limit and the comparison value
+    // as it always contains the "real" end of the region that never changes and
+    // has no side effects.
+    // Due to the latter, there can also be no problem with the compiler generating
+    // reloads of the orig_end() call.
+    HeapWord* end = r->orig_end();
+    return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
+  }
+};
+
+bool ConcurrentMark::nextMarkBitmapIsClear() {
+  CheckBitmapClearHRClosure cl(_nextMarkBitMap);
+  _g1h->heap_region_iterate(&cl);
+  return cl.complete();
+}
+
 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
   bool doHeapRegion(HeapRegion* r) {
@@ -976,13 +1000,13 @@
   }
 
   if (concurrent()) {
-    ConcurrentGCThread::stsLeave();
+    SuspendibleThreadSet::leave();
   }
 
   bool barrier_aborted = !_first_overflow_barrier_sync.enter();
 
   if (concurrent()) {
-    ConcurrentGCThread::stsJoin();
+    SuspendibleThreadSet::join();
   }
   // at this point everyone should have synced up and not be doing any
   // more work
@@ -1019,8 +1043,7 @@
       force_overflow()->update();
 
       if (G1Log::fine()) {
-        gclog_or_tty->date_stamp(PrintGCDateStamps);
-        gclog_or_tty->stamp(PrintGCTimeStamps);
+        gclog_or_tty->gclog_stamp(concurrent_gc_id());
         gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
       }
     }
@@ -1036,13 +1059,13 @@
   }
 
   if (concurrent()) {
-    ConcurrentGCThread::stsLeave();
+    SuspendibleThreadSet::leave();
   }
 
   bool barrier_aborted = !_second_overflow_barrier_sync.enter();
 
   if (concurrent()) {
-    ConcurrentGCThread::stsJoin();
+    SuspendibleThreadSet::join();
   }
   // at this point everything should be re-initialized and ready to go
 
@@ -1094,7 +1117,7 @@
 
     double start_vtime = os::elapsedVTime();
 
-    ConcurrentGCThread::stsJoin();
+    SuspendibleThreadSet::join();
 
     assert(worker_id < _cm->active_tasks(), "invariant");
     CMTask* the_task = _cm->task(worker_id);
@@ -1102,46 +1125,32 @@
     if (!_cm->has_aborted()) {
       do {
         double start_vtime_sec = os::elapsedVTime();
-        double start_time_sec = os::elapsedTime();
         double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
 
         the_task->do_marking_step(mark_step_duration_ms,
                                   true  /* do_termination */,
                                   false /* is_serial*/);
 
-        double end_time_sec = os::elapsedTime();
         double end_vtime_sec = os::elapsedVTime();
         double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
-        double elapsed_time_sec = end_time_sec - start_time_sec;
         _cm->clear_has_overflown();
 
-        bool ret = _cm->do_yield_check(worker_id);
+        _cm->do_yield_check(worker_id);
 
         jlong sleep_time_ms;
         if (!_cm->has_aborted() && the_task->has_aborted()) {
           sleep_time_ms =
             (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
-          ConcurrentGCThread::stsLeave();
+          SuspendibleThreadSet::leave();
           os::sleep(Thread::current(), sleep_time_ms, false);
-          ConcurrentGCThread::stsJoin();
+          SuspendibleThreadSet::join();
         }
-        double end_time2_sec = os::elapsedTime();
-        double elapsed_time2_sec = end_time2_sec - start_time_sec;
-
-#if 0
-          gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
-                                 "overhead %1.4lf",
-                                 elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
-                                 the_task->conc_overhead(os::elapsedTime()) * 8.0);
-          gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
-                                 elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
-#endif
       } while (!_cm->has_aborted() && the_task->has_aborted());
     }
     the_task->record_end_time();
     guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
 
-    ConcurrentGCThread::stsLeave();
+    SuspendibleThreadSet::leave();
 
     double end_vtime = os::elapsedVTime();
     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
@@ -1221,6 +1230,9 @@
 };
 
 void ConcurrentMark::scanRootRegions() {
+  // Start of concurrent marking.
+  ClassLoaderDataGraph::clear_claimed_marks();
+
   // scan_in_progress() will have been set to true only if there was
   // at least one root region to scan. So, if it's false, we
   // should not attempt to do any further work.
@@ -1269,7 +1281,7 @@
   CMConcurrentMarkingTask markingTask(this, cmThread());
   if (use_parallel_marking_threads()) {
     _parallel_workers->set_active_workers((int)active_workers);
-    // Don't set _n_par_threads because it affects MT in proceess_strong_roots()
+    // Don't set _n_par_threads because it affects MT in process_roots()
     // and the decisions on that MT processing is made elsewhere.
     assert(_parallel_workers->active_workers() > 0, "Should have been set");
     _parallel_workers->run_task(&markingTask);
@@ -1300,6 +1312,7 @@
     Universe::verify(VerifyOption_G1UsePrevMarking,
                      " VerifyDuringGC:(before)");
   }
+  g1h->check_bitmaps("Remark Start");
 
   G1CollectorPolicy* g1p = g1h->g1_policy();
   g1p->record_concurrent_mark_remark_start();
@@ -1348,6 +1361,7 @@
       Universe::verify(VerifyOption_G1UseNextMarking,
                        " VerifyDuringGC:(after)");
     }
+    g1h->check_bitmaps("Remark End");
     assert(!restart_for_overflow(), "sanity");
     // Completely reset the marking state since marking completed
     set_non_marking_state();
@@ -1389,7 +1403,7 @@
   void set_bit_for_region(HeapRegion* hr) {
     assert(!hr->continuesHumongous(), "should have filtered those out");
 
-    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
+    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
     if (!hr->startsHumongous()) {
       // Normal (non-humongous) case: just set the bit.
       _region_bm->par_at_put(index, true);
@@ -1577,7 +1591,7 @@
       if (_verbose) {
         gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                                "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
-                               hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
+                               hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
       }
       failures += 1;
     }
@@ -1586,7 +1600,7 @@
     // (which was just calculated) region bit maps.
     // We're not OK if the bit in the calculated expected region
     // bitmap is set and the bit in the actual region bitmap is not.
-    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
+    BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
 
     bool expected = _exp_region_bm->at(index);
     bool actual = _region_bm->at(index);
@@ -1594,7 +1608,7 @@
       if (_verbose) {
         gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
                                "expected: %s, actual: %s",
-                               hr->hrs_index(),
+                               hr->hrm_index(),
                                BOOL_TO_STR(expected), BOOL_TO_STR(actual));
       }
       failures += 1;
@@ -1615,7 +1629,7 @@
         if (_verbose) {
           gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
                                  "expected: %s, actual: %s",
-                                 hr->hrs_index(), i,
+                                 hr->hrm_index(), i,
                                  BOOL_TO_STR(expected), BOOL_TO_STR(actual));
         }
         failures += 1;
@@ -1997,6 +2011,7 @@
     Universe::verify(VerifyOption_G1UsePrevMarking,
                      " VerifyDuringGC:(before)");
   }
+  g1h->check_bitmaps("Cleanup Start");
 
   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
   g1p->record_concurrent_mark_cleanup_start();
@@ -2034,8 +2049,8 @@
     // that calculated by walking the marking bitmap.
 
     // Bitmaps to hold expected values
-    BitMap expected_region_bm(_region_bm.size(), false);
-    BitMap expected_card_bm(_card_bm.size(), false);
+    BitMap expected_region_bm(_region_bm.size(), true);
+    BitMap expected_card_bm(_card_bm.size(), true);
 
     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
                                                  &_region_bm,
@@ -2137,22 +2152,31 @@
   // Update the soft reference policy with the new heap occupancy.
   Universe::update_heap_info_at_gc();
 
-  // We need to make this be a "collection" so any collection pause that
-  // races with it goes around and waits for completeCleanup to finish.
-  g1h->increment_total_collections();
-
-  // We reclaimed old regions so we should calculate the sizes to make
-  // sure we update the old gen/space data.
-  g1h->g1mm()->update_sizes();
-
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
     Universe::heap()->prepare_for_verify();
     Universe::verify(VerifyOption_G1UsePrevMarking,
                      " VerifyDuringGC:(after)");
   }
+  g1h->check_bitmaps("Cleanup End");
 
   g1h->verify_region_sets_optional();
+
+  // We need to make this be a "collection" so any collection pause that
+  // races with it goes around and waits for completeCleanup to finish.
+  g1h->increment_total_collections();
+
+  // Clean out dead classes and update Metaspace sizes.
+  if (ClassUnloadingWithConcurrentMark) {
+    ClassLoaderDataGraph::purge();
+  }
+  MetaspaceGC::compute_new_size();
+
+  // We reclaimed old regions so we should calculate the sizes to make
+  // sure we update the old gen/space data.
+  g1h->g1mm()->update_sizes();
+  g1h->allocation_context_stats().update_after_mark();
+
   g1h->trace_heap_after_concurrent_cycle();
 }
 
@@ -2170,10 +2194,10 @@
                            _cleanup_list.length());
   }
 
-  // Noone else should be accessing the _cleanup_list at this point,
-  // so it's not necessary to take any locks
+  // No one else should be accessing the _cleanup_list at this point,
+  // so it is not necessary to take any locks
   while (!_cleanup_list.is_empty()) {
-    HeapRegion* hr = _cleanup_list.remove_head();
+    HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
     assert(hr != NULL, "Got NULL from a non-empty list");
     hr->par_clear();
     tmp_free_list.add_ordered(hr);
@@ -2382,6 +2406,8 @@
   }
 
   virtual void work(uint worker_id) {
+    ResourceMark rm;
+    HandleMark hm;
     CMTask* task = _cm->task(worker_id);
     G1CMIsAliveClosure g1_is_alive(_g1h);
     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
@@ -2439,6 +2465,26 @@
   _g1h->set_par_threads(0);
 }
 
+void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
+  G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
+}
+
+// Helper class to get rid of some boilerplate code.
+class G1RemarkGCTraceTime : public GCTraceTime {
+  static bool doit_and_prepend(bool doit) {
+    if (doit) {
+      gclog_or_tty->put(' ');
+    }
+    return doit;
+  }
+
+ public:
+  G1RemarkGCTraceTime(const char* title, bool doit)
+    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
+        G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
+  }
+};
+
 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
   if (has_overflown()) {
     // Skip processing the discovered references if we have
@@ -2464,7 +2510,7 @@
     if (G1Log::finer()) {
       gclog_or_tty->put(' ');
     }
-    GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm());
+    GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm(), concurrent_gc_id());
 
     ReferenceProcessor* rp = g1h->ref_processor_cm();
 
@@ -2521,7 +2567,8 @@
                                           &g1_keep_alive,
                                           &g1_drain_mark_stack,
                                           executor,
-                                          g1h->gc_timer_cm());
+                                          g1h->gc_timer_cm(),
+                                          concurrent_gc_id());
     g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
 
     // The do_oop work routines of the keep_alive and drain_marking_stack
@@ -2550,9 +2597,41 @@
     return;
   }
 
-  g1h->unlink_string_and_symbol_table(&g1_is_alive,
-                                      /* process_strings */ false, // currently strings are always roots
-                                      /* process_symbols */ true);
+  assert(_markStack.isEmpty(), "Marking should have completed");
+
+  // Unload Klasses, String, Symbols, Code Cache, etc.
+  {
+    G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
+
+    if (ClassUnloadingWithConcurrentMark) {
+      // Cleaning of klasses depends on correct information from MetadataOnStackMark. The CodeCache::mark_on_stack
+      // part is too slow to be done serially, so it is handled during the weakRefsWorkParallelPart phase.
+      // Defer the cleaning until we have complete on_stack data.
+      MetadataOnStackMark md_on_stack(false /* Don't visit the code cache at this point */);
+
+      bool purged_classes;
+
+      {
+        G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
+        purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
+      }
+
+      {
+        G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
+        weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
+      }
+
+      {
+        G1RemarkGCTraceTime trace("Deallocate Metadata", G1Log::finest());
+        ClassLoaderDataGraph::free_deallocate_lists();
+      }
+    }
+
+    if (G1StringDedup::is_enabled()) {
+      G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
+      G1StringDedup::unlink(&g1_is_alive);
+    }
+  }
 }
 
 void ConcurrentMark::swapMarkBitMaps() {
@@ -2561,6 +2640,57 @@
   _nextMarkBitMap  = (CMBitMap*)  temp;
 }
 
+class CMObjectClosure;
+
+// Closure for iterating over objects, currently only used for
+// processing SATB buffers.
+class CMObjectClosure : public ObjectClosure {
+private:
+  CMTask* _task;
+
+public:
+  void do_object(oop obj) {
+    _task->deal_with_reference(obj);
+  }
+
+  CMObjectClosure(CMTask* task) : _task(task) { }
+};
+
+class G1RemarkThreadsClosure : public ThreadClosure {
+  CMObjectClosure _cm_obj;
+  G1CMOopClosure _cm_cl;
+  MarkingCodeBlobClosure _code_cl;
+  int _thread_parity;
+  bool _is_par;
+
+ public:
+  G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) :
+    _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
+    _thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {}
+
+  void do_thread(Thread* thread) {
+    if (thread->is_Java_thread()) {
+      if (thread->claim_oops_do(_is_par, _thread_parity)) {
+        JavaThread* jt = (JavaThread*)thread;
+
+        // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
+        // however, oops reachable from nmethods have very complex lifecycles:
+        // * Alive if on the stack of an executing method
+        // * Weakly reachable otherwise
+        // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be
+        // live by the SATB invariant but other oops recorded in nmethods may behave differently.
+        jt->nmethods_do(&_code_cl);
+
+        jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
+      }
+    } else if (thread->is_VM_thread()) {
+      if (thread->claim_oops_do(_is_par, _thread_parity)) {
+        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
+      }
+    }
+  }
+};
+
 class CMRemarkTask: public AbstractGangTask {
 private:
   ConcurrentMark* _cm;
@@ -2572,6 +2702,14 @@
     if (worker_id < _cm->active_tasks()) {
       CMTask* task = _cm->task(worker_id);
       task->record_start_time();
+      {
+        ResourceMark rm;
+        HandleMark hm;
+
+        G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task, !_is_serial);
+        Threads::threads_do(&threads_f);
+      }
+
       do {
         task->do_marking_step(1000000000.0 /* something very large */,
                               true         /* do_termination       */,
@@ -2594,6 +2732,8 @@
   HandleMark   hm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
+  G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer());
+
   g1h->ensure_parsability(false);
 
   if (G1CollectedHeap::use_parallel_gc_threads()) {
@@ -2673,7 +2813,6 @@
       str = " O";
     } else {
       HeapRegion* hr  = _g1h->heap_region_containing(obj);
-      guarantee(hr != NULL, "invariant");
       bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
       bool marked = _g1h->is_marked(obj, _vo);
 
@@ -2814,11 +2953,6 @@
   _nextMarkBitMap->clearRange(mr);
 }
 
-void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) {
-  clearRangePrevBitmap(mr);
-  clearRangeNextBitmap(mr);
-}
-
 HeapRegion*
 ConcurrentMark::claim_region(uint worker_id) {
   // "checkpoint" the finger
@@ -2852,22 +2986,25 @@
     // claim_region() and a humongous object allocation might force us
     // to do a bit of unnecessary work (due to some unnecessary bitmap
     // iterations) but it should not introduce any correctness issues.
-    HeapRegion* curr_region   = _g1h->heap_region_containing_raw(finger);
-    HeapWord*   bottom        = curr_region->bottom();
-    HeapWord*   end           = curr_region->end();
-    HeapWord*   limit         = curr_region->next_top_at_mark_start();
-
-    if (verbose_low()) {
-      gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
-                             "["PTR_FORMAT", "PTR_FORMAT"), "
-                             "limit = "PTR_FORMAT,
-                             worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
-    }
+    HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
+
+    // heap_region_containing_raw above may return NULL because we always scan and claim
+    // regions up to the end of the heap. In this case, just jump to the next region.
+    HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
 
     // Is the gap between reading the finger and doing the CAS too long?
     HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
-    if (res == finger) {
+    if (res == finger && curr_region != NULL) {
       // we succeeded
+      HeapWord*   bottom        = curr_region->bottom();
+      HeapWord*   limit         = curr_region->next_top_at_mark_start();
+
+      if (verbose_low()) {
+        gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
+                               "["PTR_FORMAT", "PTR_FORMAT"), "
+                               "limit = "PTR_FORMAT,
+                               worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
+      }
 
       // notice that _finger == end cannot be guaranteed here since,
       // someone else might have moved the finger even further
@@ -2898,10 +3035,17 @@
     } else {
       assert(_finger > finger, "the finger should have moved forward");
       if (verbose_low()) {
-        gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
-                               "global finger = "PTR_FORMAT", "
-                               "our finger = "PTR_FORMAT,
-                               worker_id, p2i(_finger), p2i(finger));
+        if (curr_region == NULL) {
+          gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
+                                 "global finger = "PTR_FORMAT", "
+                                 "our finger = "PTR_FORMAT,
+                                 worker_id, p2i(_finger), p2i(finger));
+        } else {
+          gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
+                                 "global finger = "PTR_FORMAT", "
+                                 "our finger = "PTR_FORMAT,
+                                 worker_id, p2i(_finger), p2i(finger));
+        }
       }
 
       // read it again
@@ -3016,8 +3160,10 @@
       // happens, heap_region_containing() will return the bottom of the
       // corresponding starts humongous region and the check below will
       // not hold any more.
+      // Since we always iterate over all regions, we might get a NULL HeapRegion
+      // here.
       HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
-      guarantee(global_finger == global_hr->bottom(),
+      guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
                 err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
                         p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
     }
@@ -3030,7 +3176,7 @@
       if (task_finger != NULL && task_finger < _heap_end) {
         // See above note on the global finger verification.
         HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
-        guarantee(task_finger == task_hr->bottom() ||
+        guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
                   !task_hr->in_collection_set(),
                   err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
                           p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
@@ -3109,7 +3255,7 @@
     assert(limit_idx <= end_idx, "or else use atomics");
 
     // Aggregate the "stripe" in the count data associated with hr.
-    uint hrs_index = hr->hrs_index();
+    uint hrm_index = hr->hrm_index();
     size_t marked_bytes = 0;
 
     for (uint i = 0; i < _max_worker_id; i += 1) {
@@ -3118,7 +3264,7 @@
 
       // Fetch the marked_bytes in this region for task i and
       // add it to the running total for this region.
-      marked_bytes += marked_bytes_array[hrs_index];
+      marked_bytes += marked_bytes_array[hrm_index];
 
       // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
       // into the global card bitmap.
@@ -3241,8 +3387,14 @@
 
 // abandon current marking iteration due to a Full GC
 void ConcurrentMark::abort() {
-  // Clear all marks to force marking thread to do nothing
+  // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
+  // concurrent bitmap clearing.
   _nextMarkBitMap->clearAll();
+
+  // Note we cannot clear the previous marking bitmap here
+  // since VerifyDuringGC verifies the objects marked during
+  // a full GC against the previous bitmap.
+
   // Clear the liveness counting data
   clear_all_count_data();
   // Empty mark stack
@@ -3252,6 +3404,12 @@
   }
   _first_overflow_barrier_sync.abort();
   _second_overflow_barrier_sync.abort();
+  const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id();
+  if (!gc_id.is_undefined()) {
+    // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance
+    // to detect that it was aborted. Only keep track of the first GC id that we aborted.
+    _aborted_gc_id = gc_id;
+  }
   _has_aborted = true;
 
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
@@ -3266,6 +3424,13 @@
   _g1h->register_concurrent_cycle_end();
 }
 
+const GCId& ConcurrentMark::concurrent_gc_id() {
+  if (has_aborted()) {
+    return _aborted_gc_id;
+  }
+  return _g1h->gc_tracer_cm()->gc_id();
+}
+
 static void print_ms_time_info(const char* prefix, const char* name,
                                NumberSeq& ns) {
   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
@@ -3322,32 +3487,17 @@
 
 // We take a break if someone is trying to stop the world.
 bool ConcurrentMark::do_yield_check(uint worker_id) {
-  if (should_yield()) {
+  if (SuspendibleThreadSet::should_yield()) {
     if (worker_id == 0) {
       _g1h->g1_policy()->record_concurrent_pause();
     }
-    cmThread()->yield();
+    SuspendibleThreadSet::yield();
     return true;
   } else {
     return false;
   }
 }
 
-bool ConcurrentMark::should_yield() {
-  return cmThread()->should_yield();
-}
-
-bool ConcurrentMark::containing_card_is_marked(void* p) {
-  size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1);
-  return _card_bm.at(offset >> CardTableModRefBS::card_shift);
-}
-
-bool ConcurrentMark::containing_cards_are_marked(void* start,
-                                                 void* last) {
-  return containing_card_is_marked(start) &&
-         containing_card_is_marked(last);
-}
-
 #ifndef PRODUCT
 // for debugging purposes
 void ConcurrentMark::print_finger() {
@@ -3410,20 +3560,6 @@
   }
 };
 
-// Closure for iterating over objects, currently only used for
-// processing SATB buffers.
-class CMObjectClosure : public ObjectClosure {
-private:
-  CMTask* _task;
-
-public:
-  void do_object(oop obj) {
-    _task->deal_with_reference(obj);
-  }
-
-  CMObjectClosure(CMTask* task) : _task(task) { }
-};
-
 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
                                ConcurrentMark* cm,
                                CMTask* task)
@@ -3437,9 +3573,8 @@
 }
 
 void CMTask::setup_for_region(HeapRegion* hr) {
-  // Separated the asserts so that we know which one fires.
   assert(hr != NULL,
-        "claim_region() should have filtered out continues humongous regions");
+        "claim_region() should have filtered out NULL regions");
   assert(!hr->continuesHumongous(),
         "claim_region() should have filtered out continues humongous regions");
 
@@ -3615,7 +3750,7 @@
 
   if (_cm->verbose_medium()) {
       gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
-                        "scanned = %d%s, refs reached = %d%s",
+                        "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
                         _worker_id, last_interval_ms,
                         _words_scanned,
                         (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
@@ -3625,7 +3760,7 @@
 #endif // _MARKING_STATS_
 
   // (4) We check whether we should yield. If we have to, then we abort.
-  if (_cm->should_yield()) {
+  if (SuspendibleThreadSet::should_yield()) {
     // We should yield. To do this we abort the task. The caller is
     // responsible for yielding.
     set_has_aborted();
@@ -3889,15 +4024,6 @@
     }
   }
 
-  if (!concurrent() && !has_aborted()) {
-    // We should only do this during remark.
-    if (G1CollectedHeap::use_parallel_gc_threads()) {
-      satb_mq_set.par_iterate_closure_all_threads(_worker_id);
-    } else {
-      satb_mq_set.iterate_closure_all_threads();
-    }
-  }
-
   _draining_satb_buffers = false;
 
   assert(has_aborted() ||
@@ -4555,7 +4681,6 @@
     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  MemRegion g1_committed = g1h->g1_committed();
   MemRegion g1_reserved = g1h->g1_reserved();
   double now = os::elapsedTime();
 
@@ -4563,10 +4688,8 @@
   _out->cr();
   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
-                 G1PPRL_SUM_ADDR_FORMAT("committed")
                  G1PPRL_SUM_ADDR_FORMAT("reserved")
                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
-                 p2i(g1_committed.start()), p2i(g1_committed.end()),
                  p2i(g1_reserved.start()), p2i(g1_reserved.end()),
                  HeapRegion::GrainBytes);
   _out->print_cr(G1PPRL_LINE_PREFIX);
@@ -4627,7 +4750,7 @@
 }
 
 bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
-  const char* type = "";
+  const char* type       = r->get_type_str();
   HeapWord* bottom       = r->bottom();
   HeapWord* end          = r->end();
   size_t capacity_bytes  = r->capacity();
@@ -4638,15 +4761,7 @@
   size_t remset_bytes    = r->rem_set()->mem_size();
   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
 
-  if (r->used() == 0) {
-    type = "FREE";
-  } else if (r->is_survivor()) {
-    type = "SURV";
-  } else if (r->is_young()) {
-    type = "EDEN";
-  } else if (r->startsHumongous()) {
-    type = "HUMS";
-
+  if (r->startsHumongous()) {
     assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
            _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
            "they should have been zeroed after the last time we used them");
@@ -4659,12 +4774,9 @@
                   &prev_live_bytes, &next_live_bytes);
     end = bottom + HeapRegion::GrainWords;
   } else if (r->continuesHumongous()) {
-    type = "HUMC";
     get_hum_bytes(&used_bytes, &capacity_bytes,
                   &prev_live_bytes, &next_live_bytes);
     assert(end == bottom + HeapRegion::GrainWords, "invariant");
-  } else {
-    type = "OLD";
   }
 
   _total_used_bytes      += used_bytes;
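
The closures added above (ClearBitmapHRClosure, CheckBitmapClearHRClosure, NoteStartOfMarkHRClosure) all follow the same HeapRegionClosure pattern: heap_region_iterate() applies the closure to every region, doHeapRegion() returning true aborts the walk, and complete() afterwards tells the caller whether the walk ran to the end. The following is a minimal standalone sketch of that pattern, not the HotSpot sources; Region, RegionClosure and Heap are illustrative names only.

#include <cstddef>
#include <vector>

struct Region { bool has_marks; };

class RegionClosure {
  bool _complete;                        // stays true unless the walk is aborted early
 public:
  RegionClosure() : _complete(true) {}
  virtual ~RegionClosure() {}
  virtual bool do_region(Region* r) = 0; // return true to abort the iteration
  void set_incomplete() { _complete = false; }
  bool complete() const { return _complete; }
};

struct Heap {
  std::vector<Region> regions;
  void region_iterate(RegionClosure* cl) {
    for (size_t i = 0; i < regions.size(); i++) {
      if (cl->do_region(&regions[i])) {  // the closure asked to stop
        cl->set_incomplete();
        return;
      }
    }
  }
};

// Analogue of CheckBitmapClearHRClosure: abort as soon as a region still has a mark,
// so complete() == true afterwards means "no marks anywhere" (cf. nextMarkBitmapIsClear()).
class CheckClearClosure : public RegionClosure {
 public:
  virtual bool do_region(Region* r) { return r->has_marks; }
};

int main() {
  Heap heap;
  heap.regions.resize(4);                // all regions start out unmarked
  CheckClearClosure cl;
  heap.region_iterate(&cl);
  return cl.complete() ? 0 : 1;
}
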
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -25,10 +25,14 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
 
+#include "classfile/javaClasses.hpp"
 #include "gc_implementation/g1/heapRegionSet.hpp"
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
+#include "gc_implementation/shared/gcId.hpp"
 #include "utilities/taskqueue.hpp"
 
 class G1CollectedHeap;
+class CMBitMap;
 class CMTask;
 typedef GenericTaskQueue<oop, mtGC>            CMTaskQueue;
 typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet;
@@ -55,7 +59,6 @@
   HeapWord* _bmStartWord;      // base address of range covered by map
   size_t    _bmWordSize;       // map size (in #HeapWords covered)
   const int _shifter;          // map to char or bit
-  VirtualSpace _virtual_space; // underlying the bit map
   BitMap    _bm;               // the bit map itself
 
  public:
@@ -85,19 +88,19 @@
   // Return the address corresponding to the next marked bit at or after
   // "addr", and before "limit", if "limit" is non-NULL.  If there is no
   // such bit, returns "limit" if that is non-NULL, or else "endWord()".
-  HeapWord* getNextMarkedWordAddress(HeapWord* addr,
-                                     HeapWord* limit = NULL) const;
+  HeapWord* getNextMarkedWordAddress(const HeapWord* addr,
+                                     const HeapWord* limit = NULL) const;
   // Return the address corresponding to the next unmarked bit at or after
   // "addr", and before "limit", if "limit" is non-NULL.  If there is no
   // such bit, returns "limit" if that is non-NULL, or else "endWord()".
-  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr,
-                                       HeapWord* limit = NULL) const;
+  HeapWord* getNextUnmarkedWordAddress(const HeapWord* addr,
+                                       const HeapWord* limit = NULL) const;
 
   // conversion utilities
   HeapWord* offsetToHeapWord(size_t offset) const {
     return _bmStartWord + (offset << _shifter);
   }
-  size_t heapWordToOffset(HeapWord* addr) const {
+  size_t heapWordToOffset(const HeapWord* addr) const {
     return pointer_delta(addr, _bmStartWord) >> _shifter;
   }
   int heapWordDiffToOffsetDiff(size_t diff) const;
@@ -113,42 +116,41 @@
   void print_on_error(outputStream* st, const char* prefix) const;
 
   // debugging
-  NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
+  NOT_PRODUCT(bool covers(MemRegion rs) const;)
+};
+
+class CMBitMapMappingChangedListener : public G1MappingChangedListener {
+ private:
+  CMBitMap* _bm;
+ public:
+  CMBitMapMappingChangedListener() : _bm(NULL) {}
+
+  void set_bitmap(CMBitMap* bm) { _bm = bm; }
+
+  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 };
 
 class CMBitMap : public CMBitMapRO {
+ private:
+  CMBitMapMappingChangedListener _listener;
 
  public:
-  // constructor
-  CMBitMap(int shifter) :
-    CMBitMapRO(shifter) {}
+  static size_t compute_size(size_t heap_size);
+  // Returns the number of bytes on the heap between two marks in the bitmap.
+  static size_t mark_distance();
 
-  // Allocates the back store for the marking bitmap
-  bool allocate(ReservedSpace heap_rs);
+  CMBitMap() : CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }
 
-  // write marks
-  void mark(HeapWord* addr) {
-    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-           "outside underlying space?");
-    _bm.set_bit(heapWordToOffset(addr));
-  }
-  void clear(HeapWord* addr) {
-    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-           "outside underlying space?");
-    _bm.clear_bit(heapWordToOffset(addr));
-  }
-  bool parMark(HeapWord* addr) {
-    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-           "outside underlying space?");
-    return _bm.par_set_bit(heapWordToOffset(addr));
-  }
-  bool parClear(HeapWord* addr) {
-    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-           "outside underlying space?");
-    return _bm.par_clear_bit(heapWordToOffset(addr));
-  }
+  // Initializes the underlying BitMap to cover the given area.
+  void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);
+
+  // Write marks.
+  inline void mark(HeapWord* addr);
+  inline void clear(HeapWord* addr);
+  inline bool parMark(HeapWord* addr);
+  inline bool parClear(HeapWord* addr);
+
   void markRange(MemRegion mr);
-  void clearAll();
   void clearRange(MemRegion mr);
 
   // Starting at the bit corresponding to "addr" (inclusive), find the next
@@ -159,6 +161,9 @@
   // the run.  If there is no "1" bit at or after "addr", return an empty
   // MemRegion.
   MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
+
+  // Clear the whole mark bitmap.
+  void clearAll();
 };
 
 // Represents a marking stack used by ConcurrentMarking in the G1 collector.
@@ -444,6 +449,7 @@
   volatile bool           _concurrent;
   // set at the end of a Full GC so that marking aborts
   volatile bool           _has_aborted;
+  GCId                    _aborted_gc_id;
 
   // used when remark aborts due to an overflow to indicate that
   // another concurrent marking phase should start
@@ -474,6 +480,7 @@
   ForceOverflowSettings _force_overflow_conc;
   ForceOverflowSettings _force_overflow_stw;
 
+  void weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes);
   void weakRefsWork(bool clear_all_soft_refs);
 
   void swapMarkBitMaps();
@@ -676,7 +683,9 @@
     return _task_queues->steal(worker_id, hash_seed, obj);
   }
 
-  ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs);
+  ConcurrentMark(G1CollectedHeap* g1h,
+                 G1RegionToSpaceMapper* prev_bitmap_storage,
+                 G1RegionToSpaceMapper* next_bitmap_storage);
   ~ConcurrentMark();
 
   ConcurrentMarkThread* cmThread() { return _cmThread; }
@@ -705,8 +714,10 @@
   // inconsistent) and always passing the size. hr is the region that
   // contains the object and it's passed optionally from callers who
   // might already have it (no point in recalculating it).
-  inline void grayRoot(oop obj, size_t word_size,
-                       uint worker_id, HeapRegion* hr = NULL);
+  inline void grayRoot(oop obj,
+                       size_t word_size,
+                       uint worker_id,
+                       HeapRegion* hr = NULL);
 
   // It iterates over the heap and for each object it comes across it
   // will dump the contents of its reference fields, as well as
@@ -727,11 +738,16 @@
   //   AND MARKED : indicates that an object is both explicitly and
   //   implicitly live (it should be one or the other, not both)
   void print_reachable(const char* str,
-                       VerifyOption vo, bool all) PRODUCT_RETURN;
+                       VerifyOption vo,
+                       bool all) PRODUCT_RETURN;
 
   // Clear the next marking bitmap (will be called concurrently).
   void clearNextBitmap();
 
+  // Return whether the next mark bitmap has no marks set. To be used for assertions
+  // only. Will not yield to pause requests.
+  bool nextMarkBitmapIsClear();
+
   // These two do the work that needs to be done before and after the
   // initial root checkpoint. Since this checkpoint can be done at two
   // different points (i.e. an explicit pause or piggy-backed on a
@@ -760,12 +776,11 @@
   // this carefully!
   inline void markPrev(oop p);
 
-  // Clears marks for all objects in the given range, for the prev,
-  // next, or both bitmaps.  NB: the previous bitmap is usually
+  // Clears marks for all objects in the given range, for the prev or
+  // next bitmaps.  NB: the previous bitmap is usually
   // read-only, so use this carefully!
   void clearRangePrevBitmap(MemRegion mr);
   void clearRangeNextBitmap(MemRegion mr);
-  void clearRangeBothBitmaps(MemRegion mr);
 
   // Notify data structures that a GC has started.
   void note_start_of_gc() {
@@ -787,27 +802,6 @@
                            bool verify_thread_buffers,
                            bool verify_fingers) PRODUCT_RETURN;
 
-  // It is called at the end of an evacuation pause during marking so
-  // that CM is notified of where the new end of the heap is. It
-  // doesn't do anything if concurrent_marking_in_progress() is false,
-  // unless the force parameter is true.
-  void update_g1_committed(bool force = false);
-
-  bool isMarked(oop p) const {
-    assert(p != NULL && p->is_oop(), "expected an oop");
-    HeapWord* addr = (HeapWord*)p;
-    assert(addr >= _nextMarkBitMap->startWord() ||
-           addr < _nextMarkBitMap->endWord(), "in a region");
-
-    return _nextMarkBitMap->isMarked(addr);
-  }
-
-  inline bool not_yet_marked(oop p) const;
-
-  // XXX Debug code
-  bool containing_card_is_marked(void* p);
-  bool containing_cards_are_marked(void* start, void* last);
-
   bool isPrevMarked(oop p) const {
     assert(p != NULL && p->is_oop(), "expected an oop");
     HeapWord* addr = (HeapWord*)p;
@@ -818,13 +812,14 @@
   }
 
   inline bool do_yield_check(uint worker_i = 0);
-  inline bool should_yield();
 
   // Called to abort the marking cycle after a Full GC takes place.
   void abort();
 
   bool has_aborted()      { return _has_aborted; }
 
+  const GCId& concurrent_gc_id();
+
   // This prints the global/local fingers. It is used for debugging.
   NOT_PRODUCT(void print_finger();)
 
@@ -892,7 +887,8 @@
   // marked_bytes array slot for the given HeapRegion.
   // Sets the bits in the given card bitmap that are associated with the
   // cards that are spanned by the memory region.
-  inline void count_region(MemRegion mr, HeapRegion* hr,
+  inline void count_region(MemRegion mr,
+                           HeapRegion* hr,
                            size_t* marked_bytes_array,
                            BitMap* task_card_bm);
 
@@ -900,56 +896,27 @@
   // data structures for the given worker id.
   inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id);
 
-  // Counts the given memory region in the task/worker counting
-  // data structures for the given worker id.
-  inline void count_region(MemRegion mr, uint worker_id);
-
   // Counts the given object in the given task/worker counting
   // data structures.
-  inline void count_object(oop obj, HeapRegion* hr,
+  inline void count_object(oop obj,
+                           HeapRegion* hr,
                            size_t* marked_bytes_array,
                            BitMap* task_card_bm);
 
-  // Counts the given object in the task/worker counting data
-  // structures for the given worker id.
-  inline void count_object(oop obj, HeapRegion* hr, uint worker_id);
-
   // Attempts to mark the given object and, if successful, counts
   // the object in the given task/worker counting structures.
-  inline bool par_mark_and_count(oop obj, HeapRegion* hr,
+  inline bool par_mark_and_count(oop obj,
+                                 HeapRegion* hr,
                                  size_t* marked_bytes_array,
                                  BitMap* task_card_bm);
 
   // Attempts to mark the given object and, if successful, counts
   // the object in the task/worker counting structures for the
   // given worker id.
-  inline bool par_mark_and_count(oop obj, size_t word_size,
-                                 HeapRegion* hr, uint worker_id);
-
-  // Attempts to mark the given object and, if successful, counts
-  // the object in the task/worker counting structures for the
-  // given worker id.
-  inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id);
-
-  // Similar to the above routine but we don't know the heap region that
-  // contains the object to be marked/counted, which this routine looks up.
-  inline bool par_mark_and_count(oop obj, uint worker_id);
-
-  // Similar to the above routine but there are times when we cannot
-  // safely calculate the size of obj due to races and we, therefore,
-  // pass the size in as a parameter. It is the caller's reponsibility
-  // to ensure that the size passed in for obj is valid.
-  inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);
-
-  // Unconditionally mark the given object, and unconditinally count
-  // the object in the counting structures for worker id 0.
-  // Should *not* be called from parallel code.
-  inline bool mark_and_count(oop obj, HeapRegion* hr);
-
-  // Similar to the above routine but we don't know the heap region that
-  // contains the object to be marked/counted, which this routine looks up.
-  // Should *not* be called from parallel code.
-  inline bool mark_and_count(oop obj);
+  inline bool par_mark_and_count(oop obj,
+                                 size_t word_size,
+                                 HeapRegion* hr,
+                                 uint worker_id);
 
   // Returns true if initialization was successfully completed.
   bool completed_initialization() const {
@@ -1221,9 +1188,12 @@
     _finger = new_finger;
   }
 
-  CMTask(uint worker_id, ConcurrentMark *cm,
-         size_t* marked_bytes, BitMap* card_bm,
-         CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);
+  CMTask(uint worker_id,
+         ConcurrentMark *cm,
+         size_t* marked_bytes,
+         BitMap* card_bm,
+         CMTaskQueue* task_queue,
+         CMTaskQueueSet* task_queues);
 
   // it prints statistics associated with this task
   void print_stats();
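
The new CMBitMapMappingChangedListener in the header above is a small observer: the bitmap no longer owns a VirtualSpace, its storage is committed region by region through a G1RegionToSpaceMapper, and the listener's on_commit() lets the bitmap react when backing memory for part of the heap appears. Below is a simplified, self-contained sketch of that wiring under assumed names (Bitmap, MappingChangedListener, BitmapMappingListener); it is not the HotSpot API, and the real on_commit() also carries a zero_filled flag that is omitted here.

#include <algorithm>
#include <cstddef>
#include <vector>

class Bitmap {
  std::vector<bool> _bits;
  size_t _bits_per_region;
 public:
  Bitmap(size_t regions, size_t bits_per_region)
    : _bits(regions * bits_per_region, false), _bits_per_region(bits_per_region) {}
  void clear_region(size_t r) {
    std::fill(_bits.begin() + r * _bits_per_region,
              _bits.begin() + (r + 1) * _bits_per_region, false);
  }
};

class MappingChangedListener {
 public:
  virtual ~MappingChangedListener() {}
  virtual void on_commit(size_t start_region, size_t num_regions) = 0;
};

// Analogue of CMBitMapMappingChangedListener: constructed empty, wired to its bitmap later,
// and invoked whenever a run of heap regions becomes backed by committed memory.
class BitmapMappingListener : public MappingChangedListener {
  Bitmap* _bm;
 public:
  BitmapMappingListener() : _bm(NULL) {}
  void set_bitmap(Bitmap* bm) { _bm = bm; }
  virtual void on_commit(size_t start_region, size_t num_regions) {
    for (size_t i = 0; i < num_regions; i++) {
      _bm->clear_region(start_region + i);  // freshly committed storage must start out clear
    }
  }
};

int main() {
  Bitmap bm(8 /* regions */, 64 /* bits per region */);
  BitmapMappingListener listener;
  listener.set_bitmap(&bm);   // the same late wiring CMBitMap's constructor performs
  listener.on_commit(2, 3);   // regions 2..4 just gained real backing memory
  return 0;
}
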
--- a/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -86,7 +86,7 @@
   HeapWord* start = mr.start();
   HeapWord* end = mr.end();
   size_t region_size_bytes = mr.byte_size();
-  uint index = hr->hrs_index();
+  uint index = hr->hrm_index();
 
   assert(!hr->continuesHumongous(), "should not be HC region");
   assert(hr == g1h->heap_region_containing(start), "sanity");
@@ -125,14 +125,6 @@
   count_region(mr, hr, marked_bytes_array, task_card_bm);
 }
 
-// Counts the given memory region, which may be a single object, in the
-// task/worker counting data structures for the given worker id.
-inline void ConcurrentMark::count_region(MemRegion mr, uint worker_id) {
-  HeapWord* addr = mr.start();
-  HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
-  count_region(mr, hr, worker_id);
-}
-
 // Counts the given object in the given task/worker counting data structures.
 inline void ConcurrentMark::count_object(oop obj,
                                          HeapRegion* hr,
@@ -142,17 +134,6 @@
   count_region(mr, hr, marked_bytes_array, task_card_bm);
 }
 
-// Counts the given object in the task/worker counting data
-// structures for the given worker id.
-inline void ConcurrentMark::count_object(oop obj,
-                                         HeapRegion* hr,
-                                         uint worker_id) {
-  size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
-  BitMap* task_card_bm = count_card_bitmap_for(worker_id);
-  HeapWord* addr = (HeapWord*) obj;
-  count_object(obj, hr, marked_bytes_array, task_card_bm);
-}
-
 // Attempts to mark the given object and, if successful, counts
 // the object in the given task/worker counting structures.
 inline bool ConcurrentMark::par_mark_and_count(oop obj,
@@ -184,63 +165,6 @@
   return false;
 }
 
-// Attempts to mark the given object and, if successful, counts
-// the object in the task/worker counting structures for the
-// given worker id.
-inline bool ConcurrentMark::par_mark_and_count(oop obj,
-                                               HeapRegion* hr,
-                                               uint worker_id) {
-  HeapWord* addr = (HeapWord*)obj;
-  if (_nextMarkBitMap->parMark(addr)) {
-    // Update the task specific count data for the object.
-    count_object(obj, hr, worker_id);
-    return true;
-  }
-  return false;
-}
-
-// As above - but we don't know the heap region containing the
-// object and so have to supply it.
-inline bool ConcurrentMark::par_mark_and_count(oop obj, uint worker_id) {
-  HeapWord* addr = (HeapWord*)obj;
-  HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
-  return par_mark_and_count(obj, hr, worker_id);
-}
-
-// Similar to the above routine but we already know the size, in words, of
-// the object that we wish to mark/count
-inline bool ConcurrentMark::par_mark_and_count(oop obj,
-                                               size_t word_size,
-                                               uint worker_id) {
-  HeapWord* addr = (HeapWord*)obj;
-  if (_nextMarkBitMap->parMark(addr)) {
-    // Update the task specific count data for the object.
-    MemRegion mr(addr, word_size);
-    count_region(mr, worker_id);
-    return true;
-  }
-  return false;
-}
-
-// Unconditionally mark the given object, and unconditinally count
-// the object in the counting structures for worker id 0.
-// Should *not* be called from parallel code.
-inline bool ConcurrentMark::mark_and_count(oop obj, HeapRegion* hr) {
-  HeapWord* addr = (HeapWord*)obj;
-  _nextMarkBitMap->mark(addr);
-  // Update the task specific count data for the object.
-  count_object(obj, hr, 0 /* worker_id */);
-  return true;
-}
-
-// As above - but we don't have the heap region containing the
-// object, so we have to supply it.
-inline bool ConcurrentMark::mark_and_count(oop obj) {
-  HeapWord* addr = (HeapWord*)obj;
-  HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
-  return mark_and_count(obj, hr);
-}
-
 inline bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
   HeapWord* start_addr = MAX2(startWord(), mr.start());
   HeapWord* end_addr = MIN2(endWord(), mr.end());
@@ -268,6 +192,36 @@
   return iterate(cl, mr);
 }
 
+#define check_mark(addr)                                                       \
+  assert(_bmStartWord <= (addr) && (addr) < (_bmStartWord + _bmWordSize),      \
+         "outside underlying space?");                                         \
+  assert(G1CollectedHeap::heap()->is_in_exact(addr),                           \
+         err_msg("Trying to access not available bitmap "PTR_FORMAT            \
+                 " corresponding to "PTR_FORMAT" (%u)",                        \
+                 p2i(this), p2i(addr), G1CollectedHeap::heap()->addr_to_region(addr)));
+
+inline void CMBitMap::mark(HeapWord* addr) {
+  check_mark(addr);
+  _bm.set_bit(heapWordToOffset(addr));
+}
+
+inline void CMBitMap::clear(HeapWord* addr) {
+  check_mark(addr);
+  _bm.clear_bit(heapWordToOffset(addr));
+}
+
+inline bool CMBitMap::parMark(HeapWord* addr) {
+  check_mark(addr);
+  return _bm.par_set_bit(heapWordToOffset(addr));
+}
+
+inline bool CMBitMap::parClear(HeapWord* addr) {
+  check_mark(addr);
+  return _bm.par_clear_bit(heapWordToOffset(addr));
+}
+
+#undef check_mark
+
 inline void CMTask::push(oop obj) {
   HeapWord* objAddr = (HeapWord*) obj;
   assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
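
The check_mark macro introduced in the inline header above is a define-use-#undef idiom: one shared precondition is expanded into several inline mark/clear accessors and then undefined immediately so it cannot leak to other includers. A tiny standalone sketch of the same idiom, with made-up names (WordBitmap, check_index), might look as follows.

#include <cassert>
#include <cstddef>

struct WordBitmap {
  enum { SIZE = 1024 };
  bool bits[SIZE];

  WordBitmap() {
    for (size_t i = 0; i < (size_t) SIZE; i++) bits[i] = false;
  }

// Shared precondition, expanded into every accessor below and removed right after.
#define check_index(i)                                                        \
  assert((i) < (size_t) SIZE && "index outside the underlying bitmap")

  void mark(size_t i)            { check_index(i); bits[i] = true;  }
  void clear(size_t i)           { check_index(i); bits[i] = false; }
  bool is_marked(size_t i) const { check_index(i); return bits[i];  }

#undef check_index
};

int main() {
  WordBitmap bm;
  bm.mark(10);
  return bm.is_marked(10) ? 0 : 1;
}
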
--- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,4 +1,4 @@
-/*
+ /*
  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -29,6 +29,7 @@
 #include "gc_implementation/g1/g1Log.hpp"
 #include "gc_implementation/g1/g1MMUTracker.hpp"
 #include "gc_implementation/g1/vm_operations_g1.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/vmThread.hpp"
 
@@ -109,8 +110,7 @@
       double scan_start = os::elapsedTime();
       if (!cm()->has_aborted()) {
         if (G1Log::fine()) {
-          gclog_or_tty->date_stamp(PrintGCDateStamps);
-          gclog_or_tty->stamp(PrintGCTimeStamps);
+          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
           gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
         }
 
@@ -118,8 +118,7 @@
 
         double scan_end = os::elapsedTime();
         if (G1Log::fine()) {
-          gclog_or_tty->date_stamp(PrintGCDateStamps);
-          gclog_or_tty->stamp(PrintGCTimeStamps);
+          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
           gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]",
                                  scan_end - scan_start);
         }
@@ -127,8 +126,7 @@
 
       double mark_start_sec = os::elapsedTime();
       if (G1Log::fine()) {
-        gclog_or_tty->date_stamp(PrintGCDateStamps);
-        gclog_or_tty->stamp(PrintGCTimeStamps);
+        gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
         gclog_or_tty->print_cr("[GC concurrent-mark-start]");
       }
 
@@ -151,8 +149,7 @@
           }
 
           if (G1Log::fine()) {
-            gclog_or_tty->date_stamp(PrintGCDateStamps);
-            gclog_or_tty->stamp(PrintGCTimeStamps);
+            gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
             gclog_or_tty->print_cr("[GC concurrent-mark-end, %1.7lf secs]",
                                       mark_end_sec - mark_start_sec);
           }
@@ -167,8 +164,7 @@
                                    "in remark (restart #%d).", iter);
           }
           if (G1Log::fine()) {
-            gclog_or_tty->date_stamp(PrintGCDateStamps);
-            gclog_or_tty->stamp(PrintGCTimeStamps);
+            gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
             gclog_or_tty->print_cr("[GC concurrent-mark-restart-for-overflow]");
           }
         }
@@ -194,9 +190,8 @@
       } else {
         // We don't want to update the marking status if a GC pause
         // is already underway.
-        _sts.join();
+        SuspendibleThreadSetJoiner sts;
         g1h->set_marking_complete();
-        _sts.leave();
       }
 
       // Check if cleanup set the free_regions_coming flag. If it
@@ -212,8 +207,7 @@
 
         double cleanup_start_sec = os::elapsedTime();
         if (G1Log::fine()) {
-          gclog_or_tty->date_stamp(PrintGCDateStamps);
-          gclog_or_tty->stamp(PrintGCTimeStamps);
+          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
           gclog_or_tty->print_cr("[GC concurrent-cleanup-start]");
         }
 
@@ -233,8 +227,7 @@
 
         double cleanup_end_sec = os::elapsedTime();
         if (G1Log::fine()) {
-          gclog_or_tty->date_stamp(PrintGCDateStamps);
-          gclog_or_tty->stamp(PrintGCTimeStamps);
+          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
           gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf secs]",
                                  cleanup_end_sec - cleanup_start_sec);
         }
@@ -266,46 +259,47 @@
       // record_concurrent_mark_cleanup_completed() (and, in fact, it's
       // not needed any more as the concurrent mark state has been
       // already reset).
-      _sts.join();
-      if (!cm()->has_aborted()) {
-        g1_policy->record_concurrent_mark_cleanup_completed();
+      {
+        SuspendibleThreadSetJoiner sts;
+        if (!cm()->has_aborted()) {
+          g1_policy->record_concurrent_mark_cleanup_completed();
+        }
       }
-      _sts.leave();
 
       if (cm()->has_aborted()) {
         if (G1Log::fine()) {
-          gclog_or_tty->date_stamp(PrintGCDateStamps);
-          gclog_or_tty->stamp(PrintGCTimeStamps);
+          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
           gclog_or_tty->print_cr("[GC concurrent-mark-abort]");
         }
       }
 
       // We now want to allow clearing of the marking bitmap to be
       // suspended by a collection pause.
-      _sts.join();
-      _cm->clearNextBitmap();
-      _sts.leave();
+      // We may have aborted just before the remark. Do not bother clearing the
+      // bitmap then, as it has been done during mark abort.
+      if (!cm()->has_aborted()) {
+        SuspendibleThreadSetJoiner sts;
+        _cm->clearNextBitmap();
+      } else {
+        assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
+      }
     }
 
     // Update the number of full collections that have been
     // completed. This will also notify the FullGCCount_lock in case a
     // Java thread is waiting for a full GC to happen (e.g., it
     // called System.gc() with +ExplicitGCInvokesConcurrent).
-    _sts.join();
-    g1h->increment_old_marking_cycles_completed(true /* concurrent */);
-    g1h->register_concurrent_cycle_end();
-    _sts.leave();
+    {
+      SuspendibleThreadSetJoiner sts;
+      g1h->increment_old_marking_cycles_completed(true /* concurrent */);
+      g1h->register_concurrent_cycle_end();
+    }
   }
   assert(_should_terminate, "just checking");
 
   terminate();
 }
 
-
-void ConcurrentMarkThread::yield() {
-  _sts.yield("Concurrent Mark");
-}
-
 void ConcurrentMarkThread::stop() {
   {
     MutexLockerEx ml(Terminator_lock);
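
Editor's note: the hunks above replace the manual _sts.join() / _sts.leave() pairs with a stack-allocated SuspendibleThreadSetJoiner, so the leave happens on every exit path of the enclosing block. Below is a minimal standalone sketch of that RAII shape; ThreadSet and ScopedJoiner are illustrative stand-ins, not the HotSpot types.

    #include <cassert>

    // Illustrative stand-in for the suspendible thread set: it only counts
    // how many concurrent workers are currently joined.
    class ThreadSet {
      int _joined;
    public:
      ThreadSet() : _joined(0) {}
      void join()  { ++_joined; }
      void leave() { assert(_joined > 0); --_joined; }
      int joined() const { return _joined; }
    };

    // RAII joiner: joining in the constructor and leaving in the destructor
    // guarantees the leave() even on early returns or newly added branches.
    class ScopedJoiner {
      ThreadSet& _set;
    public:
      explicit ScopedJoiner(ThreadSet& set) : _set(set) { _set.join(); }
      ~ScopedJoiner() { _set.leave(); }
    };

    void do_suspendible_work(ThreadSet& sts, bool nothing_to_do) {
      {
        ScopedJoiner joiner(sts);   // joined for the duration of this block
        if (nothing_to_do) {
          return;                   // leave() still runs via the destructor
        }
        // ... work that must yield to safepoints goes here ...
      }                             // leave() runs here on the normal path
    }

The benefit over the removed explicit calls is that an early return or a new branch cannot skip the leave().
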
--- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -89,9 +89,6 @@
   // that started() is set and set in_progress().
   bool during_cycle()      { return started() || in_progress(); }
 
-  // Yield for GC
-  void            yield();
-
   // shutdown
   void stop();
 };
--- a/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -70,7 +70,7 @@
 
 DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
   PtrQueueSet(notify_when_complete),
-  _closure(NULL),
+  _mut_process_closure(NULL),
   _shared_dirty_card_queue(this, true /*perm*/),
   _free_ids(NULL),
   _processed_buffers_mut(0), _processed_buffers_rs_thread(0)
@@ -83,10 +83,11 @@
   return (uint)os::processor_count();
 }
 
-void DirtyCardQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
+void DirtyCardQueueSet::initialize(CardTableEntryClosure* cl, Monitor* cbl_mon, Mutex* fl_lock,
                                    int process_completed_threshold,
                                    int max_completed_queue,
                                    Mutex* lock, PtrQueueSet* fl_owner) {
+  _mut_process_closure = cl;
   PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold,
                           max_completed_queue, fl_owner);
   set_buffer_size(G1UpdateBufferSize);
@@ -98,18 +99,15 @@
   t->dirty_card_queue().handle_zero_index();
 }
 
-void DirtyCardQueueSet::set_closure(CardTableEntryClosure* closure) {
-  _closure = closure;
-}
-
-void DirtyCardQueueSet::iterate_closure_all_threads(bool consume,
+void DirtyCardQueueSet::iterate_closure_all_threads(CardTableEntryClosure* cl,
+                                                    bool consume,
                                                     uint worker_i) {
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
   for(JavaThread* t = Threads::first(); t; t = t->next()) {
-    bool b = t->dirty_card_queue().apply_closure(_closure, consume);
+    bool b = t->dirty_card_queue().apply_closure(cl, consume);
     guarantee(b, "Should not be interrupted.");
   }
-  bool b = shared_dirty_card_queue()->apply_closure(_closure,
+  bool b = shared_dirty_card_queue()->apply_closure(cl,
                                                     consume,
                                                     worker_i);
   guarantee(b, "Should not be interrupted.");
@@ -143,7 +141,7 @@
 
   bool b = false;
   if (worker_i != UINT_MAX) {
-    b = DirtyCardQueue::apply_closure_to_buffer(_closure, buf, 0,
+    b = DirtyCardQueue::apply_closure_to_buffer(_mut_process_closure, buf, 0,
                                                 _sz, true, worker_i);
     if (b) Atomic::inc(&_processed_buffers_mut);
 
@@ -218,18 +216,11 @@
   return res;
 }
 
-bool DirtyCardQueueSet::apply_closure_to_completed_buffer(uint worker_i,
-                                                          int stop_at,
-                                                          bool during_pause) {
-  return apply_closure_to_completed_buffer(_closure, worker_i,
-                                           stop_at, during_pause);
-}
-
-void DirtyCardQueueSet::apply_closure_to_all_completed_buffers() {
+void DirtyCardQueueSet::apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) {
   BufferNode* nd = _completed_buffers_head;
   while (nd != NULL) {
     bool b =
-      DirtyCardQueue::apply_closure_to_buffer(_closure,
+      DirtyCardQueue::apply_closure_to_buffer(cl,
                                               BufferNode::make_buffer_from_node(nd),
                                               0, _sz, false);
     guarantee(b, "Should not stop early.");
@@ -237,6 +228,24 @@
   }
 }
 
+void DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) {
+  BufferNode* nd = _cur_par_buffer_node;
+  while (nd != NULL) {
+    BufferNode* next = (BufferNode*)nd->next();
+    BufferNode* actual = (BufferNode*)Atomic::cmpxchg_ptr((void*)next, (volatile void*)&_cur_par_buffer_node, (void*)nd);
+    if (actual == nd) {
+      bool b =
+        DirtyCardQueue::apply_closure_to_buffer(cl,
+                                                BufferNode::make_buffer_from_node(actual),
+                                                0, _sz, false);
+      guarantee(b, "Should not stop early.");
+      nd = next;
+    } else {
+      nd = actual;
+    }
+  }
+}
+
 // Deallocates any completed log buffers
 void DirtyCardQueueSet::clear() {
   BufferNode* buffers_to_delete = NULL;
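
Editor's note: the new par_apply_closure_to_all_completed_buffers lets several workers walk one completed-buffer list by racing a compare-and-swap on a shared cursor; the thread that advances _cur_par_buffer_node from a node to its successor owns that node. A standalone sketch of the same claiming protocol using std::atomic (illustrative Node and process types, not the HotSpot ones):

    #include <atomic>

    struct Node {
      Node* next;
      int   payload;
    };

    // Shared claim cursor; a coordinator sets it to the list head before the
    // workers start (the counterpart of reset_for_par_iteration()).
    std::atomic<Node*> cursor(nullptr);

    // Each worker repeatedly tries to advance the cursor by one node; the
    // thread whose CAS succeeds owns that node and processes it alone.
    void par_process_all(void (*process)(Node*)) {
      Node* nd = cursor.load();
      while (nd != nullptr) {
        Node* next = nd->next;
        if (cursor.compare_exchange_strong(nd, next)) {
          process(nd);      // claimed exclusively by this thread
          nd = next;
        }
        // On failure compare_exchange_strong reloads nd with the current
        // cursor value, which plays the role of the "nd = actual" branch.
      }
    }

The loop never unlinks nodes, so the iteration is non-consumptive, matching the serial variant above it.
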
--- a/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,6 +47,13 @@
     // active field set to true.
     PtrQueue(qset_, perm, true /* active */) { }
 
+  // Flush before destroying; queue may be used to capture pending work while
+  // doing something else, with auto-flush on completion.
+  ~DirtyCardQueue() { if (!is_permanent()) flush(); }
+
+  // Process queue entries and release resources.
+  void flush() { flush_impl(); }
+
   // Apply the closure to all elements, and reset the index to make the
   // buffer empty.  If a closure application returns "false", return
   // "false" immediately, halting the iteration.  If "consume" is true,
@@ -73,7 +80,8 @@
 
 
 class DirtyCardQueueSet: public PtrQueueSet {
-  CardTableEntryClosure* _closure;
+  // The closure used in mut_process_buffer().
+  CardTableEntryClosure* _mut_process_closure;
 
   DirtyCardQueue _shared_dirty_card_queue;
 
@@ -88,10 +96,12 @@
   jint _processed_buffers_mut;
   jint _processed_buffers_rs_thread;
 
+  // Current buffer node used for parallel iteration.
+  BufferNode* volatile _cur_par_buffer_node;
 public:
   DirtyCardQueueSet(bool notify_when_complete = true);
 
-  void initialize(Monitor* cbl_mon, Mutex* fl_lock,
+  void initialize(CardTableEntryClosure* cl, Monitor* cbl_mon, Mutex* fl_lock,
                   int process_completed_threshold,
                   int max_completed_queue,
                   Mutex* lock, PtrQueueSet* fl_owner = NULL);
@@ -102,33 +112,15 @@
 
   static void handle_zero_index_for_thread(JavaThread* t);
 
-  // Register "blk" as "the closure" for all queues.  Only one such closure
-  // is allowed.  The "apply_closure_to_completed_buffer" method will apply
-  // this closure to a completed buffer, and "iterate_closure_all_threads"
-  // applies it to partially-filled buffers (the latter should only be done
-  // with the world stopped).
-  void set_closure(CardTableEntryClosure* closure);
-
-  // If there is a registered closure for buffers, apply it to all entries
-  // in all currently-active buffers.  This should only be applied at a
-  // safepoint.  (Currently must not be called in parallel; this should
-  // change in the future.)  If "consume" is true, processed entries are
-  // discarded.
-  void iterate_closure_all_threads(bool consume = true,
+  // Apply the given closure to all entries in all currently-active buffers.
+  // This should only be applied at a safepoint. (Currently must not be called
+  // in parallel; this should change in the future.)  If "consume" is true,
+  // processed entries are discarded.
+  void iterate_closure_all_threads(CardTableEntryClosure* cl,
+                                   bool consume = true,
                                    uint worker_i = 0);
 
   // If there exists some completed buffer, pop it, then apply the
-  // registered closure to all its elements, nulling out those elements
-  // processed.  If all elements are processed, returns "true".  If no
-  // completed buffers exist, returns false.  If a completed buffer exists,
-  // but is only partially completed before a "yield" happens, the
-  // partially completed buffer (with its processed elements set to NULL)
-  // is returned to the completed buffer set, and this call returns false.
-  bool apply_closure_to_completed_buffer(uint worker_i = 0,
-                                         int stop_at = 0,
-                                         bool during_pause = false);
-
-  // If there exists some completed buffer, pop it, then apply the
   // specified closure to all its elements, nulling out those elements
   // processed.  If all elements are processed, returns "true".  If no
   // completed buffers exist, returns false.  If a completed buffer exists,
@@ -149,7 +141,12 @@
 
   // Applies the current closure to all completed buffers,
   // non-consumptively.
-  void apply_closure_to_all_completed_buffers();
+  void apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl);
+
+  void reset_for_par_iteration() { _cur_par_buffer_node = _completed_buffers_head; }
+  // Applies the current closure to all completed buffers, non-consumptively.
+  // Parallel version.
+  void par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl);
 
   DirtyCardQueue* shared_dirty_card_queue() {
     return &_shared_dirty_card_queue;
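
Editor's note: compared with the removed set_closure() interface, the rewritten header threads the closure through every call instead of keeping it as hidden state on the queue set, which is what lets different callers (mutator refinement, GC workers, safepoint iteration) use different closures concurrently. A small sketch of the before/after shape, with an illustrative card-processing function type:

    typedef bool (*CardClosure)(void* card);

    // Before: one closure registered on the set; every apply call depends on
    // whichever closure happens to be registered at the time.
    class RegisteredClosureQueueSet {
      CardClosure _closure;
    public:
      RegisteredClosureQueueSet() : _closure(0) {}
      void set_closure(CardClosure cl) { _closure = cl; }
      bool apply(void* card)           { return _closure(card); }
    };

    // After: the closure is an explicit argument, so callers cannot step on
    // each other and the dependency is visible at every call site.
    class ExplicitClosureQueueSet {
    public:
      bool apply(CardClosure cl, void* card) { return cl(card); }
    };
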
--- a/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 
 G1CollectedHeap* G1AllocRegion::_g1h = NULL;
 HeapRegion* G1AllocRegion::_dummy_region = NULL;
@@ -128,8 +129,7 @@
     // Note that we first perform the allocation and then we store the
     // region in _alloc_region. This is the reason why an active region
     // can never be empty.
-    _alloc_region = new_alloc_region;
-    _count += 1;
+    update_alloc_region(new_alloc_region);
     trace("region allocation successful");
     return result;
   } else {
@@ -171,6 +171,19 @@
   trace("set");
 }
 
+void G1AllocRegion::update_alloc_region(HeapRegion* alloc_region) {
+  trace("update");
+  // We explicitly check that the region is not empty to make sure we
+  // maintain the "the alloc region cannot be empty" invariant.
+  assert(alloc_region != NULL && !alloc_region->is_empty(),
+         ar_ext_msg(this, "pre-condition"));
+
+  _alloc_region = alloc_region;
+  _alloc_region->set_allocation_context(allocation_context());
+  _count += 1;
+  trace("updated");
+}
+
 HeapRegion* G1AllocRegion::release() {
   trace("releasing");
   HeapRegion* alloc_region = _alloc_region;
@@ -224,5 +237,70 @@
 G1AllocRegion::G1AllocRegion(const char* name,
                              bool bot_updates)
   : _name(name), _bot_updates(bot_updates),
-    _alloc_region(NULL), _count(0), _used_bytes_before(0) { }
+    _alloc_region(NULL), _count(0), _used_bytes_before(0),
+    _allocation_context(AllocationContext::system()) { }
+
+
+HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
+                                                    bool force) {
+  return _g1h->new_mutator_alloc_region(word_size, force);
+}
+
+void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
+                                       size_t allocated_bytes) {
+  _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
+}
+
+HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
+                                                       bool force) {
+  assert(!force, "not supported for GC alloc regions");
+  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
+}
+
+void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
+                                          size_t allocated_bytes) {
+  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
+                               GCAllocForSurvived);
+}
+
+HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
+                                                  bool force) {
+  assert(!force, "not supported for GC alloc regions");
+  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
+}
 
+void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
+                                     size_t allocated_bytes) {
+  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
+                               GCAllocForTenured);
+}
+
+HeapRegion* OldGCAllocRegion::release() {
+  HeapRegion* cur = get();
+  if (cur != NULL) {
+    // Determine how far we are from the next card boundary. If it is smaller than
+    // the minimum object size we can allocate into, expand into the next card.
+    HeapWord* top = cur->top();
+    HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);
+
+    size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
+
+    if (to_allocate_words != 0) {
+      // We are not at a card boundary. Fill up, possibly into the next, taking the
+      // end of the region and the minimum object size into account.
+      to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
+                               MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
+
+      // Skip allocation if there is not enough space to allocate even the smallest
+      // possible object. In this case this region will not be retained, so the
+      // original problem cannot occur.
+      if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
+        HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
+        CollectedHeap::fill_with_object(dummy, to_allocate_words);
+      }
+    }
+  }
+  return G1AllocRegion::release();
+}
+
+
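
Editor's note: OldGCAllocRegion::release() above pads the retained region out to the next card boundary with a dummy object, so that remembered-set scanning updating the block-offset table for that last card cannot race with later allocations into it. The arithmetic (align top up to the card size, never fill past the end of the region, respect the minimum fill size) can be checked with a standalone sketch; the 512-byte card, 8-byte word and 2-word minimum fill are assumptions for the example, not values read from this patch:

    #include <cstddef>
    #include <cstdint>
    #include <algorithm>

    const size_t card_bytes     = 512;  // assumed card size (2^LogN, LogN = 9)
    const size_t word_bytes     = 8;    // assumed HeapWord size
    const size_t min_fill_words = 2;    // stand-in for the minimum filler size

    // Words of filler needed to push 'top' to the next card boundary,
    // clamped to the space left before 'end'; 0 means nothing to do.
    size_t words_to_fill(uintptr_t top, uintptr_t end) {
      uintptr_t aligned_top = (top + card_bytes - 1) & ~(uintptr_t)(card_bytes - 1);
      size_t to_allocate = (aligned_top - top) / word_bytes;
      if (to_allocate == 0) {
        return 0;                                   // already on a boundary
      }
      size_t remaining = (end - top) / word_bytes;
      to_allocate = std::min(remaining, std::max(to_allocate, min_fill_words));
      return (to_allocate >= min_fill_words) ? to_allocate : 0;
    }

With these numbers, a top that is one word below a boundary still gets a two-word filler, which is the "possibly into the next" card case mentioned in the comment; a region with less than the minimum fill left is simply not retained, so no filler is needed.
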
--- a/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -57,6 +57,9 @@
   // correct use of init() and release()).
   HeapRegion* volatile _alloc_region;
 
+  // Allocation context associated with this alloc region.
+  AllocationContext_t _allocation_context;
+
   // It keeps track of the distinct number of regions that are used
   // for allocation in the active interval of this object, i.e.,
   // between a call to init() and a call to release(). The count
@@ -110,6 +113,10 @@
   // else can allocate out of it.
   void retire(bool fill_up);
 
+  // After a region is allocated by alloc_new_region, this
+  // method is used to set it as the active alloc_region
+  void update_alloc_region(HeapRegion* alloc_region);
+
   // Allocate a new active region and use it to perform a word_size
   // allocation. The force parameter will be passed on to
   // G1CollectedHeap::allocate_new_alloc_region() and tells it to try
@@ -137,6 +144,9 @@
     return (hr == _dummy_region) ? NULL : hr;
   }
 
+  void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
+  AllocationContext_t  allocation_context() { return _allocation_context; }
+
   uint count() { return _count; }
 
   // The following two are the building blocks for the allocation method.
@@ -173,16 +183,7 @@
 
   // Should be called when we want to release the active region which
   // is returned after it's been retired.
-  HeapRegion* release();
-#ifdef GRAAL
-  HeapWord** top_addr()  const {
-    return _alloc_region->top_addr();
-  }
-
-  HeapWord** end_addr()  const {
-    return _alloc_region->end_addr();
-  }
-#endif
+  virtual HeapRegion* release();
 
 #if G1_ALLOC_REGION_TRACING
   void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL);
@@ -191,6 +192,40 @@
 #endif // G1_ALLOC_REGION_TRACING
 };
 
+class MutatorAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  MutatorAllocRegion()
+    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
+};
+
+class SurvivorGCAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  SurvivorGCAllocRegion()
+  : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
+};
+
+class OldGCAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  OldGCAllocRegion()
+  : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
+
+  // This specialization of release() makes sure that the last card that has
+  // been allocated into has been completely filled by a dummy object.  This
+  // avoids races when remembered set scanning wants to update the BOT of the
+  // last card in the retained old gc alloc region, and allocation threads
+  // allocating into that card at the same time.
+  virtual HeapRegion* release();
+};
+
 class ar_ext_msg : public err_msg {
 public:
   ar_ext_msg(G1AllocRegion* alloc_region, const char *message) : err_msg("%s", "") {
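
Editor's note: the three subclasses added to this header keep all of the retry and retire machinery in G1AllocRegion and override only where fresh regions come from and what bookkeeping happens when one is retired; OldGCAllocRegion additionally overrides release() for the card filling shown in the .cpp hunk. A minimal sketch of that template-method split, with illustrative names rather than the HotSpot classes:

    #include <cstddef>
    #include <cstdlib>

    // The base class owns the allocation protocol; subclasses decide only
    // the source of new regions and the bookkeeping when one is retired.
    class AllocRegionBase {
    protected:
      virtual void* allocate_new_region(size_t bytes) = 0;
      virtual void  retire_region(void* region, size_t used_bytes) = 0;
    public:
      virtual ~AllocRegionBase() {}
      void* refill(void* old_region, size_t used_bytes, size_t bytes) {
        if (old_region != NULL) {
          retire_region(old_region, used_bytes);   // subclass bookkeeping
        }
        return allocate_new_region(bytes);         // subclass source
      }
    };

    // One concrete flavour; others would differ only in these two hooks.
    class MallocBackedRegion : public AllocRegionBase {
    protected:
      virtual void* allocate_new_region(size_t bytes) { return std::malloc(bytes); }
      virtual void  retire_region(void* region, size_t) { std::free(region); }
    };
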
--- a/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_INLINE_HPP
 
 #include "gc_implementation/g1/g1AllocRegion.hpp"
+#include "gc_implementation/g1/heapRegion.inline.hpp"
 
 inline HeapWord* G1AllocRegion::allocate(HeapRegion* alloc_region,
                                          size_t word_size,
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1AllocationContext.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
+
+#include "memory/allocation.hpp"
+
+typedef unsigned char AllocationContext_t;
+
+class AllocationContext : AllStatic {
+public:
+  // Currently used context
+  static AllocationContext_t current() {
+    return 0;
+  }
+  // System wide default context
+  static AllocationContext_t system() {
+    return 0;
+  }
+};
+
+class AllocationContextStats: public StackObj {
+public:
+  inline void clear() { }
+  inline void update(bool full_gc) { }
+  inline void update_after_mark() { }
+  inline bool available() { return false; }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
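
Editor's note: AllocationContext_t in this new file is a one-byte tag that every alloc region now carries (see set_allocation_context in g1AllocRegion.hpp above); in this base version current() and system() both return 0, so all regions share one context, and AllocationContextStats is an empty shell. The hook exists so an extended build can key regions by context. A small, purely hypothetical sketch of how such a tag could be consulted when picking a region; the base code never branches on it:

    #include <cstddef>

    typedef unsigned char AllocationContext_t;

    struct Region {
      AllocationContext_t context;
      size_t              free_bytes;
    };

    // Hypothetical selector: with the stub contexts (always 0) every region
    // matches; an extended build could partition regions per tenant/context.
    Region* select_region(Region* regions, size_t n, AllocationContext_t wanted) {
      for (size_t i = 0; i < n; i++) {
        if (regions[i].context == wanted && regions[i].free_bytes > 0) {
          return &regions[i];
        }
      }
      return NULL;
    }
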
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1Allocator.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1Allocator.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+#include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/heapRegion.inline.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
+
+void G1DefaultAllocator::init_mutator_alloc_region() {
+  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
+  _mutator_alloc_region.init();
+}
+
+void G1DefaultAllocator::release_mutator_alloc_region() {
+  _mutator_alloc_region.release();
+  assert(_mutator_alloc_region.get() == NULL, "post-condition");
+}
+
+void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
+                                            OldGCAllocRegion* old,
+                                            HeapRegion** retained_old) {
+  HeapRegion* retained_region = *retained_old;
+  *retained_old = NULL;
+
+  // We will discard the current GC alloc region if:
+  // a) it's in the collection set (it can happen!),
+  // b) it's already full (no point in using it),
+  // c) it's empty (this means that it was emptied during
+  // a cleanup and it should be on the free list now), or
+  // d) it's humongous (this means that it was emptied
+  // during a cleanup and was added to the free list, but
+  // has been subsequently used to allocate a humongous
+  // object that may be less than the region size).
+  if (retained_region != NULL &&
+      !retained_region->in_collection_set() &&
+      !(retained_region->top() == retained_region->end()) &&
+      !retained_region->is_empty() &&
+      !retained_region->isHumongous()) {
+    retained_region->record_top_and_timestamp();
+    // The retained region was added to the old region set when it was
+    // retired. We have to remove it now, since we don't allow regions
+    // we allocate to in the region sets. We'll re-add it later, when
+    // it's retired again.
+    _g1h->_old_set.remove(retained_region);
+    bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
+    retained_region->note_start_of_copying(during_im);
+    old->set(retained_region);
+    _g1h->_hr_printer.reuse(retained_region);
+    evacuation_info.set_alloc_regions_used_before(retained_region->used());
+  }
+}
+
+void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+
+  _survivor_gc_alloc_region.init();
+  _old_gc_alloc_region.init();
+  reuse_retained_old_region(evacuation_info,
+                            &_old_gc_alloc_region,
+                            &_retained_old_gc_alloc_region);
+}
+
+void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
+  AllocationContext_t context = AllocationContext::current();
+  evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
+                                         old_gc_alloc_region(context)->count());
+  survivor_gc_alloc_region(context)->release();
+  // If we have an old GC alloc region to release, we'll save it in
+  // _retained_old_gc_alloc_region. If we don't
+  // _retained_old_gc_alloc_region will become NULL. This is what we
+  // want either way so no reason to check explicitly for either
+  // condition.
+  _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
+
+  if (ResizePLAB) {
+    _g1h->_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
+    _g1h->_old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
+  }
+}
+
+void G1DefaultAllocator::abandon_gc_alloc_regions() {
+  assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
+  assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
+  _retained_old_gc_alloc_region = NULL;
+}
+
+G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
+  ParGCAllocBuffer(gclab_word_size), _retired(true) { }
+
+HeapWord* G1ParGCAllocator::allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
+  HeapWord* obj = NULL;
+  size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
+  if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
+    G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose, context);
+    add_to_alloc_buffer_waste(alloc_buf->words_remaining());
+    alloc_buf->retire(false /* end_of_gc */, false /* retain */);
+
+    HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size, context);
+    if (buf == NULL) {
+      return NULL; // Let caller handle allocation failure.
+    }
+    // Otherwise.
+    alloc_buf->set_word_size(gclab_word_size);
+    alloc_buf->set_buf(buf);
+
+    obj = alloc_buf->allocate(word_sz);
+    assert(obj != NULL, "buffer was definitely big enough...");
+  } else {
+    obj = _g1h->par_allocate_during_gc(purpose, word_sz, context);
+  }
+  return obj;
+}
+
+G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
+            G1ParGCAllocator(g1h),
+            _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
+            _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)) {
+
+  _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
+  _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
+
+}
+
+void G1DefaultParGCAllocator::retire_alloc_buffers() {
+  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
+    size_t waste = _alloc_buffers[ap]->words_remaining();
+    add_to_alloc_buffer_waste(waste);
+    _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
+                                               true /* end_of_gc */,
+                                               false /* retain */);
+  }
+}
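
Editor's note: the key decision in this new file is in allocate_slow: a request only triggers a PLAB refill when it is small relative to the buffer (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct), because refilling writes off whatever is left in the current buffer as waste; larger objects are allocated directly from the shared GC alloc region instead. A standalone sketch of that decision with illustrative numbers in place of the HotSpot globals:

    #include <cstddef>

    const size_t plab_words = 1024;  // assumed desired PLAB size, in words
    const size_t waste_pct  = 10;    // assumed ParallelGCBufferWastePct

    enum SlowPathChoice { RefillBuffer, AllocateDirectly };

    // Refill only pays off when the request is a small fraction of a buffer;
    // otherwise the object goes straight to the shared allocation region.
    SlowPathChoice choose(size_t word_sz) {
      if (word_sz * 100 < plab_words * waste_pct) {
        return RefillBuffer;       // e.g. choose(50):  5000  < 10240
      }
      return AllocateDirectly;     // e.g. choose(200): 20000 >= 10240
    }

When the refill path is taken, the remaining words of the old buffer are added to the waste counter before the new buffer is installed, which is what the add_to_alloc_buffer_waste / retire pair above does.
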
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1Allocator.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
+
+#include "gc_implementation/g1/g1AllocationContext.hpp"
+#include "gc_implementation/g1/g1AllocRegion.hpp"
+#include "gc_implementation/shared/parGCAllocBuffer.hpp"
+
+enum GCAllocPurpose {
+  GCAllocForTenured,
+  GCAllocForSurvived,
+  GCAllocPurposeCount
+};
+
+// Base class for G1 allocators.
+class G1Allocator : public CHeapObj<mtGC> {
+  friend class VMStructs;
+protected:
+  G1CollectedHeap* _g1h;
+
+  // Outside of GC pauses, the number of bytes used in all regions other
+  // than the current allocation region.
+  size_t _summary_bytes_used;
+
+public:
+   G1Allocator(G1CollectedHeap* heap) :
+     _g1h(heap), _summary_bytes_used(0) { }
+
+   static G1Allocator* create_allocator(G1CollectedHeap* g1h);
+
+   virtual void init_mutator_alloc_region() = 0;
+   virtual void release_mutator_alloc_region() = 0;
+
+   virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
+   virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) = 0;
+   virtual void abandon_gc_alloc_regions() = 0;
+
+   virtual MutatorAllocRegion*    mutator_alloc_region(AllocationContext_t context) = 0;
+   virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
+   virtual OldGCAllocRegion*      old_gc_alloc_region(AllocationContext_t context) = 0;
+   virtual size_t                 used() = 0;
+   virtual bool                   is_retained_old_region(HeapRegion* hr) = 0;
+
+   void                           reuse_retained_old_region(EvacuationInfo& evacuation_info,
+                                                            OldGCAllocRegion* old,
+                                                            HeapRegion** retained);
+
+   size_t used_unlocked() const {
+     return _summary_bytes_used;
+   }
+
+   void increase_used(size_t bytes) {
+     _summary_bytes_used += bytes;
+   }
+
+   void decrease_used(size_t bytes) {
+     assert(_summary_bytes_used >= bytes,
+            err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
+                _summary_bytes_used, bytes));
+     _summary_bytes_used -= bytes;
+   }
+
+   void set_used(size_t bytes) {
+     _summary_bytes_used = bytes;
+   }
+
+   virtual HeapRegion* new_heap_region(uint hrs_index,
+                                       G1BlockOffsetSharedArray* sharedOffsetArray,
+                                       MemRegion mr) {
+     return new HeapRegion(hrs_index, sharedOffsetArray, mr);
+   }
+};
+
+// The default allocator for G1.
+class G1DefaultAllocator : public G1Allocator {
+protected:
+  // Alloc region used to satisfy mutator allocation requests.
+  MutatorAllocRegion _mutator_alloc_region;
+
+  // Alloc region used to satisfy allocation requests by the GC for
+  // survivor objects.
+  SurvivorGCAllocRegion _survivor_gc_alloc_region;
+
+  // Alloc region used to satisfy allocation requests by the GC for
+  // old objects.
+  OldGCAllocRegion _old_gc_alloc_region;
+
+  HeapRegion* _retained_old_gc_alloc_region;
+public:
+  G1DefaultAllocator(G1CollectedHeap* heap) : G1Allocator(heap), _retained_old_gc_alloc_region(NULL) { }
+
+  virtual void init_mutator_alloc_region();
+  virtual void release_mutator_alloc_region();
+
+  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
+  virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
+  virtual void abandon_gc_alloc_regions();
+
+  virtual bool is_retained_old_region(HeapRegion* hr) {
+    return _retained_old_gc_alloc_region == hr;
+  }
+
+  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) {
+    return &_mutator_alloc_region;
+  }
+
+  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) {
+    return &_survivor_gc_alloc_region;
+  }
+
+  virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
+    return &_old_gc_alloc_region;
+  }
+
+  virtual size_t used() {
+    assert(Heap_lock->owner() != NULL,
+           "Should be owned on this thread's behalf.");
+    size_t result = _summary_bytes_used;
+
+    // Read only once in case it is set to NULL concurrently
+    HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
+    if (hr != NULL) {
+      result += hr->used();
+    }
+    return result;
+  }
+};
+
+class G1ParGCAllocBuffer: public ParGCAllocBuffer {
+private:
+  bool _retired;
+
+public:
+  G1ParGCAllocBuffer(size_t gclab_word_size);
+  virtual ~G1ParGCAllocBuffer() {
+    guarantee(_retired, "Allocation buffer has not been retired");
+  }
+
+  virtual void set_buf(HeapWord* buf) {
+    ParGCAllocBuffer::set_buf(buf);
+    _retired = false;
+  }
+
+  virtual void retire(bool end_of_gc, bool retain) {
+    if (_retired) {
+      return;
+    }
+    ParGCAllocBuffer::retire(end_of_gc, retain);
+    _retired = true;
+  }
+};
+
+class G1ParGCAllocator : public CHeapObj<mtGC> {
+  friend class G1ParScanThreadState;
+protected:
+  G1CollectedHeap* _g1h;
+
+  size_t _alloc_buffer_waste;
+  size_t _undo_waste;
+
+  void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
+  void add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
+
+  HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context);
+
+  virtual void retire_alloc_buffers() = 0;
+  virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) = 0;
+
+public:
+  G1ParGCAllocator(G1CollectedHeap* g1h) :
+    _g1h(g1h), _alloc_buffer_waste(0), _undo_waste(0) {
+  }
+
+  static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
+
+  size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
+  size_t undo_waste() {return _undo_waste; }
+
+  HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
+    HeapWord* obj = NULL;
+    if (purpose == GCAllocForSurvived) {
+      obj = alloc_buffer(purpose, context)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
+    } else {
+      obj = alloc_buffer(purpose, context)->allocate(word_sz);
+    }
+    if (obj != NULL) {
+      return obj;
+    }
+    return allocate_slow(purpose, word_sz, context);
+  }
+
+  void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
+    if (alloc_buffer(purpose, context)->contains(obj)) {
+      assert(alloc_buffer(purpose, context)->contains(obj + word_sz - 1),
+             "should contain whole object");
+      alloc_buffer(purpose, context)->undo_allocation(obj, word_sz);
+    } else {
+      CollectedHeap::fill_with_object(obj, word_sz);
+      add_to_undo_waste(word_sz);
+    }
+  }
+};
+
+class G1DefaultParGCAllocator : public G1ParGCAllocator {
+  G1ParGCAllocBuffer  _surviving_alloc_buffer;
+  G1ParGCAllocBuffer  _tenured_alloc_buffer;
+  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
+
+public:
+  G1DefaultParGCAllocator(G1CollectedHeap* g1h);
+
+  virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) {
+    return _alloc_buffers[purpose];
+  }
+
+  virtual void retire_alloc_buffers();
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
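
Editor's note: undo_allocation at the end of this header is the other half of the PLAB bookkeeping: if the object being given back still lies inside the thread's buffer, it is handed back to that buffer; otherwise (it was allocated directly in the heap) the hole is plugged with a filler object and counted as undo waste so the heap stays parseable. A small standalone sketch of that rule with an illustrative bump-pointer buffer, not ParGCAllocBuffer:

    #include <cstddef>

    struct Buffer {
      char*  base;
      char*  top;        // bump pointer
      size_t capacity;

      bool contains(char* p) const { return p >= base && p < base + capacity; }

      // Rewind only if 'p' was the most recent allocation of 'bytes'.
      void undo(char* p, size_t bytes) {
        if (p + bytes == top) {
          top = p;
        }
      }
    };

    size_t undo_waste_bytes = 0;

    void undo_allocation(Buffer& buf, char* obj, size_t bytes) {
      if (buf.contains(obj)) {
        buf.undo(obj, bytes);      // came from the PLAB: try to rewind
      } else {
        // Came from a direct heap allocation: a real implementation would
        // overwrite [obj, obj + bytes) with a filler object; here we only
        // account for it as undo waste.
        undo_waste_bytes += bytes;
      }
    }
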
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1Allocator_ext.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1Allocator.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+
+G1Allocator* G1Allocator::create_allocator(G1CollectedHeap* g1h) {
+  return new G1DefaultAllocator(g1h);
+}
+
+G1ParGCAllocator* G1ParGCAllocator::create_allocator(G1CollectedHeap* g1h) {
+  return new G1DefaultParGCAllocator(g1h);
+}
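
Editor's note: this _ext.cpp file exists only to hold the two factory functions, so an extended build can replace this single compilation unit and return subclasses of G1Allocator and G1ParGCAllocator without touching the shared sources. The shape is an ordinary out-of-line factory; a minimal sketch with illustrative names:

    // allocator.hpp (shared): base class plus a factory the build can swap.
    class Allocator {
    public:
      virtual ~Allocator() {}
      virtual const char* name() const { return "default"; }
      static Allocator* create();   // defined in an overridable _ext file
    };

    // allocator_ext.cpp (stock build): return the default implementation.
    // An alternative build links a different file returning a subclass.
    Allocator* Allocator::create() {
      return new Allocator();
    }
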
--- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
 #include "memory/space.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
@@ -35,60 +36,26 @@
 // G1BlockOffsetSharedArray
 //////////////////////////////////////////////////////////////////////
 
-G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion reserved,
-                                                   size_t init_word_size) :
-  _reserved(reserved), _end(NULL)
-{
-  size_t size = compute_size(reserved.word_size());
-  ReservedSpace rs(ReservedSpace::allocation_align_size_up(size));
-  if (!rs.is_reserved()) {
-    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
-  }
-  if (!_vs.initialize(rs, 0)) {
-    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
-  }
+G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage) :
+  _reserved(), _end(NULL), _listener(), _offset_array(NULL) {
+
+  _reserved = heap;
+  _end = NULL;
 
-  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
+  MemRegion bot_reserved = storage->reserved();
 
-  _offset_array = (u_char*)_vs.low_boundary();
-  resize(init_word_size);
+  _offset_array = (u_char*)bot_reserved.start();
+  _end = _reserved.end();
+
+  storage->set_mapping_changed_listener(&_listener);
+
   if (TraceBlockOffsetTable) {
     gclog_or_tty->print_cr("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: ");
     gclog_or_tty->print_cr("  "
                   "  rs.base(): " INTPTR_FORMAT
                   "  rs.size(): " INTPTR_FORMAT
                   "  rs end(): " INTPTR_FORMAT,
-                  rs.base(), rs.size(), rs.base() + rs.size());
-    gclog_or_tty->print_cr("  "
-                  "  _vs.low_boundary(): " INTPTR_FORMAT
-                  "  _vs.high_boundary(): " INTPTR_FORMAT,
-                  _vs.low_boundary(),
-                  _vs.high_boundary());
-  }
-}
-
-void G1BlockOffsetSharedArray::resize(size_t new_word_size) {
-  assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved");
-  size_t new_size = compute_size(new_word_size);
-  size_t old_size = _vs.committed_size();
-  size_t delta;
-  char* high = _vs.high();
-  _end = _reserved.start() + new_word_size;
-  if (new_size > old_size) {
-    delta = ReservedSpace::page_align_size_up(new_size - old_size);
-    assert(delta > 0, "just checking");
-    if (!_vs.expand_by(delta)) {
-      // Do better than this for Merlin
-      vm_exit_out_of_memory(delta, OOM_MMAP_ERROR, "offset table expansion");
-    }
-    assert(_vs.high() == high + delta, "invalid expansion");
-    // Initialization of the contents is left to the
-    // G1BlockOffsetArray that uses it.
-  } else {
-    delta = ReservedSpace::page_align_size_down(old_size - new_size);
-    if (delta == 0) return;
-    _vs.shrink_by(delta);
-    assert(_vs.high() == high - delta, "invalid expansion");
+                  bot_reserved.start(), bot_reserved.byte_size(), bot_reserved.end());
   }
 }
 
@@ -98,28 +65,20 @@
   return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
 }
 
-
 //////////////////////////////////////////////////////////////////////
 // G1BlockOffsetArray
 //////////////////////////////////////////////////////////////////////
 
 G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
-                                       MemRegion mr, bool init_to_zero) :
+                                       MemRegion mr) :
   G1BlockOffsetTable(mr.start(), mr.end()),
   _unallocated_block(_bottom),
-  _array(array), _csp(NULL),
-  _init_to_zero(init_to_zero) {
+  _array(array), _gsp(NULL) {
   assert(_bottom <= _end, "arguments out of order");
-  if (!_init_to_zero) {
-    // initialize cards to point back to mr.start()
-    set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
-    _array->set_offset_array(0, 0);  // set first card to 0
-  }
 }
 
-void G1BlockOffsetArray::set_space(Space* sp) {
-  _sp = sp;
-  _csp = sp->toContiguousSpace();
+void G1BlockOffsetArray::set_space(G1OffsetTableContigSpace* sp) {
+  _gsp = sp;
 }
 
 // The arguments follow the normal convention of denoting
@@ -205,93 +164,6 @@
   DEBUG_ONLY(check_all_cards(start_card, end_card);)
 }
 
-// The block [blk_start, blk_end) has been allocated;
-// adjust the block offset table to represent this information;
-// right-open interval: [blk_start, blk_end)
-void
-G1BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
-  mark_block(blk_start, blk_end);
-  allocated(blk_start, blk_end);
-}
-
-// Adjust BOT to show that a previously whole block has been split
-// into two.
-void G1BlockOffsetArray::split_block(HeapWord* blk, size_t blk_size,
-                                     size_t left_blk_size) {
-  // Verify that the BOT shows [blk, blk + blk_size) to be one block.
-  verify_single_block(blk, blk_size);
-  // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size)
-  // is one single block.
-  mark_block(blk + left_blk_size, blk + blk_size);
-}
-
-
-// Action_mark - update the BOT for the block [blk_start, blk_end).
-//               Current typical use is for splitting a block.
-// Action_single - update the BOT for an allocation.
-// Action_verify - BOT verification.
-void G1BlockOffsetArray::do_block_internal(HeapWord* blk_start,
-                                           HeapWord* blk_end,
-                                           Action action) {
-  assert(Universe::heap()->is_in_reserved(blk_start),
-         "reference must be into the heap");
-  assert(Universe::heap()->is_in_reserved(blk_end-1),
-         "limit must be within the heap");
-  // This is optimized to make the test fast, assuming we only rarely
-  // cross boundaries.
-  uintptr_t end_ui = (uintptr_t)(blk_end - 1);
-  uintptr_t start_ui = (uintptr_t)blk_start;
-  // Calculate the last card boundary preceding end of blk
-  intptr_t boundary_before_end = (intptr_t)end_ui;
-  clear_bits(boundary_before_end, right_n_bits(LogN));
-  if (start_ui <= (uintptr_t)boundary_before_end) {
-    // blk starts at or crosses a boundary
-    // Calculate index of card on which blk begins
-    size_t    start_index = _array->index_for(blk_start);
-    // Index of card on which blk ends
-    size_t    end_index   = _array->index_for(blk_end - 1);
-    // Start address of card on which blk begins
-    HeapWord* boundary    = _array->address_for_index(start_index);
-    assert(boundary <= blk_start, "blk should start at or after boundary");
-    if (blk_start != boundary) {
-      // blk starts strictly after boundary
-      // adjust card boundary and start_index forward to next card
-      boundary += N_words;
-      start_index++;
-    }
-    assert(start_index <= end_index, "monotonicity of index_for()");
-    assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
-    switch (action) {
-      case Action_mark: {
-        if (init_to_zero()) {
-          _array->set_offset_array(start_index, boundary, blk_start);
-          break;
-        } // Else fall through to the next case
-      }
-      case Action_single: {
-        _array->set_offset_array(start_index, boundary, blk_start);
-        // We have finished marking the "offset card". We need to now
-        // mark the subsequent cards that this blk spans.
-        if (start_index < end_index) {
-          HeapWord* rem_st = _array->address_for_index(start_index) + N_words;
-          HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
-          set_remainder_to_point_to_start(rem_st, rem_end);
-        }
-        break;
-      }
-      case Action_check: {
-        _array->check_offset_array(start_index, boundary, blk_start);
-        // We have finished checking the "offset card". We need to now
-        // check the subsequent cards that this blk spans.
-        check_all_cards(start_index + 1, end_index);
-        break;
-      }
-      default:
-        ShouldNotReachHere();
-    }
-  }
-}
-
 // The card-interval [start_card, end_card] is a closed interval; this
 // is an expensive check -- use with care and only under protection of
 // suitable flag.
@@ -330,25 +202,6 @@
   }
 }
 
-// The range [blk_start, blk_end) represents a single contiguous block
-// of storage; modify the block offset table to represent this
-// information; Right-open interval: [blk_start, blk_end)
-// NOTE: this method does _not_ adjust _unallocated_block.
-void
-G1BlockOffsetArray::single_block(HeapWord* blk_start, HeapWord* blk_end) {
-  do_block_internal(blk_start, blk_end, Action_single);
-}
-
-// Mark the BOT such that if [blk_start, blk_end) straddles a card
-// boundary, the card following the first such boundary is marked
-// with the appropriate offset.
-// NOTE: this method does _not_ adjust _unallocated_block or
-// any cards subsequent to the first one.
-void
-G1BlockOffsetArray::mark_block(HeapWord* blk_start, HeapWord* blk_end) {
-  do_block_internal(blk_start, blk_end, Action_mark);
-}
-
 HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) {
   assert(_bottom <= addr && addr < _end,
          "addr must be covered by this Array");
@@ -378,7 +231,7 @@
   }
   // Otherwise, find the block start using the table.
   HeapWord* q = block_at_or_preceding(addr, false, 0);
-  HeapWord* n = q + _sp->block_size(q);
+  HeapWord* n = q + block_size(q);
   return forward_to_block_containing_addr_const(q, n, addr);
 }
 
@@ -406,86 +259,28 @@
          err_msg("next_boundary is beyond the end of the covered region "
                  " next_boundary " PTR_FORMAT " _array->_end " PTR_FORMAT,
                  next_boundary, _array->_end));
-  if (csp() != NULL) {
-    if (addr >= csp()->top()) return csp()->top();
-    while (next_boundary < addr) {
-      while (n <= next_boundary) {
-        q = n;
-        oop obj = oop(q);
-        if (obj->klass_or_null() == NULL) return q;
-        n += obj->size();
-      }
-      assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
-      // [q, n) is the block that crosses the boundary.
-      alloc_block_work2(&next_boundary, &next_index, q, n);
+  if (addr >= gsp()->top()) return gsp()->top();
+  while (next_boundary < addr) {
+    while (n <= next_boundary) {
+      q = n;
+      oop obj = oop(q);
+      if (obj->klass_or_null() == NULL) return q;
+      n += block_size(q);
     }
-  } else {
-    while (next_boundary < addr) {
-      while (n <= next_boundary) {
-        q = n;
-        oop obj = oop(q);
-        if (obj->klass_or_null() == NULL) return q;
-        n += _sp->block_size(q);
-      }
-      assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
-      // [q, n) is the block that crosses the boundary.
-      alloc_block_work2(&next_boundary, &next_index, q, n);
-    }
+    assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
+    // [q, n) is the block that crosses the boundary.
+    alloc_block_work2(&next_boundary, &next_index, q, n);
   }
   return forward_to_block_containing_addr_const(q, n, addr);
 }
 
-HeapWord* G1BlockOffsetArray::block_start_careful(const void* addr) const {
-  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
-
-  assert(_bottom <= addr && addr < _end,
-         "addr must be covered by this Array");
-  // Must read this exactly once because it can be modified by parallel
-  // allocation.
-  HeapWord* ub = _unallocated_block;
-  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
-    assert(ub < _end, "tautology (see above)");
-    return ub;
-  }
-
-  // Otherwise, find the block start using the table, but taking
-  // care (cf block_start_unsafe() above) not to parse any objects/blocks
-  // on the cards themsleves.
-  size_t index = _array->index_for(addr);
-  assert(_array->address_for_index(index) == addr,
-         "arg should be start of card");
-
-  HeapWord* q = (HeapWord*)addr;
-  uint offset;
-  do {
-    offset = _array->offset_array(index--);
-    q -= offset;
-  } while (offset == N_words);
-  assert(q <= addr, "block start should be to left of arg");
-  return q;
-}
-
 // Note that the committed size of the covered space may have changed,
 // so the table size might also wish to change.
 void G1BlockOffsetArray::resize(size_t new_word_size) {
   HeapWord* new_end = _bottom + new_word_size;
-  if (_end < new_end && !init_to_zero()) {
-    // verify that the old and new boundaries are also card boundaries
-    assert(_array->is_card_boundary(_end),
-           "_end not a card boundary");
-    assert(_array->is_card_boundary(new_end),
-           "new _end would not be a card boundary");
-    // set all the newly added cards
-    _array->set_offset_array(_end, new_end, N_words);
-  }
   _end = new_end;  // update _end
 }
 
-void G1BlockOffsetArray::set_region(MemRegion mr) {
-  _bottom = mr.start();
-  _end = mr.end();
-}
-
 //
 //              threshold_
 //              |   _index_
@@ -638,19 +433,38 @@
   assert(_bottom <= addr && addr < _end,
          "addr must be covered by this Array");
   HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
-  HeapWord* n = q + _sp->block_size(q);
+  HeapWord* n = q + block_size(q);
   return forward_to_block_containing_addr_const(q, n, addr);
 }
 
 G1BlockOffsetArrayContigSpace::
 G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
                               MemRegion mr) :
-  G1BlockOffsetArray(array, mr, true)
+  G1BlockOffsetArray(array, mr)
 {
   _next_offset_threshold = NULL;
   _next_offset_index = 0;
 }
 
+HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold_raw() {
+  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
+         "just checking");
+  _next_offset_index = _array->index_for_raw(_bottom);
+  _next_offset_index++;
+  _next_offset_threshold =
+    _array->address_for_index_raw(_next_offset_index);
+  return _next_offset_threshold;
+}
+
+void G1BlockOffsetArrayContigSpace::zero_bottom_entry_raw() {
+  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
+         "just checking");
+  size_t bottom_index = _array->index_for_raw(_bottom);
+  assert(_array->address_for_index_raw(bottom_index) == _bottom,
+         "Precondition of call");
+  _array->set_offset_array_raw(bottom_index, 0);
+}
+
 HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
   assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
          "just checking");
@@ -661,22 +475,12 @@
   return _next_offset_threshold;
 }
 
-void G1BlockOffsetArrayContigSpace::zero_bottom_entry() {
-  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
-         "just checking");
-  size_t bottom_index = _array->index_for(_bottom);
-  assert(_array->address_for_index(bottom_index) == _bottom,
-         "Precondition of call");
-  _array->set_offset_array(bottom_index, 0);
-}
-
 void
 G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
   assert(new_top <= _end, "_end should have already been updated");
 
   // The first BOT entry should have offset 0.
-  zero_bottom_entry();
-  initialize_threshold();
+  reset_bot();
   alloc_block(_bottom, new_top);
  }
 
--- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
 
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "memory/memRegion.hpp"
 #include "runtime/virtualspace.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -52,8 +53,8 @@
 // consolidation.
 
 // Forward declarations
-class ContiguousSpace;
 class G1BlockOffsetSharedArray;
+class G1OffsetTableContigSpace;
 
 class G1BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
   friend class VMStructs;
@@ -106,6 +107,16 @@
   inline HeapWord* block_start_const(const void* addr) const;
 };
 
+class G1BlockOffsetSharedArrayMappingChangedListener : public G1MappingChangedListener {
+ public:
+  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
+    // Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot
+    // retrieve it here since this would cause firing of several asserts. The code
+    // executed after commit of a region already needs to do some re-initialization of
+    // the HeapRegion, so we combine that.
+  }
+};
+
 // This implementation of "G1BlockOffsetTable" divides the covered region
 // into "N"-word subregions (where "N" = 2^"LogN".  An array with an entry
 // for each such subregion indicates how far back one must go to find the
@@ -125,6 +136,7 @@
   friend class VMStructs;
 
 private:
+  G1BlockOffsetSharedArrayMappingChangedListener _listener;
   // The reserved region covered by the shared array.
   MemRegion _reserved;
 
@@ -133,16 +145,8 @@
 
   // Array for keeping offsets for retrieving object start fast given an
   // address.
-  VirtualSpace _vs;
   u_char* _offset_array;          // byte array keeping backwards offsets
 
-  void check_index(size_t index, const char* msg) const {
-    assert(index < _vs.committed_size(),
-           err_msg("%s - "
-                   "index: " SIZE_FORMAT ", _vs.committed_size: " SIZE_FORMAT,
-                   msg, index, _vs.committed_size()));
-  }
-
   void check_offset(size_t offset, const char* msg) const {
     assert(offset <= N_words,
            err_msg("%s - "
@@ -152,76 +156,29 @@
 
   // Bounds checking accessors:
   // For performance these have to devolve to array accesses in product builds.
-  u_char offset_array(size_t index) const {
-    check_index(index, "index out of range");
-    return _offset_array[index];
-  }
+  inline u_char offset_array(size_t index) const;
 
-  void set_offset_array(size_t index, u_char offset) {
-    check_index(index, "index out of range");
-    check_offset(offset, "offset too large");
+  void set_offset_array_raw(size_t index, u_char offset) {
     _offset_array[index] = offset;
   }
 
-  void set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
-    check_index(index, "index out of range");
-    assert(high >= low, "addresses out of order");
-    check_offset(pointer_delta(high, low), "offset too large");
-    _offset_array[index] = (u_char) pointer_delta(high, low);
-  }
-
-  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
-    check_index(index_for(right - 1), "right address out of range");
-    assert(left  < right, "Heap addresses out of order");
-    size_t num_cards = pointer_delta(right, left) >> LogN_words;
-    if (UseMemSetInBOT) {
-      memset(&_offset_array[index_for(left)], offset, num_cards);
-    } else {
-      size_t i = index_for(left);
-      const size_t end = i + num_cards;
-      for (; i < end; i++) {
-        _offset_array[i] = offset;
-      }
-    }
-  }
+  inline void set_offset_array(size_t index, u_char offset);
 
-  void set_offset_array(size_t left, size_t right, u_char offset) {
-    check_index(right, "right index out of range");
-    assert(left <= right, "indexes out of order");
-    size_t num_cards = right - left + 1;
-    if (UseMemSetInBOT) {
-      memset(&_offset_array[left], offset, num_cards);
-    } else {
-      size_t i = left;
-      const size_t end = i + num_cards;
-      for (; i < end; i++) {
-        _offset_array[i] = offset;
-      }
-    }
-  }
+  inline void set_offset_array(size_t index, HeapWord* high, HeapWord* low);
 
-  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
-    check_index(index, "index out of range");
-    assert(high >= low, "addresses out of order");
-    check_offset(pointer_delta(high, low), "offset too large");
-    assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset");
-  }
+  inline void set_offset_array(size_t left, size_t right, u_char offset);
 
   bool is_card_boundary(HeapWord* p) const;
 
+public:
+
   // Return the number of slots needed for an offset array
   // that covers mem_region_words words.
-  // We always add an extra slot because if an object
-  // ends on a card boundary we put a 0 in the next
-  // offset array slot, so we want that slot always
-  // to be reserved.
-
-  size_t compute_size(size_t mem_region_words) {
-    size_t number_of_slots = (mem_region_words / N_words) + 1;
-    return ReservedSpace::page_align_size_up(number_of_slots);
+  static size_t compute_size(size_t mem_region_words) {
+    size_t number_of_slots = (mem_region_words / N_words);
+    return ReservedSpace::allocation_align_size_up(number_of_slots);
   }
 
-public:
   enum SomePublicConstants {
     LogN = 9,
     LogN_words = LogN - LogHeapWordSize,
@@ -235,25 +192,19 @@
   // least "init_word_size".) The contents of the initial table are
   // undefined; it is the responsibility of the constituent
   // G1BlockOffsetTable(s) to initialize cards.
-  G1BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);
-
-  // Notes a change in the committed size of the region covered by the
-  // table.  The "new_word_size" may not be larger than the size of the
-  // reserved region this table covers.
-  void resize(size_t new_word_size);
-
-  void set_bottom(HeapWord* new_bottom);
-
-  // Updates all the BlockOffsetArray's sharing this shared array to
-  // reflect the current "top"'s of their spaces.
-  void update_offset_arrays();
+  G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage);
 
   // Return the appropriate index into "_offset_array" for "p".
   inline size_t index_for(const void* p) const;
+  inline size_t index_for_raw(const void* p) const;
 
   // Return the address indicating the start of the region corresponding to
   // "index" in "_offset_array".
   inline HeapWord* address_for_index(size_t index) const;
+  // Variant of address_for_index that does not check the index for validity.
+  inline HeapWord* address_for_index_raw(size_t index) const {
+    return _reserved.start() + (index << LogN_words);
+  }
 };
 
 // And here is the G1BlockOffsetTable subtype that uses the array.
@@ -268,28 +219,12 @@
     LogN    = G1BlockOffsetSharedArray::LogN
   };
 
-  // The following enums are used by do_block_helper
-  enum Action {
-    Action_single,      // BOT records a single block (see single_block())
-    Action_mark,        // BOT marks the start of a block (see mark_block())
-    Action_check        // Check that BOT records block correctly
-                        // (see verify_single_block()).
-  };
-
   // This is the array, which can be shared by several BlockOffsetArray's
   // servicing different parts of the heap.
   G1BlockOffsetSharedArray* _array;
 
   // The space that owns this subregion.
-  Space* _sp;
-
-  // If "_sp" is a contiguous space, the field below is the view of "_sp"
-  // as a contiguous space, else NULL.
-  ContiguousSpace* _csp;
-
-  // If true, array entries are initialized to 0; otherwise, they are
-  // initialized to point backwards to the beginning of the covered region.
-  bool _init_to_zero;
+  G1OffsetTableContigSpace* _gsp;
 
   // The portion [_unallocated_block, _sp.end()) of the space that
   // is a single block known not to contain any objects.
@@ -305,12 +240,11 @@
   // that is closed: [start_index, end_index]
   void set_remainder_to_point_to_start_incl(size_t start, size_t end);
 
-  // A helper function for BOT adjustment/verification work
-  void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action);
-
 protected:
 
-  ContiguousSpace* csp() const { return _csp; }
+  G1OffsetTableContigSpace* gsp() const { return _gsp; }
+
+  inline size_t block_size(const HeapWord* p) const;
 
   // Returns the address of a block whose start is at most "addr".
   // If "has_max_index" is true, "assumes "max_index" is the last valid one
@@ -353,126 +287,29 @@
 
 public:
   // The space may not have its bottom and top set yet, which is why the
-  // region is passed as a parameter.  If "init_to_zero" is true, the
-  // elements of the array are initialized to zero.  Otherwise, they are
-  // initialized to point backwards to the beginning.
-  G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr,
-                     bool init_to_zero);
+  // region is passed as a parameter. The elements of the array are
+  // initialized to zero.
+  G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr);
 
   // Note: this ought to be part of the constructor, but that would require
   // "this" to be passed as a parameter to a member constructor for
   // the containing concrete subtype of Space.
   // This would be legal C++, but MS VC++ doesn't allow it.
-  void set_space(Space* sp);
-
-  // Resets the covered region to the given "mr".
-  void set_region(MemRegion mr);
+  void set_space(G1OffsetTableContigSpace* sp);
 
   // Resets the covered region to one with the same _bottom as before but
   // the "new_word_size".
   void resize(size_t new_word_size);
 
-  // These must be guaranteed to work properly (i.e., do nothing)
-  // when "blk_start" ("blk" for second version) is "NULL".
-  virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
-  virtual void alloc_block(HeapWord* blk, size_t size) {
-    alloc_block(blk, blk + size);
-  }
-
-  // The following methods are useful and optimized for a
-  // general, non-contiguous space.
-
-  // Given a block [blk_start, blk_start + full_blk_size), and
-  // a left_blk_size < full_blk_size, adjust the BOT to show two
-  // blocks [blk_start, blk_start + left_blk_size) and
-  // [blk_start + left_blk_size, blk_start + full_blk_size).
-  // It is assumed (and verified in the non-product VM) that the
-  // BOT was correct for the original block.
-  void split_block(HeapWord* blk_start, size_t full_blk_size,
-                           size_t left_blk_size);
-
-  // Adjust the BOT to show that it has a single block in the
-  // range [blk_start, blk_start + size). All necessary BOT
-  // cards are adjusted, but _unallocated_block isn't.
-  void single_block(HeapWord* blk_start, HeapWord* blk_end);
-  void single_block(HeapWord* blk, size_t size) {
-    single_block(blk, blk + size);
-  }
-
-  // Adjust BOT to show that it has a block in the range
-  // [blk_start, blk_start + size). Only the first card
-  // of BOT is touched. It is assumed (and verified in the
-  // non-product VM) that the remaining cards of the block
-  // are correct.
-  void mark_block(HeapWord* blk_start, HeapWord* blk_end);
-  void mark_block(HeapWord* blk, size_t size) {
-    mark_block(blk, blk + size);
-  }
-
-  // Adjust _unallocated_block to indicate that a particular
-  // block has been newly allocated or freed. It is assumed (and
-  // verified in the non-product VM) that the BOT is correct for
-  // the given block.
-  inline void allocated(HeapWord* blk_start, HeapWord* blk_end) {
-    // Verify that the BOT shows [blk, blk + blk_size) to be one block.
-    verify_single_block(blk_start, blk_end);
-    if (BlockOffsetArrayUseUnallocatedBlock) {
-      _unallocated_block = MAX2(_unallocated_block, blk_end);
-    }
-  }
-
-  inline void allocated(HeapWord* blk, size_t size) {
-    allocated(blk, blk + size);
-  }
-
-  inline void freed(HeapWord* blk_start, HeapWord* blk_end);
-
-  inline void freed(HeapWord* blk, size_t size);
-
   virtual HeapWord* block_start_unsafe(const void* addr);
   virtual HeapWord* block_start_unsafe_const(const void* addr) const;
 
-  // Requires "addr" to be the start of a card and returns the
-  // start of the block that contains the given address.
-  HeapWord* block_start_careful(const void* addr) const;
-
-  // If true, initialize array slots with no allocated blocks to zero.
-  // Otherwise, make them point back to the front.
-  bool init_to_zero() { return _init_to_zero; }
-
-  // Verification & debugging - ensure that the offset table reflects the fact
-  // that the block [blk_start, blk_end) or [blk, blk + size) is a
-  // single block of storage. NOTE: can;t const this because of
-  // call to non-const do_block_internal() below.
-  inline void verify_single_block(HeapWord* blk_start, HeapWord* blk_end) {
-    if (VerifyBlockOffsetArray) {
-      do_block_internal(blk_start, blk_end, Action_check);
-    }
-  }
-
-  inline void verify_single_block(HeapWord* blk, size_t size) {
-    verify_single_block(blk, blk + size);
-  }
-
   // Used by region verification. Checks that the contents of the
   // BOT reflect that there's a single object that spans the address
   // range [obj_start, obj_start + word_size); returns true if this is
   // the case, returns false if it's not.
   bool verify_for_object(HeapWord* obj_start, size_t word_size) const;
 
-  // Verify that the given block is before _unallocated_block
-  inline void verify_not_unallocated(HeapWord* blk_start,
-                                     HeapWord* blk_end) const {
-    if (BlockOffsetArrayUseUnallocatedBlock) {
-      assert(blk_start < blk_end, "Block inconsistency?");
-      assert(blk_end <= _unallocated_block, "_unallocated_block problem");
-    }
-  }
-
-  inline void verify_not_unallocated(HeapWord* blk, size_t size) const {
-    verify_not_unallocated(blk, blk + size);
-  }
-
   void check_all_cards(size_t left_card, size_t right_card) const;
 
   virtual void print_on(outputStream* out) PRODUCT_RETURN;
@@ -495,6 +332,12 @@
                       blk_start, blk_end);
   }
 
+  // Zero out the entry for _bottom (offset will be zero). Does not check for availability of the
+  // memory first.
+  void zero_bottom_entry_raw();
+  // Variant of initialize_threshold that does not check for availability of the
+  // memory first.
+  HeapWord* initialize_threshold_raw();
  public:
   G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);
 
@@ -502,8 +345,10 @@
   // bottom of the covered region.
   HeapWord* initialize_threshold();
 
-  // Zero out the entry for _bottom (offset will be zero).
-  void      zero_bottom_entry();
+  void reset_bot() {
+    zero_bottom_entry_raw();
+    initialize_threshold_raw();
+  }
 
   // Return the next threshold, the point at which the table should be
   // updated.
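
The header above reduces the shared BOT to a fixed card-granularity mapping: index_for_raw() shifts a byte offset right by LogN, address_for_index_raw() shifts a card index left by LogN_words, and compute_size() allocates one table slot per N_words covered words. The following standalone C++ sketch illustrates that arithmetic; it is not HotSpot code, the constants merely mirror SomePublicConstants (LogN = 9) under the assumption of 8-byte HeapWords, and the helper names are illustrative.

// Standalone sketch of the BOT index arithmetic (illustrative only).
#include <cassert>
#include <cstddef>

// Mirrors SomePublicConstants: LogN = 9 gives 2^9 = 512-byte cards,
// i.e. 64 words when a HeapWord is 8 bytes (LogHeapWordSize = 3).
static const size_t LogN        = 9;
static const size_t LogHeapWord = 3;
static const size_t LogN_words  = LogN - LogHeapWord;
static const size_t N_words     = size_t(1) << LogN_words;

// index_for_raw(): byte distance from the reserved base, shifted down to a card index.
static size_t index_for_raw(const char* reserved_start, const void* p) {
  return size_t((const char*)p - reserved_start) >> LogN;
}

// address_for_index_raw(): card index back to the first byte it covers
// (the real code adds index << LogN_words to a HeapWord*, which is the same thing).
static const char* address_for_index_raw(const char* reserved_start, size_t index) {
  return reserved_start + ((index << LogN_words) << LogHeapWord);
}

int main() {
  static char heap[1 << 20];                 // stand-in for the reserved heap range
  const void* p = heap + 5000;
  size_t idx = index_for_raw(heap, p);       // 5000 >> 9 == 9
  assert(idx == 9);
  assert(address_for_index_raw(heap, idx) <= (const char*)p);

  // compute_size(): one offset-table slot per N_words covered words,
  // rounded up to the allocation alignment in the real code.
  size_t mem_region_words = sizeof(heap) >> LogHeapWord;
  size_t number_of_slots  = mem_region_words / N_words;
  assert(number_of_slots == (1 << 20) / 512);
  return 0;
}
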
--- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,6 +26,8 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP
 
 #include "gc_implementation/g1/g1BlockOffsetTable.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/heapRegion.inline.hpp"
 #include "memory/space.hpp"
 
 inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
@@ -45,14 +47,62 @@
   }
 }
 
+#define check_index(index, msg)                                                \
+  assert((index) < (_reserved.word_size() >> LogN_words),                      \
+         err_msg("%s - index: "SIZE_FORMAT", _vs.committed_size: "SIZE_FORMAT, \
+                 msg, (index), (_reserved.word_size() >> LogN_words)));        \
+  assert(G1CollectedHeap::heap()->is_in_exact(address_for_index_raw(index)),   \
+         err_msg("Index "SIZE_FORMAT" corresponding to "PTR_FORMAT             \
+                 " (%u) is not in committed area.",                            \
+                 (index),                                                      \
+                 p2i(address_for_index_raw(index)),                            \
+                 G1CollectedHeap::heap()->addr_to_region(address_for_index_raw(index))));
+
+u_char G1BlockOffsetSharedArray::offset_array(size_t index) const {
+  check_index(index, "index out of range");
+  return _offset_array[index];
+}
+
+void G1BlockOffsetSharedArray::set_offset_array(size_t index, u_char offset) {
+  check_index(index, "index out of range");
+  set_offset_array_raw(index, offset);
+}
+
+void G1BlockOffsetSharedArray::set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
+  check_index(index, "index out of range");
+  assert(high >= low, "addresses out of order");
+  size_t offset = pointer_delta(high, low);
+  check_offset(offset, "offset too large");
+  set_offset_array(index, (u_char)offset);
+}
+
+void G1BlockOffsetSharedArray::set_offset_array(size_t left, size_t right, u_char offset) {
+  check_index(right, "right index out of range");
+  assert(left <= right, "indexes out of order");
+  size_t num_cards = right - left + 1;
+  if (UseMemSetInBOT) {
+    memset(&_offset_array[left], offset, num_cards);
+  } else {
+    size_t i = left;
+    const size_t end = i + num_cards;
+    for (; i < end; i++) {
+      _offset_array[i] = offset;
+    }
+  }
+}
+
+// Variant of index_for that does not check the index for validity.
+inline size_t G1BlockOffsetSharedArray::index_for_raw(const void* p) const {
+  return pointer_delta((char*)p, _reserved.start(), sizeof(char)) >> LogN;
+}
+
 inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
   char* pc = (char*)p;
   assert(pc >= (char*)_reserved.start() &&
          pc <  (char*)_reserved.end(),
          err_msg("p (" PTR_FORMAT ") not in reserved [" PTR_FORMAT ", " PTR_FORMAT ")",
                  p2i(p), p2i(_reserved.start()), p2i(_reserved.end())));
-  size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char));
-  size_t result = delta >> LogN;
+  size_t result = index_for_raw(p);
   check_index(result, "bad index from address");
   return result;
 }
@@ -60,7 +110,7 @@
 inline HeapWord*
 G1BlockOffsetSharedArray::address_for_index(size_t index) const {
   check_index(index, "index out of range");
-  HeapWord* result = _reserved.start() + (index << LogN_words);
+  HeapWord* result = address_for_index_raw(index);
   assert(result >= _reserved.start() && result < _reserved.end(),
          err_msg("bad address from index result " PTR_FORMAT
                  " _reserved.start() " PTR_FORMAT " _reserved.end() "
@@ -69,6 +119,13 @@
   return result;
 }
 
+#undef check_index
+
+inline size_t
+G1BlockOffsetArray::block_size(const HeapWord* p) const {
+  return gsp()->block_size(p);
+}
+
 inline HeapWord*
 G1BlockOffsetArray::block_at_or_preceding(const void* addr,
                                           bool has_max_index,
@@ -88,7 +145,7 @@
     // to go back by.
     size_t n_cards_back = BlockOffsetArray::entry_to_cards_back(offset);
     q -= (N_words * n_cards_back);
-    assert(q >= _sp->bottom(), "Went below bottom!");
+    assert(q >= gsp()->bottom(), "Went below bottom!");
     index -= n_cards_back;
     offset = _array->offset_array(index);
   }
@@ -101,21 +158,12 @@
 G1BlockOffsetArray::
 forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
                                        const void* addr) const {
-  if (csp() != NULL) {
-    if (addr >= csp()->top()) return csp()->top();
-    while (n <= addr) {
-      q = n;
-      oop obj = oop(q);
-      if (obj->klass_or_null() == NULL) return q;
-      n += obj->size();
-    }
-  } else {
-    while (n <= addr) {
-      q = n;
-      oop obj = oop(q);
-      if (obj->klass_or_null() == NULL) return q;
-      n += _sp->block_size(q);
-    }
+  if (addr >= gsp()->top()) return gsp()->top();
+  while (n <= addr) {
+    q = n;
+    oop obj = oop(q);
+    if (obj->klass_or_null() == NULL) return q;
+    n += block_size(q);
   }
   assert(q <= n, "wrong order for q and addr");
   assert(addr < n, "wrong order for addr and n");
@@ -126,7 +174,7 @@
 G1BlockOffsetArray::forward_to_block_containing_addr(HeapWord* q,
                                                      const void* addr) {
   if (oop(q)->klass_or_null() == NULL) return q;
-  HeapWord* n = q + _sp->block_size(q);
+  HeapWord* n = q + block_size(q);
   // In the normal case, where the query "addr" is a card boundary, and the
   // offset table chunks are the same size as cards, the block starting at
   // "q" will contain addr, so the test below will fail, and we'll fall
@@ -138,28 +186,4 @@
   return q;
 }
 
-//////////////////////////////////////////////////////////////////////////
-// BlockOffsetArrayNonContigSpace inlines
-//////////////////////////////////////////////////////////////////////////
-inline void G1BlockOffsetArray::freed(HeapWord* blk_start, HeapWord* blk_end) {
-  // Verify that the BOT shows [blk_start, blk_end) to be one block.
-  verify_single_block(blk_start, blk_end);
-  // adjust _unallocated_block upward or downward
-  // as appropriate
-  if (BlockOffsetArrayUseUnallocatedBlock) {
-    assert(_unallocated_block <= _end,
-           "Inconsistent value for _unallocated_block");
-    if (blk_end >= _unallocated_block && blk_start <= _unallocated_block) {
-      // CMS-specific note: a block abutting _unallocated_block to
-      // its left is being freed, a new block is being added or
-      // we are resetting following a compaction
-      _unallocated_block = blk_start;
-    }
-  }
-}
-
-inline void G1BlockOffsetArray::freed(HeapWord* blk, size_t size) {
-  freed(blk, blk + size);
-}
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP
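
The inline file keeps the two-step lookup: block_at_or_preceding() consults the offset array to find a block start at or before the card, and forward_to_block_containing_addr_const() then walks forward block by block (using block_size(), now routed through the owning G1OffsetTableContigSpace) until the block covering the query address is reached. Below is a toy, self-contained sketch of that forward walk over an array of block sizes; it is illustrative only and not taken from the sources.

// forward_to_block_containing_addr_const(), reduced to integers: start at the
// block the BOT hinted at and advance block-by-block until addr is covered.
#include <cassert>
#include <cstddef>

static size_t forward_to_block_containing(const size_t* block_size, size_t num_blocks,
                                          size_t q,      // hinted block start
                                          size_t addr) { // query address
  size_t n = q + block_size[0];            // end of the hinted block
  size_t i = 0;
  while (n <= addr && i + 1 < num_blocks) {
    q = n;                                 // move to the next block...
    ++i;
    n += block_size[i];                    // ...and extend to its end
  }
  assert(q <= addr && addr < n);           // addr lies inside [q, n)
  return q;                                // start of the block containing addr
}

int main() {
  const size_t sizes[] = {16, 8, 32, 4};   // consecutive block sizes from q onward
  // Blocks start at 0, 16, 24 and 56; address 30 falls in the third block.
  assert(forward_to_block_containing(sizes, 4, 0, 30) == 24);
  return 0;
}
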
--- a/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -33,31 +33,29 @@
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
+void G1CardCountsMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
+  if (zero_filled) {
+    return;
+  }
+  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
+  _counts->clear_range(mr);
+}
+
 void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
   if (has_count_table()) {
-    assert(from_card_num >= 0 && from_card_num < _committed_max_card_num,
-           err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
     assert(from_card_num < to_card_num,
            err_msg("Wrong order? from: " SIZE_FORMAT ", to: "SIZE_FORMAT,
                    from_card_num, to_card_num));
-    assert(to_card_num <= _committed_max_card_num,
-           err_msg("to card num out of range: "
-                   "to: "SIZE_FORMAT ", "
-                   "max: "SIZE_FORMAT,
-                   to_card_num, _committed_max_card_num));
-
-    to_card_num = MIN2(_committed_max_card_num, to_card_num);
-
     Copy::fill_to_bytes(&_card_counts[from_card_num], (to_card_num - from_card_num));
   }
 }
 
 G1CardCounts::G1CardCounts(G1CollectedHeap *g1h):
-  _g1h(g1h), _card_counts(NULL),
-  _reserved_max_card_num(0), _committed_max_card_num(0),
-  _committed_size(0) {}
+  _listener(), _g1h(g1h), _card_counts(NULL), _reserved_max_card_num(0) {
+  _listener.set_cardcounts(this);
+}
 
-void G1CardCounts::initialize() {
+void G1CardCounts::initialize(G1RegionToSpaceMapper* mapper) {
   assert(_g1h->max_capacity() > 0, "initialization order");
   assert(_g1h->capacity() == 0, "initialization order");
 
@@ -70,70 +68,9 @@
     _ct_bs = _g1h->g1_barrier_set();
     _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
 
-    // Allocate/Reserve the counts table
-    size_t reserved_bytes = _g1h->max_capacity();
-    _reserved_max_card_num = reserved_bytes >> CardTableModRefBS::card_shift;
-
-    size_t reserved_size = _reserved_max_card_num * sizeof(jbyte);
-    ReservedSpace rs(ReservedSpace::allocation_align_size_up(reserved_size));
-    if (!rs.is_reserved()) {
-      warning("Could not reserve enough space for the card counts table");
-      guarantee(!has_reserved_count_table(), "should be NULL");
-      return;
-    }
-
-    MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
-
-    _card_counts_storage.initialize(rs, 0);
-    _card_counts = (jubyte*) _card_counts_storage.low();
-  }
-}
-
-void G1CardCounts::resize(size_t heap_capacity) {
-  // Expand the card counts table to handle a heap with the given capacity.
-
-  if (!has_reserved_count_table()) {
-    // Don't expand if we failed to reserve the card counts table.
-    return;
-  }
-
-  assert(_committed_size ==
-         ReservedSpace::allocation_align_size_up(_committed_size),
-         err_msg("Unaligned? committed_size: " SIZE_FORMAT, _committed_size));
-
-  // Verify that the committed space for the card counts matches our
-  // committed max card num. Note for some allocation alignments, the
-  // amount of space actually committed for the counts table will be able
-  // to span more cards than the number spanned by the maximum heap.
-  size_t prev_committed_size = _committed_size;
-  size_t prev_committed_card_num = committed_to_card_num(prev_committed_size);
-
-  assert(prev_committed_card_num == _committed_max_card_num,
-         err_msg("Card mismatch: "
-                 "prev: " SIZE_FORMAT ", "
-                 "committed: "SIZE_FORMAT", "
-                 "reserved: "SIZE_FORMAT,
-                 prev_committed_card_num, _committed_max_card_num, _reserved_max_card_num));
-
-  size_t new_size = (heap_capacity >> CardTableModRefBS::card_shift) * sizeof(jbyte);
-  size_t new_committed_size = ReservedSpace::allocation_align_size_up(new_size);
-  size_t new_committed_card_num = committed_to_card_num(new_committed_size);
-
-  if (_committed_max_card_num < new_committed_card_num) {
-    // we need to expand the backing store for the card counts
-    size_t expand_size = new_committed_size - prev_committed_size;
-
-    if (!_card_counts_storage.expand_by(expand_size)) {
-      warning("Card counts table backing store commit failure");
-      return;
-    }
-    assert(_card_counts_storage.committed_size() == new_committed_size,
-           "expansion commit failure");
-
-    _committed_size = new_committed_size;
-    _committed_max_card_num = new_committed_card_num;
-
-    clear_range(prev_committed_card_num, _committed_max_card_num);
+    _card_counts = (jubyte*) mapper->reserved().start();
+    _reserved_max_card_num = mapper->reserved().byte_size();
+    mapper->set_mapping_changed_listener(&_listener);
   }
 }
 
@@ -149,12 +86,13 @@
   uint count = 0;
   if (has_count_table()) {
     size_t card_num = ptr_2_card_num(card_ptr);
-    if (card_num < _committed_max_card_num) {
-      count = (uint) _card_counts[card_num];
-      if (count < G1ConcRSHotCardLimit) {
-        _card_counts[card_num] =
-          (jubyte)(MIN2((uintx)(_card_counts[card_num] + 1), G1ConcRSHotCardLimit));
-      }
+    assert(card_num < _reserved_max_card_num,
+           err_msg("Card "SIZE_FORMAT" outside of card counts table (max size "SIZE_FORMAT")",
+                   card_num, _reserved_max_card_num));
+    count = (uint) _card_counts[card_num];
+    if (count < G1ConcRSHotCardLimit) {
+      _card_counts[card_num] =
+        (jubyte)(MIN2((uintx)(_card_counts[card_num] + 1), G1ConcRSHotCardLimit));
     }
   }
   return count;
@@ -165,31 +103,23 @@
 }
 
 void G1CardCounts::clear_region(HeapRegion* hr) {
-  assert(!hr->isHumongous(), "Should have been cleared");
-  if (has_count_table()) {
-    HeapWord* bottom = hr->bottom();
+  MemRegion mr(hr->bottom(), hr->end());
+  clear_range(mr);
+}
 
-    // We use the last address in hr as hr could be the
-    // last region in the heap. In which case trying to find
-    // the card for hr->end() will be an OOB accesss to the
-    // card table.
-    HeapWord* last = hr->end() - 1;
-    assert(_g1h->g1_committed().contains(last),
-           err_msg("last not in committed: "
-                   "last: " PTR_FORMAT ", "
-                   "committed: [" PTR_FORMAT ", " PTR_FORMAT ")",
-                   last,
-                   _g1h->g1_committed().start(),
-                   _g1h->g1_committed().end()));
-
-    const jbyte* from_card_ptr = _ct_bs->byte_for_const(bottom);
-    const jbyte* last_card_ptr = _ct_bs->byte_for_const(last);
+void G1CardCounts::clear_range(MemRegion mr) {
+  if (has_count_table()) {
+    const jbyte* from_card_ptr = _ct_bs->byte_for_const(mr.start());
+    // We use the last address in the range as the range could represent the
+    // last region in the heap, in which case looking up the card for mr.end()
+    // would be an out-of-bounds access to the card table.
+    const jbyte* last_card_ptr = _ct_bs->byte_for_const(mr.last());
 
 #ifdef ASSERT
     HeapWord* start_addr = _ct_bs->addr_for(from_card_ptr);
-    assert(start_addr == hr->bottom(), "alignment");
+    assert(start_addr == mr.start(), "MemRegion start must be aligned to a card.");
     HeapWord* last_addr = _ct_bs->addr_for(last_card_ptr);
-    assert((last_addr + CardTableModRefBS::card_size_in_words) == hr->end(), "alignment");
+    assert((last_addr + CardTableModRefBS::card_size_in_words) == mr.end(), "MemRegion end must be aligned to a card.");
 #endif // ASSERT
 
     // Clear the counts for the (exclusive) card range.
@@ -199,14 +129,22 @@
   }
 }
 
+class G1CardCountsClearClosure : public HeapRegionClosure {
+ private:
+  G1CardCounts* _card_counts;
+ public:
+  G1CardCountsClearClosure(G1CardCounts* card_counts) :
+    HeapRegionClosure(), _card_counts(card_counts) { }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    _card_counts->clear_region(r);
+    return false;
+  }
+};
+
 void G1CardCounts::clear_all() {
   assert(SafepointSynchronize::is_at_safepoint(), "don't call this otherwise");
-  clear_range((size_t)0, _committed_max_card_num);
+  G1CardCountsClearClosure cl(this);
+  _g1h->heap_region_iterate(&cl);
 }
-
-G1CardCounts::~G1CardCounts() {
-  if (has_reserved_count_table()) {
-    _card_counts_storage.release();
-  }
-}
-
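
With the VirtualSpace management gone, G1CardCounts reduces to a byte counter per card that add_card_count() bumps and saturates at G1ConcRSHotCardLimit, plus range clears driven by region iteration or the mapping-changed listener. The sketch below models that saturating-counter policy in isolation; the class, the names and the limit value are stand-ins, not the real G1 types or flag defaults.

// Minimal sketch of the hot-card counting policy (illustrative only).
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

class CardCounts {
  std::vector<uint8_t> _counts;
  unsigned _hot_limit;
 public:
  CardCounts(size_t num_cards, unsigned hot_limit)
    : _counts(num_cards, 0), _hot_limit(hot_limit) {}

  // Returns the pre-increment count, like G1CardCounts::add_card_count().
  unsigned add_card_count(size_t card_num) {
    unsigned count = _counts[card_num];
    if (count < _hot_limit) {
      _counts[card_num] = uint8_t(std::min<unsigned>(count + 1, _hot_limit));
    }
    return count;
  }

  bool is_hot(unsigned count) const { return count >= _hot_limit; }

  // clear_range()/clear_region() reduce to zeroing a span of counters.
  void clear_range(size_t from, size_t to) {
    std::fill(_counts.begin() + from, _counts.begin() + to, 0);
  }
};

int main() {
  CardCounts counts(1024, 4 /* stand-in for G1ConcRSHotCardLimit */);
  for (int i = 0; i < 6; i++) {
    unsigned before = counts.add_card_count(42);
    std::printf("refinement %d: previous count %u, hot=%d\n", i, before, counts.is_hot(before));
  }
  return 0;
}
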
--- a/src/share/vm/gc_implementation/g1/g1CardCounts.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CardCounts.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -25,14 +25,26 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
 
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "memory/allocation.hpp"
 #include "runtime/virtualspace.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 class CardTableModRefBS;
+class G1CardCounts;
 class G1CollectedHeap;
+class G1RegionToSpaceMapper;
 class HeapRegion;
 
+class G1CardCountsMappingChangedListener : public G1MappingChangedListener {
+ private:
+  G1CardCounts* _counts;
+ public:
+  void set_cardcounts(G1CardCounts* counts) { _counts = counts; }
+
+  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
+};
+
 // Table to track the number of times a card has been refined. Once
 // a card has been refined a certain number of times, it is
 // considered 'hot' and its refinement is delayed by inserting the
@@ -41,6 +53,8 @@
 // is 'drained' during the next evacuation pause.
 
 class G1CardCounts: public CHeapObj<mtGC> {
+  G1CardCountsMappingChangedListener _listener;
+
   G1CollectedHeap* _g1h;
 
   // The table of counts
@@ -49,27 +63,18 @@
   // Max capacity of the reserved space for the counts table
   size_t _reserved_max_card_num;
 
-  // Max capacity of the committed space for the counts table
-  size_t _committed_max_card_num;
-
-  // Size of committed space for the counts table
-  size_t _committed_size;
-
   // CardTable bottom.
   const jbyte* _ct_bot;
 
   // Barrier set
   CardTableModRefBS* _ct_bs;
 
-  // The virtual memory backing the counts table
-  VirtualSpace _card_counts_storage;
-
   // Returns true if the card counts table has been reserved.
   bool has_reserved_count_table() { return _card_counts != NULL; }
 
   // Returns true if the card counts table has been reserved and committed.
   bool has_count_table() {
-    return has_reserved_count_table() && _committed_max_card_num > 0;
+    return has_reserved_count_table();
   }
 
   size_t ptr_2_card_num(const jbyte* card_ptr) {
@@ -79,37 +84,24 @@
                    "_ct_bot: " PTR_FORMAT,
                    p2i(card_ptr), p2i(_ct_bot)));
     size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
-    assert(card_num >= 0 && card_num < _committed_max_card_num,
+    assert(card_num >= 0 && card_num < _reserved_max_card_num,
            err_msg("card pointer out of range: " PTR_FORMAT, p2i(card_ptr)));
     return card_num;
   }
 
   jbyte* card_num_2_ptr(size_t card_num) {
-    assert(card_num >= 0 && card_num < _committed_max_card_num,
+    assert(card_num >= 0 && card_num < _reserved_max_card_num,
            err_msg("card num out of range: "SIZE_FORMAT, card_num));
     return (jbyte*) (_ct_bot + card_num);
   }
 
-  // Helper routine.
-  // Returns the number of cards that can be counted by the given committed
-  // table size, with a maximum of the number of cards spanned by the max
-  // capacity of the heap.
-  size_t committed_to_card_num(size_t committed_size) {
-    return MIN2(_reserved_max_card_num, committed_size / sizeof(jbyte));
-  }
-
   // Clear the counts table for the given (exclusive) index range.
   void clear_range(size_t from_card_num, size_t to_card_num);
 
  public:
   G1CardCounts(G1CollectedHeap* g1h);
-  ~G1CardCounts();
 
-  void initialize();
-
-  // Resize the committed space for the card counts table in
-  // response to a resize of the committed space for the heap.
-  void resize(size_t heap_capacity);
+  void initialize(G1RegionToSpaceMapper* mapper);
 
   // Increments the refinement count for the given card.
   // Returns the pre-increment count value.
@@ -122,8 +114,10 @@
   // Clears the card counts for the cards spanned by the region
   void clear_region(HeapRegion* hr);
 
+  // Clears the card counts for the cards spanned by the MemRegion
+  void clear_range(MemRegion mr);
+
   // Clear the entire card counts table during GC.
-  // Updates the policy stats with the duration.
   void clear_all();
 };
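
The header wires the counts table to the heap's memory manager through an observer: initialize() hands the mapper a G1CardCountsMappingChangedListener, and the mapper invokes on_commit() whenever it commits region backing store, letting the table clear stale counts when the memory is not already zero-filled. A minimal sketch of that listener pattern follows; all types and method bodies are illustrative stand-ins for G1RegionToSpaceMapper and G1CardCounts, not the actual implementations.

// Sketch of the mapping-changed listener wiring (illustrative only).
#include <algorithm>
#include <cstddef>
#include <vector>

struct MappingChangedListener {
  virtual ~MappingChangedListener() {}
  virtual void on_commit(unsigned start_idx, size_t num_regions, bool zero_filled) = 0;
};

class RegionToSpaceMapper {
  MappingChangedListener* _listener;
 public:
  RegionToSpaceMapper() : _listener(nullptr) {}
  void set_mapping_changed_listener(MappingChangedListener* l) { _listener = l; }
  void commit_regions(unsigned start_idx, size_t num_regions, bool zero_filled) {
    // ... commit the backing memory, then notify the observer ...
    if (_listener != nullptr) {
      _listener->on_commit(start_idx, num_regions, zero_filled);
    }
  }
};

class CardCountsListener : public MappingChangedListener {
  std::vector<unsigned char>* _counts;
 public:
  explicit CardCountsListener(std::vector<unsigned char>* counts) : _counts(counts) {}
  virtual void on_commit(unsigned start_idx, size_t num_regions, bool zero_filled) {
    if (zero_filled) {
      return;   // freshly zeroed memory needs no explicit clearing
    }
    // A real implementation would translate (start_idx, num_regions) into a
    // card range; here the whole table stands in for that range.
    std::fill(_counts->begin(), _counts->end(), 0);
  }
};

int main() {
  std::vector<unsigned char> counts(512, 7);
  CardCountsListener listener(&counts);
  RegionToSpaceMapper mapper;
  mapper.set_mapping_changed_listener(&listener);
  mapper.commit_regions(0, 2, /* zero_filled */ false);   // counts are cleared
  return 0;
}
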
 
--- a/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -22,298 +22,386 @@
  *
  */
 
-
 #include "precompiled.hpp"
+#include "code/codeCache.hpp"
 #include "code/nmethod.hpp"
 #include "gc_implementation/g1/g1CodeCacheRemSet.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "memory/heap.hpp"
 #include "memory/iterator.hpp"
+#include "oops/oop.inline.hpp"
+#include "utilities/hashtable.inline.hpp"
+#include "utilities/stack.inline.hpp"
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
-G1CodeRootChunk::G1CodeRootChunk() : _top(NULL), _next(NULL), _prev(NULL) {
-  _top = bottom();
+class CodeRootSetTable : public Hashtable<nmethod*, mtGC> {
+  friend class G1CodeRootSetTest;
+  typedef HashtableEntry<nmethod*, mtGC> Entry;
+
+  static CodeRootSetTable* volatile _purge_list;
+
+  CodeRootSetTable* _purge_next;
+
+  unsigned int compute_hash(nmethod* nm) {
+    uintptr_t hash = (uintptr_t)nm;
+    return hash ^ (hash >> 7); // code heap blocks are 128-byte aligned
+  }
+
+  void remove_entry(Entry* e, Entry* previous);
+  Entry* new_entry(nmethod* nm);
+
+ public:
+  CodeRootSetTable(int size) : Hashtable<nmethod*, mtGC>(size, sizeof(Entry)), _purge_next(NULL) {}
+  ~CodeRootSetTable();
+
+  // Needs to be protected by locks
+  bool add(nmethod* nm);
+  bool remove(nmethod* nm);
+
+  // Can be called without locking
+  bool contains(nmethod* nm);
+
+  int entry_size() const { return BasicHashtable<mtGC>::entry_size(); }
+
+  void copy_to(CodeRootSetTable* new_table);
+  void nmethods_do(CodeBlobClosure* blk);
+
+  template<typename CB>
+  int remove_if(CB& should_remove);
+
+  static void purge_list_append(CodeRootSetTable* tbl);
+  static void purge();
+
+  static size_t static_mem_size() {
+    return sizeof(_purge_list);
+  }
+};
+
+CodeRootSetTable* volatile CodeRootSetTable::_purge_list = NULL;
+
+CodeRootSetTable::Entry* CodeRootSetTable::new_entry(nmethod* nm) {
+  unsigned int hash = compute_hash(nm);
+  Entry* entry = (Entry*) new_entry_free_list();
+  if (entry == NULL) {
+    entry = (Entry*) NEW_C_HEAP_ARRAY2(char, entry_size(), mtGC, CURRENT_PC);
+  }
+  entry->set_next(NULL);
+  entry->set_hash(hash);
+  entry->set_literal(nm);
+  return entry;
 }
 
-void G1CodeRootChunk::reset() {
-  _next = _prev = NULL;
-  _top = bottom();
+void CodeRootSetTable::remove_entry(Entry* e, Entry* previous) {
+  int index = hash_to_index(e->hash());
+  assert((e == bucket(index)) == (previous == NULL), "if e is the first entry then previous should be null");
+
+  if (previous == NULL) {
+    set_entry(index, e->next());
+  } else {
+    previous->set_next(e->next());
+  }
+  free_entry(e);
 }
 
-void G1CodeRootChunk::nmethods_do(CodeBlobClosure* cl) {
-  nmethod** cur = bottom();
-  while (cur != _top) {
-    cl->do_code_blob(*cur);
-    cur++;
+CodeRootSetTable::~CodeRootSetTable() {
+  for (int index = 0; index < table_size(); ++index) {
+    for (Entry* e = bucket(index); e != NULL; ) {
+      Entry* to_remove = e;
+      // read next before freeing.
+      e = e->next();
+      unlink_entry(to_remove);
+      FREE_C_HEAP_ARRAY(char, to_remove, mtGC);
+    }
+  }
+  assert(number_of_entries() == 0, "should have removed all entries");
+  free_buckets();
+  for (BasicHashtableEntry<mtGC>* e = new_entry_free_list(); e != NULL; e = new_entry_free_list()) {
+    FREE_C_HEAP_ARRAY(char, e, mtGC);
   }
 }
 
-FreeList<G1CodeRootChunk> G1CodeRootSet::_free_list;
-size_t G1CodeRootSet::_num_chunks_handed_out = 0;
+bool CodeRootSetTable::add(nmethod* nm) {
+  if (!contains(nm)) {
+    Entry* e = new_entry(nm);
+    int index = hash_to_index(e->hash());
+    add_entry(index, e);
+    return true;
+  }
+  return false;
+}
 
-G1CodeRootChunk* G1CodeRootSet::new_chunk() {
-  G1CodeRootChunk* result = _free_list.get_chunk_at_head();
-  if (result == NULL) {
-    result = new G1CodeRootChunk();
+bool CodeRootSetTable::contains(nmethod* nm) {
+  int index = hash_to_index(compute_hash(nm));
+  for (Entry* e = bucket(index); e != NULL; e = e->next()) {
+    if (e->literal() == nm) {
+      return true;
+    }
   }
-  G1CodeRootSet::_num_chunks_handed_out++;
-  result->reset();
-  return result;
+  return false;
 }
 
-void G1CodeRootSet::free_chunk(G1CodeRootChunk* chunk) {
-  _free_list.return_chunk_at_head(chunk);
-  G1CodeRootSet::_num_chunks_handed_out--;
+bool CodeRootSetTable::remove(nmethod* nm) {
+  int index = hash_to_index(compute_hash(nm));
+  Entry* previous = NULL;
+  for (Entry* e = bucket(index); e != NULL; previous = e, e = e->next()) {
+    if (e->literal() == nm) {
+      remove_entry(e, previous);
+      return true;
+    }
+  }
+  return false;
+}
+
+void CodeRootSetTable::copy_to(CodeRootSetTable* new_table) {
+  for (int index = 0; index < table_size(); ++index) {
+    for (Entry* e = bucket(index); e != NULL; e = e->next()) {
+      new_table->add(e->literal());
+    }
+  }
+  new_table->copy_freelist(this);
+}
+
+void CodeRootSetTable::nmethods_do(CodeBlobClosure* blk) {
+  for (int index = 0; index < table_size(); ++index) {
+    for (Entry* e = bucket(index); e != NULL; e = e->next()) {
+      blk->do_code_blob(e->literal());
+    }
+  }
 }
 
-void G1CodeRootSet::free_all_chunks(FreeList<G1CodeRootChunk>* list) {
-  G1CodeRootSet::_num_chunks_handed_out -= list->count();
-  _free_list.prepend(list);
+template<typename CB>
+int CodeRootSetTable::remove_if(CB& should_remove) {
+  int num_removed = 0;
+  for (int index = 0; index < table_size(); ++index) {
+    Entry* previous = NULL;
+    Entry* e = bucket(index);
+    while (e != NULL) {
+      Entry* next = e->next();
+      if (should_remove(e->literal())) {
+        remove_entry(e, previous);
+        ++num_removed;
+      } else {
+        previous = e;
+      }
+      e = next;
+    }
+  }
+  return num_removed;
+}
+
+G1CodeRootSet::~G1CodeRootSet() {
+  delete _table;
+}
+
+CodeRootSetTable* G1CodeRootSet::load_acquire_table() {
+  return (CodeRootSetTable*) OrderAccess::load_ptr_acquire(&_table);
+}
+
+void G1CodeRootSet::allocate_small_table() {
+  _table = new CodeRootSetTable(SmallSize);
 }
 
-void G1CodeRootSet::purge_chunks(size_t keep_ratio) {
-  size_t keep = G1CodeRootSet::_num_chunks_handed_out * keep_ratio / 100;
-
-  if (keep >= (size_t)_free_list.count()) {
-    return;
+void CodeRootSetTable::purge_list_append(CodeRootSetTable* table) {
+  for (;;) {
+    table->_purge_next = _purge_list;
+    CodeRootSetTable* old = (CodeRootSetTable*) Atomic::cmpxchg_ptr(table, &_purge_list, table->_purge_next);
+    if (old == table->_purge_next) {
+      break;
+    }
   }
+}
 
-  FreeList<G1CodeRootChunk> temp;
-  temp.initialize();
-  temp.set_size(G1CodeRootChunk::word_size());
-
-  _free_list.getFirstNChunksFromList((size_t)_free_list.count() - keep, &temp);
+void CodeRootSetTable::purge() {
+  CodeRootSetTable* table = _purge_list;
+  _purge_list = NULL;
+  while (table != NULL) {
+    CodeRootSetTable* to_purge = table;
+    table = table->_purge_next;
+    delete to_purge;
+  }
+}
 
-  G1CodeRootChunk* cur = temp.get_chunk_at_head();
-  while (cur != NULL) {
-    delete cur;
-    cur = temp.get_chunk_at_head();
-  }
+void G1CodeRootSet::move_to_large() {
+  CodeRootSetTable* temp = new CodeRootSetTable(LargeSize);
+
+  _table->copy_to(temp);
+
+  CodeRootSetTable::purge_list_append(_table);
+
+  OrderAccess::release_store_ptr(&_table, temp);
+}
+
+
+void G1CodeRootSet::purge() {
+  CodeRootSetTable::purge();
 }
 
 size_t G1CodeRootSet::static_mem_size() {
-  return sizeof(_free_list) + sizeof(_num_chunks_handed_out);
-}
-
-size_t G1CodeRootSet::fl_mem_size() {
-  return _free_list.count() * _free_list.size();
-}
-
-void G1CodeRootSet::initialize() {
-  _free_list.initialize();
-  _free_list.set_size(G1CodeRootChunk::word_size());
-}
-
-G1CodeRootSet::G1CodeRootSet() : _list(), _length(0) {
-  _list.initialize();
-  _list.set_size(G1CodeRootChunk::word_size());
-}
-
-G1CodeRootSet::~G1CodeRootSet() {
-  clear();
+  return CodeRootSetTable::static_mem_size();
 }
 
 void G1CodeRootSet::add(nmethod* method) {
-  if (!contains(method)) {
-    // Try to add the nmethod. If there is not enough space, get a new chunk.
-    if (_list.head() == NULL || _list.head()->is_full()) {
-      G1CodeRootChunk* cur = new_chunk();
-      _list.return_chunk_at_head(cur);
-    }
-    bool result = _list.head()->add(method);
-    guarantee(result, err_msg("Not able to add nmethod "PTR_FORMAT" to newly allocated chunk.", method));
-    _length++;
+  bool added = false;
+  if (is_empty()) {
+    allocate_small_table();
+  }
+  added = _table->add(method);
+  if (_length == Threshold) {
+    move_to_large();
+  }
+  if (added) {
+    ++_length;
   }
 }
 
-void G1CodeRootSet::remove(nmethod* method) {
-  G1CodeRootChunk* found = find(method);
-  if (found != NULL) {
-    bool result = found->remove(method);
-    guarantee(result, err_msg("could not find nmethod "PTR_FORMAT" during removal although we previously found it", method));
-    // eventually free completely emptied chunk
-    if (found->is_empty()) {
-      _list.remove_chunk(found);
-      free(found);
-    }
+bool G1CodeRootSet::remove(nmethod* method) {
+  bool removed = false;
+  if (_table != NULL) {
+    removed = _table->remove(method);
+  }
+  if (removed) {
     _length--;
-  }
-  assert(!contains(method), err_msg(PTR_FORMAT" still contains nmethod "PTR_FORMAT, this, method));
-}
-
-nmethod* G1CodeRootSet::pop() {
-  do {
-    G1CodeRootChunk* cur = _list.head();
-    if (cur == NULL) {
-      assert(_length == 0, "when there are no chunks, there should be no elements");
-      return NULL;
+    if (_length == 0) {
+      clear();
     }
-    nmethod* result = cur->pop();
-    if (result != NULL) {
-      _length--;
-      return result;
-    } else {
-      free(_list.get_chunk_at_head());
-    }
-  } while (true);
-}
-
-G1CodeRootChunk* G1CodeRootSet::find(nmethod* method) {
-  G1CodeRootChunk* cur = _list.head();
-  while (cur != NULL) {
-    if (cur->contains(method)) {
-      return cur;
-    }
-    cur = (G1CodeRootChunk*)cur->next();
   }
-  return NULL;
-}
-
-void G1CodeRootSet::free(G1CodeRootChunk* chunk) {
-  free_chunk(chunk);
+  return removed;
 }
 
 bool G1CodeRootSet::contains(nmethod* method) {
-  return find(method) != NULL;
+  CodeRootSetTable* table = load_acquire_table();
+  if (table != NULL) {
+    return table->contains(method);
+  }
+  return false;
 }
 
 void G1CodeRootSet::clear() {
-  free_all_chunks(&_list);
+  delete _table;
+  _table = NULL;
   _length = 0;
 }
 
+size_t G1CodeRootSet::mem_size() {
+  return sizeof(*this) +
+      (_table != NULL ? sizeof(CodeRootSetTable) + _table->entry_size() * _length : 0);
+}
+
 void G1CodeRootSet::nmethods_do(CodeBlobClosure* blk) const {
-  G1CodeRootChunk* cur = _list.head();
-  while (cur != NULL) {
-    cur->nmethods_do(blk);
-    cur = (G1CodeRootChunk*)cur->next();
+  if (_table != NULL) {
+    _table->nmethods_do(blk);
   }
 }
 
-size_t G1CodeRootSet::mem_size() {
-  return sizeof(this) + _list.count() * _list.size();
+class CleanCallback : public StackObj {
+  class PointsIntoHRDetectionClosure : public OopClosure {
+    HeapRegion* _hr;
+   public:
+    bool _points_into;
+    PointsIntoHRDetectionClosure(HeapRegion* hr) : _hr(hr), _points_into(false) {}
+
+    void do_oop(narrowOop* o) {
+      do_oop_work(o);
+    }
+
+    void do_oop(oop* o) {
+      do_oop_work(o);
+    }
+
+    template <typename T>
+    void do_oop_work(T* p) {
+      if (_hr->is_in(oopDesc::load_decode_heap_oop(p))) {
+        _points_into = true;
+      }
+    }
+  };
+
+  PointsIntoHRDetectionClosure _detector;
+  CodeBlobToOopClosure _blobs;
+
+ public:
+  CleanCallback(HeapRegion* hr) : _detector(hr), _blobs(&_detector, !CodeBlobToOopClosure::FixRelocations) {}
+
+  bool operator() (nmethod* nm) {
+    _detector._points_into = false;
+    _blobs.do_code_blob(nm);
+    return !_detector._points_into;
+  }
+};
+
+void G1CodeRootSet::clean(HeapRegion* owner) {
+  CleanCallback should_clean(owner);
+  if (_table != NULL) {
+    int removed = _table->remove_if(should_clean);
+    assert((size_t)removed <= _length, "impossible");
+    _length -= removed;
+  }
+  if (_length == 0) {
+    clear();
+  }
 }
 
 #ifndef PRODUCT
 
-void G1CodeRootSet::test() {
-  initialize();
-
-  assert(_free_list.count() == 0, "Free List must be empty");
-  assert(_num_chunks_handed_out == 0, "No elements must have been handed out yet");
+class G1CodeRootSetTest {
+ public:
+  static void test() {
+    {
+      G1CodeRootSet set1;
+      assert(set1.is_empty(), "Code root set must be initially empty but is not.");
 
-  // The number of chunks that we allocate for purge testing.
-  size_t const num_chunks = 10;
-  {
-    G1CodeRootSet set1;
-    assert(set1.is_empty(), "Code root set must be initially empty but is not.");
+      assert(G1CodeRootSet::static_mem_size() == sizeof(void*),
+          err_msg("The code root set's static memory usage is incorrect, "SIZE_FORMAT" bytes", G1CodeRootSet::static_mem_size()));
 
-    set1.add((nmethod*)1);
-    assert(_num_chunks_handed_out == 1,
-           err_msg("Must have allocated and handed out one chunk, but handed out "
-                   SIZE_FORMAT" chunks", _num_chunks_handed_out));
-    assert(set1.length() == 1, err_msg("Added exactly one element, but set contains "
-                                       SIZE_FORMAT" elements", set1.length()));
+      set1.add((nmethod*)1);
+      assert(set1.length() == 1, err_msg("Added exactly one element, but set contains "
+          SIZE_FORMAT" elements", set1.length()));
+
+      const size_t num_to_add = (size_t)G1CodeRootSet::Threshold + 1;
 
-    // G1CodeRootChunk::word_size() is larger than G1CodeRootChunk::num_entries which
-    // we cannot access.
-    for (uint i = 0; i < G1CodeRootChunk::word_size() + 1; i++) {
-      set1.add((nmethod*)1);
-    }
-    assert(_num_chunks_handed_out == 1,
-           err_msg("Duplicate detection must have prevented allocation of further "
-                   "chunks but contains "SIZE_FORMAT, _num_chunks_handed_out));
-    assert(set1.length() == 1,
-           err_msg("Duplicate detection should not have increased the set size but "
-                   "is "SIZE_FORMAT, set1.length()));
+      for (size_t i = 1; i <= num_to_add; i++) {
+        set1.add((nmethod*)1);
+      }
+      assert(set1.length() == 1,
+          err_msg("Duplicate detection should not have increased the set size but "
+              "is "SIZE_FORMAT, set1.length()));
 
-    size_t num_total_after_add = G1CodeRootChunk::word_size() + 1;
-    for (size_t i = 0; i < num_total_after_add - 1; i++) {
-      set1.add((nmethod*)(2 + i));
-    }
-    assert(_num_chunks_handed_out > 1,
-           "After adding more code roots, more than one chunks should have been handed out");
-    assert(set1.length() == num_total_after_add,
-           err_msg("After adding in total "SIZE_FORMAT" distinct code roots, they "
-                   "need to be in the set, but there are only "SIZE_FORMAT,
-                   num_total_after_add, set1.length()));
+      for (size_t i = 2; i <= num_to_add; i++) {
+        set1.add((nmethod*)(uintptr_t)(i));
+      }
+      assert(set1.length() == num_to_add,
+          err_msg("After adding in total "SIZE_FORMAT" distinct code roots, they "
+              "need to be in the set, but there are only "SIZE_FORMAT,
+              num_to_add, set1.length()));
+
+      assert(CodeRootSetTable::_purge_list != NULL, "should have grown to large hashtable");
 
-    size_t num_popped = 0;
-    while (set1.pop() != NULL) {
-      num_popped++;
-    }
-    assert(num_popped == num_total_after_add,
-           err_msg("Managed to pop "SIZE_FORMAT" code roots, but only "SIZE_FORMAT" "
-                   "were added", num_popped, num_total_after_add));
-    assert(_num_chunks_handed_out == 0,
-           err_msg("After popping all elements, all chunks must have been returned "
-                   "but are still "SIZE_FORMAT, _num_chunks_handed_out));
+      size_t num_popped = 0;
+      for (size_t i = 1; i <= num_to_add; i++) {
+        bool removed = set1.remove((nmethod*)i);
+        if (removed) {
+          num_popped += 1;
+        } else {
+          break;
+        }
+      }
+      assert(num_popped == num_to_add,
+          err_msg("Managed to pop "SIZE_FORMAT" code roots, but only "SIZE_FORMAT" "
+              "were added", num_popped, num_to_add));
+      assert(CodeRootSetTable::_purge_list != NULL, "should have grown to large hashtable");
 
-    purge_chunks(0);
-    assert(_free_list.count() == 0,
-           err_msg("After purging everything, the free list must be empty but still "
-                   "contains "SIZE_FORMAT" chunks", _free_list.count()));
+      G1CodeRootSet::purge();
 
-    // Add some more handed out chunks.
-    size_t i = 0;
-    while (_num_chunks_handed_out < num_chunks) {
-      set1.add((nmethod*)i);
-      i++;
+      assert(CodeRootSetTable::_purge_list == NULL, "should have purged old small tables");
+
     }
 
-    {
-      // Generate chunks on the free list.
-      G1CodeRootSet set2;
-      size_t i = 0;
-      while (_num_chunks_handed_out < num_chunks * 2) {
-        set2.add((nmethod*)i);
-        i++;
-      }
-      // Exit of the scope of the set2 object will call the destructor that generates
-      // num_chunks elements on the free list.
-    }
-
-    assert(_num_chunks_handed_out == num_chunks,
-           err_msg("Deletion of the second set must have resulted in giving back "
-                   "those, but there is still "SIZE_FORMAT" handed out, expecting "
-                   SIZE_FORMAT, _num_chunks_handed_out, num_chunks));
-    assert((size_t)_free_list.count() == num_chunks,
-           err_msg("After freeing "SIZE_FORMAT" chunks, they must be on the free list "
-                   "but there are only "SIZE_FORMAT, num_chunks, _free_list.count()));
+  }
+};
 
-    size_t const test_percentage = 50;
-    purge_chunks(test_percentage);
-    assert(_num_chunks_handed_out == num_chunks,
-           err_msg("Purging must not hand out chunks but there are "SIZE_FORMAT,
-                   _num_chunks_handed_out));
-    assert((size_t)_free_list.count() == (ssize_t)(num_chunks * test_percentage / 100),
-           err_msg("Must have purged "SIZE_FORMAT" percent of "SIZE_FORMAT" chunks"
-                   "but there are "SSIZE_FORMAT, test_percentage, num_chunks,
-                   _free_list.count()));
-    // Purge the remainder of the chunks on the free list.
-    purge_chunks(0);
-    assert(_free_list.count() == 0, "Free List must be empty");
-    assert(_num_chunks_handed_out == num_chunks,
-           err_msg("Expected to be "SIZE_FORMAT" chunks handed out from the first set "
-                   "but there are "SIZE_FORMAT, num_chunks, _num_chunks_handed_out));
-
-    // Exit of the scope of the set1 object will call the destructor that generates
-    // num_chunks additional elements on the free list.
-  }
-
-  assert(_num_chunks_handed_out == 0,
-         err_msg("Deletion of the only set must have resulted in no chunks handed "
-                 "out, but there is still "SIZE_FORMAT" handed out", _num_chunks_handed_out));
-  assert((size_t)_free_list.count() == num_chunks,
-         err_msg("After freeing "SIZE_FORMAT" chunks, they must be on the free list "
-                 "but there are only "SSIZE_FORMAT, num_chunks, _free_list.count()));
-
-  // Restore initial state.
-  purge_chunks(0);
-  assert(_free_list.count() == 0, "Free List must be empty");
-  assert(_num_chunks_handed_out == 0, "No elements must have been handed out yet");
+void TestCodeCacheRemSet_test() {
+  G1CodeRootSetTest::test();
 }
 
-void TestCodeCacheRemSet_test() {
-  G1CodeRootSet::test();
-}
 #endif
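
Retired small tables are not freed immediately: move_to_large() pushes the old CodeRootSetTable onto a global purge list with a CAS loop, and purge() later deletes the whole list once no reader can still hold a stale pointer. The sketch below reproduces that append/purge pattern with std::atomic for a self-contained illustration; the real code uses HotSpot's Atomic::cmpxchg_ptr and a plain pointer field.

// CAS-based purge-list append and bulk purge (illustrative only).
#include <atomic>
#include <cassert>

struct Table {
  Table* _purge_next;
  Table() : _purge_next(nullptr) {}
};

static std::atomic<Table*> g_purge_list{nullptr};

void purge_list_append(Table* table) {
  Table* head = g_purge_list.load(std::memory_order_relaxed);
  for (;;) {
    table->_purge_next = head;     // link the new node in front of the current head
    // Publish the node; on failure 'head' is reloaded and we retry.
    if (g_purge_list.compare_exchange_weak(head, table,
                                           std::memory_order_release,
                                           std::memory_order_relaxed)) {
      break;
    }
  }
}

void purge_all() {
  Table* table = g_purge_list.exchange(nullptr);
  while (table != nullptr) {
    Table* to_purge = table;
    table = table->_purge_next;
    delete to_purge;               // safe once no reader can still see the old table
  }
}

int main() {
  purge_list_append(new Table());
  purge_list_append(new Table());
  purge_all();
  assert(g_purge_list.load() == nullptr);
  return 0;
}
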
--- a/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,155 +26,57 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1CODECACHEREMSET_HPP
 
 #include "memory/allocation.hpp"
-#include "memory/freeList.hpp"
-#include "runtime/globals.hpp"
 
 class CodeBlobClosure;
-
-class G1CodeRootChunk : public CHeapObj<mtGC> {
- private:
-  static const int NUM_ENTRIES = 32;
- public:
-  G1CodeRootChunk*     _next;
-  G1CodeRootChunk*     _prev;
-
-  nmethod** _top;
-
-  nmethod* _data[NUM_ENTRIES];
-
-  nmethod** bottom() const {
-    return (nmethod**) &(_data[0]);
-  }
-
-  nmethod** end() const {
-    return (nmethod**) &(_data[NUM_ENTRIES]);
-  }
-
- public:
-  G1CodeRootChunk();
-  ~G1CodeRootChunk() {}
-
-  static size_t word_size() { return (size_t)(align_size_up_(sizeof(G1CodeRootChunk), HeapWordSize) / HeapWordSize); }
-
-  // FreeList "interface" methods
-
-  G1CodeRootChunk* next() const         { return _next; }
-  G1CodeRootChunk* prev() const         { return _prev; }
-  void set_next(G1CodeRootChunk* v)     { _next = v; assert(v != this, "Boom");}
-  void set_prev(G1CodeRootChunk* v)     { _prev = v; assert(v != this, "Boom");}
-  void clear_next()       { set_next(NULL); }
-  void clear_prev()       { set_prev(NULL); }
-
-  size_t size() const { return word_size(); }
-
-  void link_next(G1CodeRootChunk* ptr)  { set_next(ptr); }
-  void link_prev(G1CodeRootChunk* ptr)  { set_prev(ptr); }
-  void link_after(G1CodeRootChunk* ptr) {
-    link_next(ptr);
-    if (ptr != NULL) ptr->link_prev((G1CodeRootChunk*)this);
-  }
-
-  bool is_free()                 { return true; }
-
-  // New G1CodeRootChunk routines
-
-  void reset();
-
-  bool is_empty() const {
-    return _top == bottom();
-  }
-
-  bool is_full() const {
-    return _top == (nmethod**)end();
-  }
-
-  bool contains(nmethod* method) {
-    nmethod** cur = bottom();
-    while (cur != _top) {
-      if (*cur == method) return true;
-      cur++;
-    }
-    return false;
-  }
-
-  bool add(nmethod* method) {
-    if (is_full()) return false;
-    *_top = method;
-    _top++;
-    return true;
-  }
-
-  bool remove(nmethod* method) {
-    nmethod** cur = bottom();
-    while (cur != _top) {
-      if (*cur == method) {
-        memmove(cur, cur + 1, (_top - (cur + 1)) * sizeof(nmethod**));
-        _top--;
-        return true;
-      }
-      cur++;
-    }
-    return false;
-  }
-
-  void nmethods_do(CodeBlobClosure* blk);
-
-  nmethod* pop() {
-    if (is_empty()) {
-      return NULL;
-    }
-    _top--;
-    return *_top;
-  }
-};
+class CodeRootSetTable;
+class HeapRegion;
+class nmethod;
 
 // Implements storage for a set of code roots.
 // All methods that modify the set are not thread-safe except if otherwise noted.
 class G1CodeRootSet VALUE_OBJ_CLASS_SPEC {
+  friend class G1CodeRootSetTest;
  private:
-  // Global free chunk list management
-  static FreeList<G1CodeRootChunk> _free_list;
-  // Total number of chunks handed out
-  static size_t _num_chunks_handed_out;
 
-  static G1CodeRootChunk* new_chunk();
-  static void free_chunk(G1CodeRootChunk* chunk);
-  // Free all elements of the given list.
-  static void free_all_chunks(FreeList<G1CodeRootChunk>* list);
+  const static size_t SmallSize = 32;
+  const static size_t Threshold = 24;
+  const static size_t LargeSize = 512;
 
-  // Return the chunk that contains the given nmethod, NULL otherwise.
-  // Scans the list of chunks backwards, as this method is used to add new
-  // entries, which are typically added in bulk for a single nmethod.
-  G1CodeRootChunk* find(nmethod* method);
-  void free(G1CodeRootChunk* chunk);
+  CodeRootSetTable* _table;
+  CodeRootSetTable* load_acquire_table();
 
   size_t _length;
-  FreeList<G1CodeRootChunk> _list;
+
+  void move_to_large();
+  void allocate_small_table();
 
  public:
-  G1CodeRootSet();
+  G1CodeRootSet() : _table(NULL), _length(0) {}
   ~G1CodeRootSet();
 
-  static void initialize();
-  static void purge_chunks(size_t keep_ratio);
+  static void purge();
 
   static size_t static_mem_size();
-  static size_t fl_mem_size();
 
-  // Search for the code blob from the recently allocated ones to find duplicates more quickly, as this
-  // method is likely to be repeatedly called with the same nmethod.
   void add(nmethod* method);
 
-  void remove(nmethod* method);
-  nmethod* pop();
+  bool remove(nmethod* method);
 
+  // Safe to call without synchronization, but may return false negatives.
   bool contains(nmethod* method);
 
   void clear();
 
   void nmethods_do(CodeBlobClosure* blk) const;
 
-  bool is_empty() { return length() == 0; }
+  // Remove all nmethods which no longer contain pointers into our "owner" region
+  void clean(HeapRegion* owner);
+
+  bool is_empty() {
+    bool empty = length() == 0;
+    assert(empty == (_table == NULL), "is empty only if table is deallocated");
+    return empty;
+  }
 
   // Length in elements
   size_t length() const { return _length; }
@@ -182,7 +84,6 @@
   // Memory size in bytes taken by this set.
   size_t mem_size();
 
-  static void test() PRODUCT_RETURN;
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1CODECACHEREMSET_HPP
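
The header's note that contains() is "safe to call without synchronization, but may return false negatives" rests on a publication protocol: writers mutate the table under the owner's lock, and when the set grows they build the large table first, then release-store the new pointer, while contains() only ever acquire-loads whichever table is currently published; the superseded table goes to the purge list rather than being deleted under a racing reader. The following sketch shows that pointer-swap idiom with std::atomic standing in for HotSpot's OrderAccess; the container and class are simplified stand-ins, not the HotSpot hashtable.

// Release-publish / acquire-read table swap (illustrative only).
#include <atomic>
#include <vector>

struct Table {
  std::vector<const void*> entries;
};

class GrowableSet {
  std::atomic<Table*> _table{nullptr};
  std::vector<Table*> _purge_list;   // the real code uses a global lock-free list

 public:
  // Lock-free reader: may observe the table from just before a grow and thus
  // miss a very recent addition (a false negative), but never a freed table.
  bool contains(const void* nm) const {
    Table* t = _table.load(std::memory_order_acquire);   // pairs with the release below
    if (t == nullptr) return false;
    for (const void* e : t->entries) {
      if (e == nm) return true;
    }
    return false;
  }

  // Writer side; assumed to run under the owning region's lock.
  void move_to_large() {
    Table* old_table = _table.load(std::memory_order_relaxed);
    Table* new_table = new Table();
    if (old_table != nullptr) {
      new_table->entries = old_table->entries;   // copy_to(): rebuild in the new table
      _purge_list.push_back(old_table);          // defer the delete until purge()
    }
    _table.store(new_table, std::memory_order_release);   // publish the new table
  }
};

int main() {
  GrowableSet set;
  set.move_to_large();                 // installs the first (empty) table
  return set.contains(nullptr) ? 1 : 0;
}
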
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -27,6 +27,7 @@
 #endif
 
 #include "precompiled.hpp"
+#include "classfile/metadataOnStackMark.hpp"
 #include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
 #include "gc_implementation/g1/bufferingOopClosure.hpp"
@@ -42,26 +43,29 @@
 #include "gc_implementation/g1/g1Log.hpp"
 #include "gc_implementation/g1/g1MarkSweep.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
+#include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "gc_implementation/g1/g1StringDedup.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
 #include "gc_implementation/g1/heapRegion.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
 #include "gc_implementation/g1/vm_operations_g1.hpp"
 #include "gc_implementation/shared/gcHeapSummary.hpp"
 #include "gc_implementation/shared/gcTimer.hpp"
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_implementation/shared/isGCActiveMark.hpp"
+#include "memory/allocation.hpp"
 #include "memory/gcLocker.inline.hpp"
 #include "memory/generationSpec.hpp"
 #include "memory/iterator.hpp"
 #include "memory/referenceProcessor.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oop.pcgc.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/vmThread.hpp"
-#include "utilities/ticks.hpp"
 
 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
 
@@ -86,66 +90,64 @@
 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
 // The number of GC workers is passed to heap_region_par_iterate_chunked().
 // It does use run_task() which sets _n_workers in the task.
-// G1ParTask executes g1_process_strong_roots() ->
-// SharedHeap::process_strong_roots() which calls eventually to
+// G1ParTask executes g1_process_roots() ->
+// SharedHeap::process_roots() which calls eventually to
 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
-// SequentialSubTasksDone.  SharedHeap::process_strong_roots() also
+// SequentialSubTasksDone.  SharedHeap::process_roots() also
 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
 //
 
 // Local to this file.
 
 class RefineCardTableEntryClosure: public CardTableEntryClosure {
-  SuspendibleThreadSet* _sts;
-  G1RemSet* _g1rs;
-  ConcurrentG1Refine* _cg1r;
   bool _concurrent;
 public:
-  RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
-                              G1RemSet* g1rs,
-                              ConcurrentG1Refine* cg1r) :
-    _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
-  {}
+  RefineCardTableEntryClosure() : _concurrent(true) { }
+
   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
-    bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false);
+    bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, false);
     // This path is executed by the concurrent refine or mutator threads,
     // concurrently, and so we do not care if card_ptr contains references
     // that point into the collection set.
     assert(!oops_into_cset, "should be");
 
-    if (_concurrent && _sts->should_yield()) {
+    if (_concurrent && SuspendibleThreadSet::should_yield()) {
       // Caller will actually yield.
       return false;
     }
     // Otherwise, we finished successfully; return true.
     return true;
   }
+
   void set_concurrent(bool b) { _concurrent = b; }
 };
 
 
 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
-  int _calls;
-  G1CollectedHeap* _g1h;
+  size_t _num_processed;
   CardTableModRefBS* _ctbs;
   int _histo[256];
-public:
+
+ public:
   ClearLoggedCardTableEntryClosure() :
-    _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set())
+    _num_processed(0), _ctbs(G1CollectedHeap::heap()->g1_barrier_set())
   {
     for (int i = 0; i < 256; i++) _histo[i] = 0;
   }
+
   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
-    if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
-      _calls++;
-      unsigned char* ujb = (unsigned char*)card_ptr;
-      int ind = (int)(*ujb);
-      _histo[ind]++;
-      *card_ptr = -1;
-    }
+    unsigned char* ujb = (unsigned char*)card_ptr;
+    int ind = (int)(*ujb);
+    _histo[ind]++;
+
+    *card_ptr = (jbyte)CardTableModRefBS::clean_card_val();
+    _num_processed++;
+
     return true;
   }
-  int calls() { return _calls; }
+
+  size_t num_processed() { return _num_processed; }
+
   void print_histo() {
     gclog_or_tty->print_cr("Card table value histogram:");
     for (int i = 0; i < 256; i++) {
@@ -156,22 +158,20 @@
   }
 };
 
-class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
-  int _calls;
-  G1CollectedHeap* _g1h;
-  CardTableModRefBS* _ctbs;
-public:
-  RedirtyLoggedCardTableEntryClosure() :
-    _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) {}
+class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
+ private:
+  size_t _num_processed;
+
+ public:
+  RedirtyLoggedCardTableEntryClosure() : CardTableEntryClosure(), _num_processed(0) { }
 
   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
-    if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
-      _calls++;
-      *card_ptr = 0;
-    }
+    *card_ptr = CardTableModRefBS::dirty_card_val();
+    _num_processed++;
     return true;
   }
-  int calls() { return _calls; }
+
+  size_t num_processed() const { return _num_processed; }
 };
 
 YoungList::YoungList(G1CollectedHeap* g1h) :
@@ -208,7 +208,10 @@
     HeapRegion* next = list->get_next_young_region();
     list->set_next_young_region(NULL);
     list->uninstall_surv_rate_group();
-    list->set_not_young();
+    // This is called before a Full GC, and all the non-empty /
+    // non-humongous regions at the end of the Full GC will end up as
+    // old anyway.
+    list->set_old();
     list = next;
   }
 }
@@ -367,7 +370,7 @@
     if (curr == NULL)
       gclog_or_tty->print_cr("  empty");
     while (curr != NULL) {
-      gclog_or_tty->print_cr("  "HR_FORMAT", P: "PTR_FORMAT "N: "PTR_FORMAT", age: %4d",
+      gclog_or_tty->print_cr("  "HR_FORMAT", P: "PTR_FORMAT ", N: "PTR_FORMAT", age: %4d",
                              HR_FORMAT_PARAMS(curr),
                              curr->prev_top_at_mark_start(),
                              curr->next_top_at_mark_start(),
@@ -379,6 +382,16 @@
   gclog_or_tty->cr();
 }
 
+void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
+  OtherRegionsTable::invalidate(start_idx, num_regions);
+}
+
+void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
+  // The from card cache is not the memory that is actually committed. So we cannot
+  // take advantage of the zero_filled parameter.
+  reset_from_card_cache(start_idx, num_regions);
+}
+
 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
 {
   // Claim the right to put the region on the dirty cards region list
@@ -444,24 +457,18 @@
 // implementation of is_scavengable() for G1 will indicate that
 // all nmethods must be scanned during a partial collection.
 bool G1CollectedHeap::is_in_partial_collection(const void* p) {
-  HeapRegion* hr = heap_region_containing(p);
-  return hr != NULL && hr->in_collection_set();
+  if (p == NULL) {
+    return false;
+  }
+  return heap_region_containing(p)->in_collection_set();
 }
 #endif
 
 // Returns true if the reference points to an object that
 // can move in an incremental collection.
 bool G1CollectedHeap::is_scavengable(const void* p) {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1CollectorPolicy* g1p = g1h->g1_policy();
   HeapRegion* hr = heap_region_containing(p);
-  if (hr == NULL) {
-     // null
-     assert(p == NULL, err_msg("Not NULL " PTR_FORMAT ,p));
-     return false;
-  } else {
-    return !hr->isHumongous();
-  }
+  return !hr->isHumongous();
 }
 
 void G1CollectedHeap::check_ct_logs_at_safepoint() {
@@ -475,9 +482,8 @@
 
   // First clear the logged cards.
   ClearLoggedCardTableEntryClosure clear;
-  dcqs.set_closure(&clear);
-  dcqs.apply_closure_to_all_completed_buffers();
-  dcqs.iterate_closure_all_threads(false);
+  dcqs.apply_closure_to_all_completed_buffers(&clear);
+  dcqs.iterate_closure_all_threads(&clear, false);
   clear.print_histo();
 
   // Now ensure that there's no dirty cards.
@@ -490,13 +496,13 @@
   guarantee(count2.n() == 0, "Card table should be clean.");
 
   RedirtyLoggedCardTableEntryClosure redirty;
-  JavaThread::dirty_card_queue_set().set_closure(&redirty);
-  dcqs.apply_closure_to_all_completed_buffers();
-  dcqs.iterate_closure_all_threads(false);
+  dcqs.apply_closure_to_all_completed_buffers(&redirty);
+  dcqs.iterate_closure_all_threads(&redirty, false);
   gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
-                         clear.calls(), orig_count);
-  guarantee(redirty.calls() == clear.calls(),
-            "Or else mechanism is broken.");
+                         clear.num_processed(), orig_count);
+  guarantee(redirty.num_processed() == clear.num_processed(),
+            err_msg("Redirtied "SIZE_FORMAT" cards, but cleared "SIZE_FORMAT,
+                    redirty.num_processed(), clear.num_processed()));
 
   CountNonCleanMemRegionClosure count3(this);
   ct_bs->mod_card_iterate(&count3);
@@ -505,8 +511,6 @@
                            orig_count, count3.n());
     guarantee(count3.n() >= orig_count, "Should have restored them all.");
   }
-
-  JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
 }
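
A note on the pattern change in this function: the dirty card queue set no longer stores one global closure via set_closure() (see the removed lines); each traversal now receives its closure as an argument, so the clearing and redirtying closures can be applied back-to-back without mutating shared queue-set state in between. The fragment below is only a simplified, self-contained illustration of that calling convention; CardClosureSketch and DirtyCardQueueSetSketch are hypothetical and not the HotSpot DirtyCardQueueSet API.

#include <cstddef>
#include <vector>

struct CardClosureSketch {
  virtual ~CardClosureSketch() {}
  virtual bool do_card_ptr(signed char* card_ptr, unsigned worker_i) = 0;
};

struct DirtyCardQueueSetSketch {
  std::vector<signed char*> _completed_buffers;

  // The closure is a parameter of the traversal rather than stored state.
  void apply_closure_to_all_completed_buffers(CardClosureSketch* cl) {
    for (size_t i = 0; i < _completed_buffers.size(); i++) {
      cl->do_card_ptr(_completed_buffers[i], 0 /* worker id */);
    }
  }
};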
 
 // Private class members.
@@ -530,9 +534,9 @@
       // again to allocate from it.
       append_secondary_free_list();
 
-      assert(!_free_list.is_empty(), "if the secondary_free_list was not "
+      assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
              "empty we should have moved at least one entry to the free_list");
-      HeapRegion* res = _free_list.remove_region(is_old);
+      HeapRegion* res = _hrm.allocate_free_region(is_old);
       if (G1ConcRegionFreeingVerbose) {
         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                                "allocated "HR_FORMAT" from secondary_free_list",
@@ -573,7 +577,7 @@
     }
   }
 
-  res = _free_list.remove_region(is_old);
+  res = _hrm.allocate_free_region(is_old);
 
   if (res == NULL) {
     if (G1ConcRegionFreeingVerbose) {
@@ -598,8 +602,8 @@
       // Given that expand() succeeded in expanding the heap, and we
       // always expand the heap by an amount aligned to the heap
       // region size, the free list should in theory not be empty.
-      // In either case remove_region() will check for NULL.
-      res = _free_list.remove_region(is_old);
+      // In either case allocate_free_region() will check for NULL.
+      res = _hrm.allocate_free_region(is_old);
     } else {
       _expand_heap_after_alloc_failure = false;
     }
@@ -607,55 +611,12 @@
   return res;
 }
 
-uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
-                                                        size_t word_size) {
-  assert(isHumongous(word_size), "word_size should be humongous");
-  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
-
-  uint first = G1_NULL_HRS_INDEX;
-  if (num_regions == 1) {
-    // Only one region to allocate, no need to go through the slower
-    // path. The caller will attempt the expansion if this fails, so
-    // let's not try to expand here too.
-    HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
-    if (hr != NULL) {
-      first = hr->hrs_index();
-    } else {
-      first = G1_NULL_HRS_INDEX;
-    }
-  } else {
-    // We can't allocate humongous regions while cleanupComplete() is
-    // running, since some of the regions we find to be empty might not
-    // yet be added to the free list and it is not straightforward to
-    // know which list they are on so that we can remove them. Note
-    // that we only need to do this if we need to allocate more than
-    // one region to satisfy the current humongous allocation
-    // request. If we are only allocating one region we use the common
-    // region allocation code (see above).
-    wait_while_free_regions_coming();
-    append_secondary_free_list_if_not_empty_with_lock();
-
-    if (free_regions() >= num_regions) {
-      first = _hrs.find_contiguous(num_regions);
-      if (first != G1_NULL_HRS_INDEX) {
-        for (uint i = first; i < first + num_regions; ++i) {
-          HeapRegion* hr = region_at(i);
-          assert(hr->is_empty(), "sanity");
-          assert(is_on_master_free_list(hr), "sanity");
-          hr->set_pending_removal(true);
-        }
-        _free_list.remove_all_pending(num_regions);
-      }
-    }
-  }
-  return first;
-}
-
 HeapWord*
 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
                                                            uint num_regions,
-                                                           size_t word_size) {
-  assert(first != G1_NULL_HRS_INDEX, "pre-condition");
+                                                           size_t word_size,
+                                                           AllocationContext_t context) {
+  assert(first != G1_NO_HRM_INDEX, "pre-condition");
   assert(isHumongous(word_size), "word_size should be humongous");
   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 
@@ -706,13 +667,14 @@
   // that there is a single object that starts at the bottom of the
   // first region.
   first_hr->set_startsHumongous(new_top, new_end);
-
+  first_hr->set_allocation_context(context);
   // Then, if there are any, we will set up the "continues
   // humongous" regions.
   HeapRegion* hr = NULL;
   for (uint i = first + 1; i < last; ++i) {
     hr = region_at(i);
     hr->set_continuesHumongous(first_hr);
+    hr->set_allocation_context(context);
   }
   // If we have "continues humongous" regions (hr != NULL), then the
   // end of the last one should match new_end.
@@ -776,9 +738,10 @@
   // match new_top.
   assert(hr == NULL ||
          (hr->end() == new_end && hr->top() == new_top), "sanity");
+  check_bitmaps("Humongous Region Allocation", first_hr);
 
   assert(first_hr->used() == word_size * HeapWordSize, "invariant");
-  _summary_bytes_used += first_hr->used();
+  _allocator->increase_used(first_hr->used());
   _humongous_set.add(first_hr);
 
   return new_obj;
@@ -787,47 +750,77 @@
 // If could fit into free regions w/o expansion, try.
 // Otherwise, if can expand, do so.
 // Otherwise, if using ex regions might help, try with ex given back.
-HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
+HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 
   verify_region_sets_optional();
 
-  size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords);
-  uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords);
-  uint x_num = expansion_regions();
-  uint fs = _hrs.free_suffix();
-  uint first = humongous_obj_allocate_find_first(num_regions, word_size);
-  if (first == G1_NULL_HRS_INDEX) {
-    // The only thing we can do now is attempt expansion.
-    if (fs + x_num >= num_regions) {
-      // If the number of regions we're trying to allocate for this
-      // object is at most the number of regions in the free suffix,
-      // then the call to humongous_obj_allocate_find_first() above
-      // should have succeeded and we wouldn't be here.
-      //
-      // We should only be trying to expand when the free suffix is
-      // not sufficient for the object _and_ we have some expansion
-      // room available.
-      assert(num_regions > fs, "earlier allocation should have succeeded");
-
+  uint first = G1_NO_HRM_INDEX;
+  uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
+
+  if (obj_regions == 1) {
+    // Only one region to allocate; try to use a fast path by directly allocating
+    // from the free lists. Do not try to expand here; we will potentially do that
+    // later.
+    HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
+    if (hr != NULL) {
+      first = hr->hrm_index();
+    }
+  } else {
+    // We can't allocate humongous regions spanning more than one region while
+    // cleanupComplete() is running, since some of the regions we find to be
+    // empty might not yet be added to the free list. It is not straightforward
+    // to know which list they are on so that we can remove them. We only
+    // need to do this if we need to allocate more than one region to satisfy the
+    // current humongous allocation request. If we are only allocating one region
+    // we use the one-region allocation code (see above), which already
+    // potentially waits for regions from the secondary free list.
+    wait_while_free_regions_coming();
+    append_secondary_free_list_if_not_empty_with_lock();
+
+    // Policy: Try only empty (i.e. already committed) regions first. Maybe we
+    // are lucky enough to find some.
+    first = _hrm.find_contiguous_only_empty(obj_regions);
+    if (first != G1_NO_HRM_INDEX) {
+      _hrm.allocate_free_regions_starting_at(first, obj_regions);
+    }
+  }
+
+  if (first == G1_NO_HRM_INDEX) {
+    // Policy: We could not find enough regions for the humongous object in the
+    // free list. Look through the heap to find a mix of free and uncommitted regions.
+    // If we find such a mix, try expansion.
+    first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
+    if (first != G1_NO_HRM_INDEX) {
+      // We found something. Make sure these regions are committed, i.e. expand
+      // the heap. Alternatively we could do a defragmentation GC.
       ergo_verbose1(ErgoHeapSizing,
                     "attempt heap expansion",
                     ergo_format_reason("humongous allocation request failed")
                     ergo_format_byte("allocation request"),
                     word_size * HeapWordSize);
-      if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
-        // Even though the heap was expanded, it might not have
-        // reached the desired size. So, we cannot assume that the
-        // allocation will succeed.
-        first = humongous_obj_allocate_find_first(num_regions, word_size);
+
+      _hrm.expand_at(first, obj_regions);
+      g1_policy()->record_new_heap_size(num_regions());
+
+#ifdef ASSERT
+      for (uint i = first; i < first + obj_regions; ++i) {
+        HeapRegion* hr = region_at(i);
+        assert(hr->is_free(), "sanity");
+        assert(hr->is_empty(), "sanity");
+        assert(is_on_master_free_list(hr), "sanity");
       }
+#endif
+      _hrm.allocate_free_regions_starting_at(first, obj_regions);
+    } else {
+      // Policy: Potentially trigger a defragmentation GC.
     }
   }
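
The number of regions needed for a humongous object is computed above by rounding the request up to whole heap regions (align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords), after which allocation proceeds in two policy steps: first only already-committed empty regions, then a contiguous run of empty or uncommitted regions combined with expansion. The standalone fragment below only works through the rounding arithmetic; the 1 MB region size is an example value, not the VM's actual configuration.

#include <cstddef>
#include <cstdio>

int main() {
  // Example only: a 1 MB region holds 131072 words of 8 bytes each.
  const size_t GrainWords = 131072;

  // Same quotient as align_size_up_(word_size, GrainWords) / GrainWords.
  size_t word_size   = 300000;                                  // humongous request
  size_t obj_regions = (word_size + GrainWords - 1) / GrainWords;

  std::printf("%zu words -> %zu region(s)\n", word_size, obj_regions);  // 3 region(s)
  return 0;
}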
 
   HeapWord* result = NULL;
-  if (first != G1_NULL_HRS_INDEX) {
-    result =
-      humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
+  if (first != G1_NO_HRM_INDEX) {
+    result = humongous_obj_allocate_initialize_regions(first, obj_regions,
+                                                       word_size, context);
     assert(result != NULL, "it should always return a valid result");
 
     // A successful humongous object allocation changes the used space
@@ -871,6 +864,8 @@
 
     // Create the garbage collection operation...
     VM_G1CollectForAllocation op(gc_count_before, word_size);
+    op.set_allocation_context(AllocationContext::current());
+
     // ...and get the VM thread to execute it.
     VMThread::execute(&op);
 
@@ -906,8 +901,9 @@
 }
 
 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
-                                           unsigned int *gc_count_before_ret,
-                                           int* gclocker_retry_count_ret) {
+                                                   AllocationContext_t context,
+                                                   unsigned int *gc_count_before_ret,
+                                                   int* gclocker_retry_count_ret) {
   // Make sure you read the note in attempt_allocation_humongous().
 
   assert_heap_not_locked_and_not_at_safepoint();
@@ -928,23 +924,22 @@
 
     {
       MutexLockerEx x(Heap_lock);
-
-      result = _mutator_alloc_region.attempt_allocation_locked(word_size,
-                                                      false /* bot_updates */);
+      result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
+                                                                                    false /* bot_updates */);
       if (result != NULL) {
         return result;
       }
 
       // If we reach here, attempt_allocation_locked() above failed to
       // allocate a new region. So the mutator alloc region should be NULL.
-      assert(_mutator_alloc_region.get() == NULL, "only way to get here");
+      assert(_allocator->mutator_alloc_region(context)->get() == NULL, "only way to get here");
 
       if (GC_locker::is_active_and_needs_gc()) {
         if (g1_policy()->can_expand_young_list()) {
           // No need for an ergo verbose message here,
           // can_expand_young_list() does this when it returns true.
-          result = _mutator_alloc_region.attempt_allocation_force(word_size,
-                                                      false /* bot_updates */);
+          result = _allocator->mutator_alloc_region(context)->attempt_allocation_force(word_size,
+                                                                                       false /* bot_updates */);
           if (result != NULL) {
             return result;
           }
@@ -1004,8 +999,8 @@
     // first attempt (without holding the Heap_lock) here and the
     // follow-on attempt will be at the start of the next loop
     // iteration (after taking the Heap_lock).
-    result = _mutator_alloc_region.attempt_allocation(word_size,
-                                                      false /* bot_updates */);
+    result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
+                                                                           false /* bot_updates */);
     if (result != NULL) {
       return result;
     }
@@ -1023,8 +1018,8 @@
 }
 
 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
-                                          unsigned int * gc_count_before_ret,
-                                          int* gclocker_retry_count_ret) {
+                                                        unsigned int * gc_count_before_ret,
+                                                        int* gclocker_retry_count_ret) {
   // The structure of this method has a lot of similarities to
   // attempt_allocation_slow(). The reason these two were not merged
   // into a single one is that such a method would require several "if
@@ -1065,7 +1060,7 @@
       // Given that humongous objects are not allocated in young
       // regions, we'll first try to do the allocation without doing a
       // collection hoping that there's enough space in the heap.
-      result = humongous_obj_allocate(word_size);
+      result = humongous_obj_allocate(word_size, AllocationContext::current());
       if (result != NULL) {
         return result;
       }
@@ -1141,17 +1136,18 @@
 }
 
 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
-                                       bool expect_null_mutator_alloc_region) {
+                                                           AllocationContext_t context,
+                                                           bool expect_null_mutator_alloc_region) {
   assert_at_safepoint(true /* should_be_vm_thread */);
-  assert(_mutator_alloc_region.get() == NULL ||
+  assert(_allocator->mutator_alloc_region(context)->get() == NULL ||
                                              !expect_null_mutator_alloc_region,
          "the current alloc region was unexpectedly found to be non-NULL");
 
   if (!isHumongous(word_size)) {
-    return _mutator_alloc_region.attempt_allocation_locked(word_size,
+    return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
                                                       false /* bot_updates */);
   } else {
-    HeapWord* result = humongous_obj_allocate(word_size);
+    HeapWord* result = humongous_obj_allocate(word_size, context);
     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
       g1_policy()->set_initiate_conc_mark_if_possible();
     }
@@ -1238,21 +1234,21 @@
 public:
   bool doHeapRegion(HeapRegion* hr) {
     assert(!hr->is_young(), "not expecting to find young regions");
-    // We only generate output for non-empty regions.
-    if (!hr->is_empty()) {
-      if (!hr->isHumongous()) {
-        _hr_printer->post_compaction(hr, G1HRPrinter::Old);
-      } else if (hr->startsHumongous()) {
-        if (hr->region_num() == 1) {
-          // single humongous region
-          _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
-        } else {
-          _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
-        }
+    if (hr->is_free()) {
+      // We only generate output for non-empty regions; free regions are skipped.
+    } else if (hr->startsHumongous()) {
+      if (hr->region_num() == 1) {
+        // single humongous region
+        _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
       } else {
-        assert(hr->continuesHumongous(), "only way to get here");
-        _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
+        _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
       }
+    } else if (hr->continuesHumongous()) {
+      _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
+    } else if (hr->is_old()) {
+      _hr_printer->post_compaction(hr, G1HRPrinter::Old);
+    } else {
+      ShouldNotReachHere();
     }
     return false;
   }
@@ -1261,7 +1257,7 @@
     : _hr_printer(hr_printer) { }
 };
 
-void G1CollectedHeap::print_hrs_post_compaction() {
+void G1CollectedHeap::print_hrm_post_compaction() {
   PostCompactionPrinterClosure cl(hr_printer());
   heap_region_iterate(&cl);
 }
@@ -1305,7 +1301,7 @@
     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
 
     {
-      GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
+      GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL, gc_tracer->gc_id());
       TraceCollectorStats tcs(g1mm()->full_collection_counters());
       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
 
@@ -1334,6 +1330,7 @@
 
       verify_before_gc();
 
+      check_bitmaps("Full GC Start");
       pre_full_gc_dump(gc_timer);
 
       COMPILER2_PRESENT(DerivedPointerTable::clear());
@@ -1350,8 +1347,8 @@
       concurrent_mark()->abort();
 
       // Make sure we'll choose a new allocation region afterwards.
-      release_mutator_alloc_region();
-      abandon_gc_alloc_regions();
+      _allocator->release_mutator_alloc_region();
+      _allocator->abandon_gc_alloc_regions();
       g1_rem_set()->cleanupHRRS();
 
       // We should call this after we retire any currently active alloc
@@ -1389,7 +1386,7 @@
         G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
       }
 
-      assert(free_regions() == 0, "we should not have added any free regions");
+      assert(num_free_regions() == 0, "we should not have added any free regions");
       rebuild_region_sets(false /* free_list_only */);
 
       // Enqueue any discovered reference objects that have
@@ -1429,7 +1426,7 @@
         // that all the COMMIT / UNCOMMIT events are generated before
         // the end GC event.
 
-        print_hrs_post_compaction();
+        print_hrm_post_compaction();
         _hr_printer.end_gc(true /* full */, (size_t) total_collections());
       }
 
@@ -1489,9 +1486,7 @@
 
       // Discard all rset updates
       JavaThread::dirty_card_queue_set().abandon_logs();
-      assert(!G1DeferredRSUpdate
-             || (G1DeferredRSUpdate &&
-                (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
+      assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
 
       _young_list->reset_sampled_info();
       // At this point there should be no regions in the
@@ -1502,21 +1497,30 @@
       // Update the number of full collections that have been completed.
       increment_old_marking_cycles_completed(false /* concurrent */);
 
-      _hrs.verify_optional();
+      _hrm.verify_optional();
       verify_region_sets_optional();
 
       verify_after_gc();
 
+      // Clear the previous marking bitmap, if needed for bitmap verification.
+      // Note we cannot do this when we clear the next marking bitmap in
+      // ConcurrentMark::abort() above since VerifyDuringGC verifies the
+      // objects marked during a full GC against the previous bitmap.
+      // But we need to clear it before calling check_bitmaps below since
+      // the full GC has compacted objects and updated TAMS but not updated
+      // the prev bitmap.
+      if (G1VerifyBitmaps) {
+        ((CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
+      }
+      check_bitmaps("Full GC End");
+
       // Start a new incremental collection set for the next pause
       assert(g1_policy()->collection_set() == NULL, "must be");
       g1_policy()->start_incremental_cset_building();
 
-      // Clear the _cset_fast_test bitmap in anticipation of adding
-      // regions to the incremental collection set for the next
-      // evacuation pause.
       clear_cset_fast_test();
 
-      init_mutator_alloc_region();
+      _allocator->init_mutator_alloc_region();
 
       double end = os::elapsedTime();
       g1_policy()->record_full_collection_end();
@@ -1652,6 +1656,7 @@
 
 HeapWord*
 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
+                                           AllocationContext_t context,
                                            bool* succeeded) {
   assert_at_safepoint(true /* should_be_vm_thread */);
 
@@ -1659,7 +1664,8 @@
   // Let's attempt the allocation first.
   HeapWord* result =
     attempt_allocation_at_safepoint(word_size,
-                                 false /* expect_null_mutator_alloc_region */);
+                                    context,
+                                    false /* expect_null_mutator_alloc_region */);
   if (result != NULL) {
     assert(*succeeded, "sanity");
     return result;
@@ -1669,7 +1675,7 @@
   // incremental pauses.  Therefore, at least for now, we'll favor
   // expansion over collection.  (This might change in the future if we can
   // do something smarter than full collection to satisfy a failed alloc.)
-  result = expand_and_allocate(word_size);
+  result = expand_and_allocate(word_size, context);
   if (result != NULL) {
     assert(*succeeded, "sanity");
     return result;
@@ -1686,7 +1692,8 @@
 
   // Retry the allocation
   result = attempt_allocation_at_safepoint(word_size,
-                                  true /* expect_null_mutator_alloc_region */);
+                                           context,
+                                           true /* expect_null_mutator_alloc_region */);
   if (result != NULL) {
     assert(*succeeded, "sanity");
     return result;
@@ -1703,7 +1710,8 @@
 
   // Retry the allocation once more
   result = attempt_allocation_at_safepoint(word_size,
-                                  true /* expect_null_mutator_alloc_region */);
+                                           context,
+                                           true /* expect_null_mutator_alloc_region */);
   if (result != NULL) {
     assert(*succeeded, "sanity");
     return result;
@@ -1725,7 +1733,7 @@
 // successful, perform the allocation and return the address of the
 // allocated block, or else "NULL".
 
-HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
+HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
   assert_at_safepoint(true /* should_be_vm_thread */);
 
   verify_region_sets_optional();
@@ -1737,29 +1745,15 @@
                 ergo_format_byte("allocation request"),
                 word_size * HeapWordSize);
   if (expand(expand_bytes)) {
-    _hrs.verify_optional();
+    _hrm.verify_optional();
     verify_region_sets_optional();
     return attempt_allocation_at_safepoint(word_size,
-                                 false /* expect_null_mutator_alloc_region */);
+                                           context,
+                                           false /* expect_null_mutator_alloc_region */);
   }
   return NULL;
 }
 
-void G1CollectedHeap::update_committed_space(HeapWord* old_end,
-                                             HeapWord* new_end) {
-  assert(old_end != new_end, "don't call this otherwise");
-  assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
-
-  // Update the committed mem region.
-  _g1_committed.set_end(new_end);
-  // Tell the card table about the update.
-  Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
-  // Tell the BOT about the update.
-  _bot_shared->resize(_g1_committed.word_size());
-  // Tell the hot card cache about the update
-  _cg1r->hot_card_cache()->resize_card_counts(capacity());
-}
-
 bool G1CollectedHeap::expand(size_t expand_bytes) {
   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
@@ -1770,55 +1764,22 @@
                 ergo_format_byte("attempted expansion amount"),
                 expand_bytes, aligned_expand_bytes);
 
-  if (_g1_storage.uncommitted_size() == 0) {
+  if (is_maximal_no_gc()) {
     ergo_verbose0(ErgoHeapSizing,
                       "did not expand the heap",
                       ergo_format_reason("heap already fully expanded"));
     return false;
   }
 
-  // First commit the memory.
-  HeapWord* old_end = (HeapWord*) _g1_storage.high();
-  bool successful = _g1_storage.expand_by(aligned_expand_bytes);
-  if (successful) {
-    // Then propagate this update to the necessary data structures.
-    HeapWord* new_end = (HeapWord*) _g1_storage.high();
-    update_committed_space(old_end, new_end);
-
-    FreeRegionList expansion_list("Local Expansion List");
-    MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list);
-    assert(mr.start() == old_end, "post-condition");
-    // mr might be a smaller region than what was requested if
-    // expand_by() was unable to allocate the HeapRegion instances
-    assert(mr.end() <= new_end, "post-condition");
-
-    size_t actual_expand_bytes = mr.byte_size();
+  uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
+  assert(regions_to_expand > 0, "Must expand by at least one region");
+
+  uint expanded_by = _hrm.expand_by(regions_to_expand);
+
+  if (expanded_by > 0) {
+    size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
-    assert(actual_expand_bytes == expansion_list.total_capacity_bytes(),
-           "post-condition");
-    if (actual_expand_bytes < aligned_expand_bytes) {
-      // We could not expand _hrs to the desired size. In this case we
-      // need to shrink the committed space accordingly.
-      assert(mr.end() < new_end, "invariant");
-
-      size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes;
-      // First uncommit the memory.
-      _g1_storage.shrink_by(diff_bytes);
-      // Then propagate this update to the necessary data structures.
-      update_committed_space(new_end, mr.end());
-    }
-    _free_list.add_as_tail(&expansion_list);
-
-    if (_hr_printer.is_active()) {
-      HeapWord* curr = mr.start();
-      while (curr < mr.end()) {
-        HeapWord* curr_end = curr + HeapRegion::GrainWords;
-        _hr_printer.commit(curr, curr_end);
-        curr = curr_end;
-      }
-      assert(curr == mr.end(), "post-condition");
-    }
-    g1_policy()->record_new_heap_size(n_regions());
+    g1_policy()->record_new_heap_size(num_regions());
   } else {
     ergo_verbose0(ErgoHeapSizing,
                   "did not expand the heap",
@@ -1826,12 +1787,12 @@
     // The expansion of the virtual storage space was unsuccessful.
     // Let's see if it was because we ran out of swap.
     if (G1ExitOnExpansionFailure &&
-        _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
+        _hrm.available() >= regions_to_expand) {
       // We had head room...
       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
     }
   }
-  return successful;
+  return regions_to_expand > 0;
 }
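
Expansion now operates on whole regions: the request is page-aligned, rounded up to the region size, and _hrm.expand_by() is asked for that many regions. The standalone fragment below only illustrates the rounding; round_up stands in for ReservedSpace::page_align_size_up / align_size_up, and the page and region sizes are example values.

#include <cstddef>
#include <cstdio>

static size_t round_up(size_t value, size_t alignment) {
  return ((value + alignment - 1) / alignment) * alignment;
}

int main() {
  const size_t page_size  = 4096;            // example only
  const size_t GrainBytes = 1024 * 1024;     // example 1 MB region

  size_t expand_bytes         = 3 * 1024 * 1024 + 123;  // a slightly-over-3 MB request
  size_t aligned_expand_bytes = round_up(round_up(expand_bytes, page_size), GrainBytes);
  size_t regions_to_expand    = aligned_expand_bytes / GrainBytes;

  std::printf("%zu bytes -> %zu aligned bytes, %zu region(s)\n",
              expand_bytes, aligned_expand_bytes, regions_to_expand);  // 4 MB, 4 regions
  return 0;
}

Note that the rewritten expand() above returns regions_to_expand > 0 rather than whether any region was actually committed; expanded_by can still be smaller than regions_to_expand if the heap region manager cannot satisfy the full request.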
 
 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
@@ -1841,8 +1802,7 @@
                                          HeapRegion::GrainBytes);
   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
 
-  uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove);
-  HeapWord* old_end = (HeapWord*) _g1_storage.high();
+  uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
 
   ergo_verbose3(ErgoHeapSizing,
@@ -1852,22 +1812,7 @@
                 ergo_format_byte("attempted shrinking amount"),
                 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
   if (num_regions_removed > 0) {
-    _g1_storage.shrink_by(shrunk_bytes);
-    HeapWord* new_end = (HeapWord*) _g1_storage.high();
-
-    if (_hr_printer.is_active()) {
-      HeapWord* curr = old_end;
-      while (curr > new_end) {
-        HeapWord* curr_end = curr;
-        curr -= HeapRegion::GrainWords;
-        _hr_printer.uncommit(curr, curr_end);
-      }
-    }
-
-    _expansion_regions += num_regions_removed;
-    update_committed_space(old_end, new_end);
-    HeapRegionRemSet::shrink_heap(n_regions());
-    g1_policy()->record_new_heap_size(n_regions());
+    g1_policy()->record_new_heap_size(num_regions());
   } else {
     ergo_verbose0(ErgoHeapSizing,
                   "did not shrink the heap",
@@ -1881,7 +1826,7 @@
   // We should only reach here at the end of a Full GC, which means we
   // should not be holding on to any GC alloc regions. The method
   // below will make sure of that and do any remaining clean up.
-  abandon_gc_alloc_regions();
+  _allocator->abandon_gc_alloc_regions();
 
   // Instead of tearing down / rebuilding the free lists here, we
   // could instead use the remove_all_pending() method on free_list to
@@ -1890,7 +1835,7 @@
   shrink_helper(shrink_bytes);
   rebuild_region_sets(true /* free_list_only */);
 
-  _hrs.verify_optional();
+  _hrm.verify_optional();
   verify_region_sets_optional();
 }
 
@@ -1914,18 +1859,18 @@
   _bot_shared(NULL),
   _evac_failure_scan_stack(NULL),
   _mark_in_progress(false),
-  _cg1r(NULL), _summary_bytes_used(0),
+  _cg1r(NULL),
   _g1mm(NULL),
   _refine_cte_cl(NULL),
   _full_collection(false),
-  _free_list("Master Free List", new MasterFreeRegionListMtSafeChecker()),
   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
+  _humongous_is_live(),
+  _has_humongous_reclaim_candidates(false),
   _free_regions_coming(false),
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
-  _retained_old_gc_alloc_region(NULL),
   _survivor_plab_stats(YoungPLABSize, PLABWeight),
   _old_plab_stats(OldPLABSize, PLABWeight),
   _expand_heap_after_alloc_failure(true),
@@ -1933,8 +1878,7 @@
   _old_marking_cycles_started(0),
   _old_marking_cycles_completed(0),
   _concurrent_cycle_started(false),
-  _in_cset_fast_test(NULL),
-  _in_cset_fast_test_base(NULL),
+  _in_cset_fast_test(),
   _dirty_cards_region_list(NULL),
   _worker_cset_start_region(NULL),
   _worker_cset_start_region_time_stamp(NULL),
@@ -1948,6 +1892,7 @@
     vm_exit_during_initialization("Failed necessary allocation.");
   }
 
+  _allocator = G1Allocator::create_allocator(_g1h);
   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
 
   int n_queues = MAX2((int)ParallelGCThreads, 1);
@@ -2004,7 +1949,9 @@
   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
 
-  _cg1r = new ConcurrentG1Refine(this);
+  _refine_cte_cl = new RefineCardTableEntryClosure();
+
+  _cg1r = new ConcurrentG1Refine(this, _refine_cte_cl);
 
   // Reserve the maximum.
 
@@ -2029,8 +1976,6 @@
   _reserved.set_start((HeapWord*)heap_rs.base());
   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
 
-  _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
-
   // Create the gen rem set (and barrier set) for the entire reserved region.
   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
   set_barrier_set(rem_set()->bs());
@@ -2044,20 +1989,65 @@
 
   // Carve out the G1 part of the heap.
 
-  ReservedSpace g1_rs   = heap_rs.first_part(max_byte_size);
-  _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
-                           g1_rs.size()/HeapWordSize);
-
-  _g1_storage.initialize(g1_rs, 0);
-  _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
-  _hrs.initialize((HeapWord*) _g1_reserved.start(),
-                  (HeapWord*) _g1_reserved.end());
-  assert(_hrs.max_length() == _expansion_regions,
-         err_msg("max length: %u expansion regions: %u",
-                 _hrs.max_length(), _expansion_regions));
-
-  // Do later initialization work for concurrent refinement.
-  _cg1r->init();
+  ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
+  G1RegionToSpaceMapper* heap_storage =
+    G1RegionToSpaceMapper::create_mapper(g1_rs,
+                                         UseLargePages ? os::large_page_size() : os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         1,
+                                         mtJavaHeap);
+  heap_storage->set_mapping_changed_listener(&_listener);
+
+  // Reserve space for the block offset table. We do not support automatic uncommit
+  // for the card table at this time. BOT only.
+  ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
+  G1RegionToSpaceMapper* bot_storage =
+    G1RegionToSpaceMapper::create_mapper(bot_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         G1BlockOffsetSharedArray::N_bytes,
+                                         mtGC);
+
+  ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
+  G1RegionToSpaceMapper* cardtable_storage =
+    G1RegionToSpaceMapper::create_mapper(cardtable_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         G1BlockOffsetSharedArray::N_bytes,
+                                         mtGC);
+
+  // Reserve space for the card counts table.
+  ReservedSpace card_counts_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
+  G1RegionToSpaceMapper* card_counts_storage =
+    G1RegionToSpaceMapper::create_mapper(card_counts_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         G1BlockOffsetSharedArray::N_bytes,
+                                         mtGC);
+
+  // Reserve space for prev and next bitmap.
+  size_t bitmap_size = CMBitMap::compute_size(g1_rs.size());
+
+  ReservedSpace prev_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
+  G1RegionToSpaceMapper* prev_bitmap_storage =
+    G1RegionToSpaceMapper::create_mapper(prev_bitmap_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         CMBitMap::mark_distance(),
+                                         mtGC);
+
+  ReservedSpace next_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
+  G1RegionToSpaceMapper* next_bitmap_storage =
+    G1RegionToSpaceMapper::create_mapper(next_bitmap_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         CMBitMap::mark_distance(),
+                                         mtGC);
+
+  _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
+  g1_barrier_set()->initialize(cardtable_storage);
+  // Do later initialization work for concurrent refinement.
+  _cg1r->init(card_counts_storage);
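
Each auxiliary structure mapped above (heap storage, BOT, card table, card counts, and the two marking bitmaps) is backed by a G1RegionToSpaceMapper created with a translation factor describing how many heap bytes one byte of the auxiliary structure covers (e.g. G1BlockOffsetSharedArray::N_bytes, CMBitMap::mark_distance()), so committing a span of heap regions commits a proportional slice of each structure. The fragment below only illustrates that proportional sizing; the helper name and the ratios (1 card-table byte per 512 heap bytes, 1 mark-bitmap byte per 64 heap bytes on a 64-bit VM) are given as examples, not as the mapper API.

#include <cstddef>
#include <cstdio>

// How many auxiliary bytes must be committed when num_regions heap regions are
// committed, given how many heap bytes one auxiliary byte covers.
static size_t aux_bytes_for_regions(size_t num_regions,
                                    size_t region_bytes,
                                    size_t heap_bytes_per_aux_byte) {
  return num_regions * region_bytes / heap_bytes_per_aux_byte;
}

int main() {
  const size_t region_bytes = 1024 * 1024;   // example 1 MB regions
  std::printf("card table bytes for 8 regions:  %zu\n",
              aux_bytes_for_regions(8, region_bytes, 512));  // 16384
  std::printf("mark bitmap bytes for 8 regions: %zu\n",
              aux_bytes_for_regions(8, region_bytes, 64));   // 131072
  return 0;
}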
 
   // 6843694 - ensure that the maximum region index can fit
   // in the remembered set structures.
@@ -2071,29 +2061,16 @@
 
   FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
 
-  _bot_shared = new G1BlockOffsetSharedArray(_reserved,
-                                             heap_word_size(init_byte_size));
+  _bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);
 
   _g1h = this;
 
-  _in_cset_fast_test_length = max_regions();
-  _in_cset_fast_test_base =
-                   NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC);
-
-  // We're biasing _in_cset_fast_test to avoid subtracting the
-  // beginning of the heap every time we want to index; basically
-  // it's the same with what we do with the card table.
-  _in_cset_fast_test = _in_cset_fast_test_base -
-               ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
-
-  // Clear the _cset_fast_test bitmap in anticipation of adding
-  // regions to the incremental collection set for the first
-  // evacuation pause.
-  clear_cset_fast_test();
+  _in_cset_fast_test.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
+  _humongous_is_live.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
 
   // Create the ConcurrentMark data structure and thread.
   // (Must do this late, so that "max_regions" is defined.)
-  _cm = new ConcurrentMark(this, heap_rs);
+  _cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
   if (_cm == NULL || !_cm->completed_initialization()) {
     vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
     return JNI_ENOMEM;
@@ -2112,35 +2089,30 @@
   // Perform any initialization actions delegated to the policy.
   g1_policy()->init();
 
-  _refine_cte_cl =
-    new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
-                                    g1_rem_set(),
-                                    concurrent_g1_refine());
-  JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
-
   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                                SATB_Q_FL_lock,
                                                G1SATBProcessCompletedThreshold,
                                                Shared_SATB_Q_lock);
 
-  JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
+  JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,
+                                                DirtyCardQ_CBL_mon,
                                                 DirtyCardQ_FL_lock,
                                                 concurrent_g1_refine()->yellow_zone(),
                                                 concurrent_g1_refine()->red_zone(),
                                                 Shared_DirtyCardQ_lock);
 
-  if (G1DeferredRSUpdate) {
-    dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
-                                      DirtyCardQ_FL_lock,
-                                      -1, // never trigger processing
-                                      -1, // no limit on length
-                                      Shared_DirtyCardQ_lock,
-                                      &JavaThread::dirty_card_queue_set());
-  }
+  dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
+                                    DirtyCardQ_CBL_mon,
+                                    DirtyCardQ_FL_lock,
+                                    -1, // never trigger processing
+                                    -1, // no limit on length
+                                    Shared_DirtyCardQ_lock,
+                                    &JavaThread::dirty_card_queue_set());
 
   // Initialize the card queue set used to hold cards containing
   // references into the collection set.
-  _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon,
+  _into_cset_dirty_card_queue_set.initialize(NULL, // Should never be called by the Java code
+                                             DirtyCardQ_CBL_mon,
                                              DirtyCardQ_FL_lock,
                                              -1, // never trigger processing
                                              -1, // no limit on length
@@ -2151,22 +2123,20 @@
   // counts and that mechanism.
   SpecializationStats::clear();
 
-  // Here we allocate the dummy full region that is required by the
-  // G1AllocRegion class. If we don't pass an address in the reserved
-  // space here, lots of asserts fire.
-
-  HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
-                                             _g1_reserved.start());
+  // Here we allocate the dummy HeapRegion that is required by the
+  // G1AllocRegion class.
+  HeapRegion* dummy_region = _hrm.get_dummy_region();
+
   // We'll re-use the same region whether the alloc region will
   // require BOT updates or not and, if it doesn't, then a non-young
   // region will complain that it cannot support allocations without
-  // BOT updates. So we'll tag the dummy region as young to avoid that.
-  dummy_region->set_young();
+  // BOT updates. So we'll tag the dummy region as eden to avoid that.
+  dummy_region->set_eden();
   // Make sure it's full.
   dummy_region->set_top(dummy_region->end());
   G1AllocRegion::setup(this, dummy_region);
 
-  init_mutator_alloc_region();
+  _allocator->init_mutator_alloc_region();
 
   // Do create of the monitoring and management support so that
   // values in the heap have been properly initialized.
@@ -2188,6 +2158,11 @@
   }
 }
 
+void G1CollectedHeap::clear_humongous_is_live_table() {
+  guarantee(G1ReclaimDeadHumongousObjectsAtYoungGC, "Should only be called if true");
+  _humongous_is_live.clear();
+}
+
 size_t G1CollectedHeap::conservative_max_heap_alignment() {
   return HeapRegion::max_region_size();
 }
@@ -2267,14 +2242,14 @@
 }
 
 size_t G1CollectedHeap::capacity() const {
-  return _g1_committed.byte_size();
+  return _hrm.length() * HeapRegion::GrainBytes;
 }
 
 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
   assert(!hr->continuesHumongous(), "pre-condition");
   hr->reset_gc_time_stamp();
   if (hr->startsHumongous()) {
-    uint first_index = hr->hrs_index() + 1;
+    uint first_index = hr->hrm_index() + 1;
     uint last_index = hr->last_hc_index();
     for (uint i = first_index; i < last_index; i += 1) {
       HeapRegion* chr = region_at(i);
@@ -2335,21 +2310,12 @@
 
 
 // Computes the sum of the storage used by the various regions.
-
 size_t G1CollectedHeap::used() const {
-  assert(Heap_lock->owner() != NULL,
-         "Should be owned on this thread's behalf.");
-  size_t result = _summary_bytes_used;
-  // Read only once in case it is set to NULL concurrently
-  HeapRegion* hr = _mutator_alloc_region.get();
-  if (hr != NULL)
-    result += hr->used();
-  return result;
+  return _allocator->used();
 }
 
 size_t G1CollectedHeap::used_unlocked() const {
-  size_t result = _summary_bytes_used;
-  return result;
+  return _allocator->used_unlocked();
 }
 
 class SumUsedClosure: public HeapRegionClosure {
@@ -2375,30 +2341,12 @@
   return blk.result();
 }
 
-size_t G1CollectedHeap::unsafe_max_alloc() {
-  if (free_regions() > 0) return HeapRegion::GrainBytes;
-  // otherwise, is there space in the current allocation region?
-
-  // We need to store the current allocation region in a local variable
-  // here. The problem is that this method doesn't take any locks and
-  // there may be other threads which overwrite the current allocation
-  // region field. attempt_allocation(), for example, sets it to NULL
-  // and this can happen *after* the NULL check here but before the call
-  // to free(), resulting in a SIGSEGV. Note that this doesn't appear
-  // to be a problem in the optimized build, since the two loads of the
-  // current allocation region field are optimized away.
-  HeapRegion* hr = _mutator_alloc_region.get();
-  if (hr == NULL) {
-    return 0;
-  }
-  return hr->free();
-}
-
 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
   switch (cause) {
     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
     case GCCause::_g1_humongous_allocation: return true;
+    case GCCause::_update_allocation_context_stats_inc: return true;
     default:                                return false;
   }
 }
@@ -2412,7 +2360,8 @@
 
   for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
     // Let's use the existing mechanism for the allocation
-    HeapWord* dummy_obj = humongous_obj_allocate(word_size);
+    HeapWord* dummy_obj = humongous_obj_allocate(word_size,
+                                                 AllocationContext::system());
     if (dummy_obj != NULL) {
       MemRegion mr(dummy_obj, word_size);
       CollectedHeap::fill_with_object(mr);
@@ -2425,16 +2374,6 @@
 }
 #endif // !PRODUCT
 
-#ifdef GRAAL
-HeapWord** G1CollectedHeap::top_addr() const {
-  return _mutator_alloc_region.top_addr();
-}
-
-HeapWord** G1CollectedHeap::end_addr()  const {
-  return  _mutator_alloc_region.end_addr();
-}
-#endif
-
 void G1CollectedHeap::increment_old_marking_cycles_started() {
   assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
     _old_marking_cycles_started == _old_marking_cycles_completed + 1,
@@ -2540,6 +2479,7 @@
 
   unsigned int gc_count_before;
   unsigned int old_marking_count_before;
+  unsigned int full_gc_count_before;
   bool retry_gc;
 
   do {
@@ -2550,6 +2490,7 @@
 
       // Read the GC count while holding the Heap_lock
       gc_count_before = total_collections();
+      full_gc_count_before = total_full_collections();
       old_marking_count_before = _old_marking_cycles_started;
     }
 
@@ -2562,6 +2503,7 @@
                                  true,  /* should_initiate_conc_mark */
                                  g1_policy()->max_pause_time_ms(),
                                  cause);
+      op.set_allocation_context(AllocationContext::current());
 
       VMThread::execute(&op);
       if (!op.pause_succeeded()) {
@@ -2580,7 +2522,7 @@
         }
       }
     } else {
-      if (cause == GCCause::_gc_locker
+      if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
           DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
 
         // Schedule a standard evacuation pause. We're setting word_size
@@ -2593,7 +2535,7 @@
         VMThread::execute(&op);
       } else {
         // Schedule a Full GC.
-        VM_G1CollectFull op(gc_count_before, old_marking_count_before, cause);
+        VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
         VMThread::execute(&op);
       }
     }
@@ -2601,8 +2543,8 @@
 }
 
 bool G1CollectedHeap::is_in(const void* p) const {
-  if (_g1_committed.contains(p)) {
-    // Given that we know that p is in the committed space,
+  if (_hrm.reserved().contains(p)) {
+    // Given that we know that p is in the reserved space,
     // heap_region_containing_raw() should successfully
     // return the containing region.
     HeapRegion* hr = heap_region_containing_raw(p);
@@ -2612,17 +2554,26 @@
   }
 }
 
+#ifdef ASSERT
+bool G1CollectedHeap::is_in_exact(const void* p) const {
+  bool contains = reserved_region().contains(p);
+  bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
+  if (contains && available) {
+    return true;
+  } else {
+    return false;
+  }
+}
+#endif
+
 // Iteration functions.
 
-// Iterates an OopClosure over all ref-containing fields of objects
-// within a HeapRegion.
+// Applies an ExtendedOopClosure to all references of objects within a HeapRegion.
 
 class IterateOopClosureRegionClosure: public HeapRegionClosure {
-  MemRegion _mr;
   ExtendedOopClosure* _cl;
 public:
-  IterateOopClosureRegionClosure(MemRegion mr, ExtendedOopClosure* cl)
-    : _mr(mr), _cl(cl) {}
+  IterateOopClosureRegionClosure(ExtendedOopClosure* cl) : _cl(cl) {}
   bool doHeapRegion(HeapRegion* r) {
     if (!r->continuesHumongous()) {
       r->oop_iterate(_cl);
@@ -2632,12 +2583,7 @@
 };
 
 void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
-  IterateOopClosureRegionClosure blk(_g1_committed, cl);
-  heap_region_iterate(&blk);
-}
-
-void G1CollectedHeap::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
-  IterateOopClosureRegionClosure blk(mr, cl);
+  IterateOopClosureRegionClosure blk(cl);
   heap_region_iterate(&blk);
 }
 
@@ -2678,89 +2624,15 @@
 }
 
 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
-  _hrs.iterate(cl);
+  _hrm.iterate(cl);
 }
 
 void
 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
                                                  uint worker_id,
-                                                 uint no_of_par_workers,
-                                                 jint claim_value) {
-  const uint regions = n_regions();
-  const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
-                             no_of_par_workers :
-                             1);
-  assert(UseDynamicNumberOfGCThreads ||
-         no_of_par_workers == workers()->total_workers(),
-         "Non dynamic should use fixed number of workers");
-  // try to spread out the starting points of the workers
-  const HeapRegion* start_hr =
-                        start_region_for_worker(worker_id, no_of_par_workers);
-  const uint start_index = start_hr->hrs_index();
-
-  // each worker will actually look at all regions
-  for (uint count = 0; count < regions; ++count) {
-    const uint index = (start_index + count) % regions;
-    assert(0 <= index && index < regions, "sanity");
-    HeapRegion* r = region_at(index);
-    // we'll ignore "continues humongous" regions (we'll process them
-    // when we come across their corresponding "start humongous"
-    // region) and regions already claimed
-    if (r->claim_value() == claim_value || r->continuesHumongous()) {
-      continue;
-    }
-    // OK, try to claim it
-    if (r->claimHeapRegion(claim_value)) {
-      // success!
-      assert(!r->continuesHumongous(), "sanity");
-      if (r->startsHumongous()) {
-        // If the region is "starts humongous" we'll iterate over its
-        // "continues humongous" first; in fact we'll do them
-        // first. The order is important. In on case, calling the
-        // closure on the "starts humongous" region might de-allocate
-        // and clear all its "continues humongous" regions and, as a
-        // result, we might end up processing them twice. So, we'll do
-        // them first (notice: most closures will ignore them anyway) and
-        // then we'll do the "starts humongous" region.
-        for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
-          HeapRegion* chr = region_at(ch_index);
-
-          // if the region has already been claimed or it's not
-          // "continues humongous" we're done
-          if (chr->claim_value() == claim_value ||
-              !chr->continuesHumongous()) {
-            break;
-          }
-
-          // No one should have claimed it directly. We can given
-          // that we claimed its "starts humongous" region.
-          assert(chr->claim_value() != claim_value, "sanity");
-          assert(chr->humongous_start_region() == r, "sanity");
-
-          if (chr->claimHeapRegion(claim_value)) {
-            // we should always be able to claim it; no one else should
-            // be trying to claim this region
-
-            bool res2 = cl->doHeapRegion(chr);
-            assert(!res2, "Should not abort");
-
-            // Right now, this holds (i.e., no closure that actually
-            // does something with "continues humongous" regions
-            // clears them). We might have to weaken it in the future,
-            // but let's leave these two asserts here for extra safety.
-            assert(chr->continuesHumongous(), "should still be the case");
-            assert(chr->humongous_start_region() == r, "sanity");
-          } else {
-            guarantee(false, "we should not reach here");
-          }
-        }
-      }
-
-      assert(!r->continuesHumongous(), "sanity");
-      bool res = cl->doHeapRegion(r);
-      assert(!res, "Should not abort");
-    }
-  }
+                                                 uint num_workers,
+                                                 jint claim_value) const {
+  _hrm.par_iterate(cl, worker_id, num_workers, claim_value);
 }
 
 class ResetClaimValuesClosure: public HeapRegionClosure {
@@ -2938,17 +2810,6 @@
   return result;
 }
 
-HeapRegion* G1CollectedHeap::start_region_for_worker(uint worker_i,
-                                                     uint no_of_par_workers) {
-  uint worker_num =
-           G1CollectedHeap::use_parallel_gc_threads() ? no_of_par_workers : 1U;
-  assert(UseDynamicNumberOfGCThreads ||
-         no_of_par_workers == workers()->total_workers(),
-         "Non dynamic should use fixed number of workers");
-  const uint start_index = n_regions() * worker_i / worker_num;
-  return region_at(start_index);
-}
-
 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
   HeapRegion* r = g1_policy()->collection_set();
   while (r != NULL) {
@@ -2990,27 +2851,25 @@
   }
 }
 
-CompactibleSpace* G1CollectedHeap::first_compactible_space() {
-  return n_regions() > 0 ? region_at(0) : NULL;
-}
-
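+// Returns the next region after "from" in heap order, skipping humongous
+// regions, or NULL if no such region remains.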
+HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
+  HeapRegion* result = _hrm.next_region_in_heap(from);
+  while (result != NULL && result->isHumongous()) {
+    result = _hrm.next_region_in_heap(result);
+  }
+  return result;
+}
 
 Space* G1CollectedHeap::space_containing(const void* addr) const {
-  Space* res = heap_region_containing(addr);
-  return res;
+  return heap_region_containing(addr);
 }
 
 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
   Space* sp = space_containing(addr);
-  if (sp != NULL) {
-    return sp->block_start(addr);
-  }
-  return NULL;
+  return sp->block_start(addr);
 }
 
 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
   Space* sp = space_containing(addr);
-  assert(sp != NULL, "block_size of address outside of heap");
   return sp->block_size(addr);
 }
 
@@ -3045,7 +2904,7 @@
   // since we can't allow tlabs to grow big enough to accommodate
   // humongous objects.
 
-  HeapRegion* hr = _mutator_alloc_region.get();
+  HeapRegion* hr = _allocator->mutator_alloc_region(AllocationContext::current())->get();
   size_t max_tlab = max_tlab_size() * wordSize;
   if (hr == NULL) {
     return max_tlab;
@@ -3055,7 +2914,7 @@
 }
 
 size_t G1CollectedHeap::max_capacity() const {
-  return _g1_reserved.byte_size();
+  return _hrm.reserved().byte_size();
 }
 
 jlong G1CollectedHeap::millis_since_last_gc() {
@@ -3424,25 +3283,20 @@
 
     if (!silent) { gclog_or_tty->print("Roots "); }
     VerifyRootsClosure rootsCl(vo);
-    G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
-    G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
     VerifyKlassClosure klassCl(this, &rootsCl);
+    CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
 
     // We apply the relevant closures to all the oops in the
-    // system dictionary, the string table and the code cache.
-    const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
-
-    // Need cleared claim bits for the strong roots processing
-    ClassLoaderDataGraph::clear_claimed_marks();
-
-    process_strong_roots(true,      // activate StrongRootsScope
-                         false,     // we set "is scavenging" to false,
-                                    // so we don't reset the dirty cards.
-                         ScanningOption(so),  // roots scanning options
-                         &rootsCl,
-                         &blobsCl,
-                         &klassCl
-                         );
+    // system dictionary, class loader data graph, the string table
+    // and the nmethods in the code cache.
+    G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
+    G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
+
+    process_all_roots(true,            // activate StrongRootsScope
+                      SO_AllCodeCache, // roots scanning options
+                      &rootsCl,
+                      &cldCl,
+                      &blobsCl);
 
     bool failures = rootsCl.failures() || codeRootsCl.failures();
 
@@ -3589,9 +3443,9 @@
   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
             capacity()/K, used_unlocked()/K);
   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
-            _g1_storage.low_boundary(),
-            _g1_storage.high(),
-            _g1_storage.high_boundary());
+            _hrm.reserved().start(),
+            _hrm.reserved().start() + _hrm.length() + HeapRegion::GrainWords,
+            _hrm.reserved().end());
   st->cr();
   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
   uint young_regions = _young_list->length();
@@ -3737,7 +3591,7 @@
   }
 }
 
-void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
+void G1CollectedHeap::gc_epilogue(bool full) {
 
   if (G1SummarizeRSetStats &&
       (G1SummarizeRSetStatsPeriod > 0) &&
@@ -3754,6 +3608,7 @@
   // always_do_update_barrier = true;
 
   resize_all_tlabs();
+  allocation_context_stats().update(full);
 
   // We have just completed a GC. Update the soft reference
   // policy with the new heap occupancy
@@ -3771,6 +3626,8 @@
                              false, /* should_initiate_conc_mark */
                              g1_policy()->max_pause_time_ms(),
                              gc_cause);
+
+  op.set_allocation_context(AllocationContext::current());
   VMThread::execute(&op);
 
   HeapWord* result = op.result();
@@ -3814,6 +3671,61 @@
   return g1_rem_set()->cardsScanned();
 }
 
+bool G1CollectedHeap::humongous_region_is_always_live(uint index) {
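+  // A humongous object is treated as always live if it is an objArray or if its
+  // region's remembered set is non-empty; such objects are never candidates for
+  // eager reclamation at a young GC.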
+  HeapRegion* region = region_at(index);
+  assert(region->startsHumongous(), "Must start a humongous object");
+  return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
+}
+
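+// Iterates over all regions, counts the humongous ones and registers those that
+// are candidates for eager reclamation with the in-cset fast test.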
+class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
+ private:
+  size_t _total_humongous;
+  size_t _candidate_humongous;
+ public:
+  RegisterHumongousWithInCSetFastTestClosure() : _total_humongous(0), _candidate_humongous(0) {
+  }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    if (!r->startsHumongous()) {
+      return false;
+    }
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+    uint region_idx = r->hrm_index();
+    bool is_candidate = !g1h->humongous_region_is_always_live(region_idx);
+    // is_candidate already filters out humongous regions with a non-empty remembered set.
+    // This cannot cause us to mistakenly keep a humongous object alive, because during
+    // a young collection the remembered sets are only added to.
+    if (is_candidate) {
+      g1h->register_humongous_region_with_in_cset_fast_test(region_idx);
+      _candidate_humongous++;
+    }
+    _total_humongous++;
+
+    return false;
+  }
+
+  size_t total_humongous() const { return _total_humongous; }
+  size_t candidate_humongous() const { return _candidate_humongous; }
+};
+
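+// Registers all candidate humongous regions with the in-cset fast test and
+// records the resulting statistics. If eager reclamation is disabled, only
+// zeroed statistics are recorded.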
+void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() {
+  if (!G1ReclaimDeadHumongousObjectsAtYoungGC) {
+    g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0, 0);
+    return;
+  }
+
+  RegisterHumongousWithInCSetFastTestClosure cl;
+  heap_region_iterate(&cl);
+  g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(cl.total_humongous(),
+                                                                  cl.candidate_humongous());
+  _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
+
+  if (_has_humongous_reclaim_candidates) {
+    clear_humongous_is_live_table();
+  }
+}
+
 void
 G1CollectedHeap::setup_surviving_young_words() {
   assert(_surviving_young_words == NULL, "pre-condition");
@@ -3903,8 +3815,7 @@
     return;
   }
 
-  gclog_or_tty->date_stamp(PrintGCDateStamps);
-  gclog_or_tty->stamp(PrintGCTimeStamps);
+  gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
 
   GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
     .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")
@@ -4025,6 +3936,7 @@
       increment_gc_time_stamp();
 
       verify_before_gc();
+      check_bitmaps("GC Start");
 
       COMPILER2_PRESENT(DerivedPointerTable::clear());
 
@@ -4045,7 +3957,7 @@
 
         // Forget the current alloc region (we might even choose it to be part
         // of the collection set!).
-        release_mutator_alloc_region();
+        _allocator->release_mutator_alloc_region();
 
         // We should call this after we retire the mutator alloc
         // region(s) so that all the ALLOC / RETIRE events are generated
@@ -4100,6 +4012,8 @@
 
         g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
 
+        register_humongous_regions_with_in_cset_fast_test();
+
         _cm->note_start_of_gc();
         // We should not verify the per-thread SATB buffers given that
         // we have not filtered them yet (we'll do so during the
@@ -4113,14 +4027,6 @@
         if (_hr_printer.is_active()) {
           HeapRegion* hr = g1_policy()->collection_set();
           while (hr != NULL) {
-            G1HRPrinter::RegionType type;
-            if (!hr->is_young()) {
-              type = G1HRPrinter::Old;
-            } else if (hr->is_survivor()) {
-              type = G1HRPrinter::Survivor;
-            } else {
-              type = G1HRPrinter::Eden;
-            }
             _hr_printer.cset(hr);
             hr = hr->next_in_collection_set();
           }
@@ -4134,7 +4040,7 @@
         setup_surviving_young_words();
 
         // Initialize the GC alloc regions.
-        init_gc_alloc_regions(evacuation_info);
+        _allocator->init_gc_alloc_regions(evacuation_info);
 
         // Actually do the work...
         evacuate_collection_set(evacuation_info);
@@ -4150,6 +4056,9 @@
                                  true  /* verify_fingers */);
 
         free_collection_set(g1_policy()->collection_set(), evacuation_info);
+
+        eagerly_reclaim_humongous_regions();
+
         g1_policy()->clear_collection_set();
 
         cleanup_surviving_young_words();
@@ -4157,9 +4066,6 @@
         // Start a new incremental collection set for the next pause.
         g1_policy()->start_incremental_cset_building();
 
-        // Clear the _cset_fast_test bitmap in anticipation of adding
-        // regions to the incremental collection set for the next
-        // evacuation pause.
         clear_cset_fast_test();
 
         _young_list->reset_sampled_info();
@@ -4183,7 +4089,7 @@
         _young_list->reset_auxilary_lists();
 
         if (evacuation_failed()) {
-          _summary_bytes_used = recalculate_used();
+          _allocator->set_used(recalculate_used());
           uint n_queues = MAX2((int)ParallelGCThreads, 1);
           for (uint i = 0; i < n_queues; i++) {
             if (_evacuation_failed_info_array[i].has_failed()) {
@@ -4193,7 +4099,7 @@
         } else {
           // The "used" of the the collection set have already been subtracted
           // when they were freed.  Add in the bytes evacuated.
-          _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
+          _allocator->increase_used(g1_policy()->bytes_copied_during_gc());
         }
 
         if (g1_policy()->during_initial_mark_pause()) {
@@ -4215,7 +4121,7 @@
         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
-        init_mutator_alloc_region();
+        _allocator->init_mutator_alloc_region();
 
         {
           size_t expand_bytes = g1_policy()->expansion_amount();
@@ -4224,10 +4130,7 @@
             // No need for an ergo verbose message here,
             // expansion_amount() does this when it returns a value > 0.
             if (!expand(expand_bytes)) {
-              // We failed to expand the heap so let's verify that
-              // committed/uncommitted amount match the backing store
-              assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
-              assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
+              // We failed to expand the heap. Cannot do anything about it.
             }
           }
         }
@@ -4273,6 +4176,7 @@
         increment_gc_time_stamp();
 
         verify_after_gc();
+        check_bitmaps("GC End");
 
         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
         ref_processor_stw()->verify_no_references_recorded();
@@ -4286,10 +4190,6 @@
       // RETIRE events are generated before the end GC event.
       _hr_printer.end_gc(false /* full */, (size_t) total_collections());
 
-      if (mark_in_progress()) {
-        concurrent_mark()->update_g1_committed();
-      }
-
 #ifdef TRACESPINNING
       ParallelTaskTerminator::print_termination_counts();
 #endif
@@ -4305,7 +4205,7 @@
     // output from the concurrent mark thread interfering with this
     // logging output either.
 
-    _hrs.verify_optional();
+    _hrm.verify_optional();
     verify_region_sets_optional();
 
     TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
@@ -4336,7 +4236,7 @@
     // this point does not assume that we are the only GC thread
     // running. Note: of course, the actual marking work will
     // not start until the safepoint itself is released in
-    // ConcurrentGCThread::safepoint_desynchronize().
+    // SuspendibleThreadSet::desynchronize().
     doConcurrentMark();
   }
 
@@ -4366,75 +4266,6 @@
   return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
 }
 
-void G1CollectedHeap::init_mutator_alloc_region() {
-  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
-  _mutator_alloc_region.init();
-}
-
-void G1CollectedHeap::release_mutator_alloc_region() {
-  _mutator_alloc_region.release();
-  assert(_mutator_alloc_region.get() == NULL, "post-condition");
-}
-
-void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
-  assert_at_safepoint(true /* should_be_vm_thread */);
-
-  _survivor_gc_alloc_region.init();
-  _old_gc_alloc_region.init();
-  HeapRegion* retained_region = _retained_old_gc_alloc_region;
-  _retained_old_gc_alloc_region = NULL;
-
-  // We will discard the current GC alloc region if:
-  // a) it's in the collection set (it can happen!),
-  // b) it's already full (no point in using it),
-  // c) it's empty (this means that it was emptied during
-  // a cleanup and it should be on the free list now), or
-  // d) it's humongous (this means that it was emptied
-  // during a cleanup and was added to the free list, but
-  // has been subsequently used to allocate a humongous
-  // object that may be less than the region size).
-  if (retained_region != NULL &&
-      !retained_region->in_collection_set() &&
-      !(retained_region->top() == retained_region->end()) &&
-      !retained_region->is_empty() &&
-      !retained_region->isHumongous()) {
-    retained_region->set_saved_mark();
-    // The retained region was added to the old region set when it was
-    // retired. We have to remove it now, since we don't allow regions
-    // we allocate to in the region sets. We'll re-add it later, when
-    // it's retired again.
-    _old_set.remove(retained_region);
-    bool during_im = g1_policy()->during_initial_mark_pause();
-    retained_region->note_start_of_copying(during_im);
-    _old_gc_alloc_region.set(retained_region);
-    _hr_printer.reuse(retained_region);
-    evacuation_info.set_alloc_regions_used_before(retained_region->used());
-  }
-}
-
-void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
-  evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() +
-                                         _old_gc_alloc_region.count());
-  _survivor_gc_alloc_region.release();
-  // If we have an old GC alloc region to release, we'll save it in
-  // _retained_old_gc_alloc_region. If we don't
-  // _retained_old_gc_alloc_region will become NULL. This is what we
-  // want either way so no reason to check explicitly for either
-  // condition.
-  _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
-
-  if (ResizePLAB) {
-    _survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
-    _old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
-  }
-}
-
-void G1CollectedHeap::abandon_gc_alloc_regions() {
-  assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
-  assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
-  _retained_old_gc_alloc_region = NULL;
-}
-
 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
   _drain_in_progress = false;
   set_evac_failure_closure(cl);
@@ -4575,25 +4406,26 @@
 }
 
 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
-                                                  size_t word_size) {
+                                                  size_t word_size,
+                                                  AllocationContext_t context) {
   if (purpose == GCAllocForSurvived) {
-    HeapWord* result = survivor_attempt_allocation(word_size);
+    HeapWord* result = survivor_attempt_allocation(word_size, context);
     if (result != NULL) {
       return result;
     } else {
       // Let's try to allocate in the old gen in case we can fit the
       // object there.
-      return old_attempt_allocation(word_size);
+      return old_attempt_allocation(word_size, context);
     }
   } else {
     assert(purpose ==  GCAllocForTenured, "sanity");
-    HeapWord* result = old_attempt_allocation(word_size);
+    HeapWord* result = old_attempt_allocation(word_size, context);
     if (result != NULL) {
       return result;
     } else {
       // Let's try to allocate in the survivors in case we can fit the
       // object there.
-      return survivor_attempt_allocation(word_size);
+      return survivor_attempt_allocation(word_size, context);
     }
   }
 
@@ -4602,154 +4434,20 @@
   return NULL;
 }
 
-G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
-  ParGCAllocBuffer(gclab_word_size), _retired(false) { }
-
-G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
-  : _g1h(g1h),
-    _refs(g1h->task_queue(queue_num)),
-    _dcq(&g1h->dirty_card_queue_set()),
-    _ct_bs(g1h->g1_barrier_set()),
-    _g1_rem(g1h->g1_rem_set()),
-    _hash_seed(17), _queue_num(queue_num),
-    _term_attempts(0),
-    _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
-    _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
-    _age_table(false), _scanner(g1h, this, rp),
-    _strong_roots_time(0), _term_time(0),
-    _alloc_buffer_waste(0), _undo_waste(0) {
-  // we allocate G1YoungSurvRateNumRegions plus one entries, since
-  // we "sacrifice" entry 0 to keep track of surviving bytes for
-  // non-young regions (where the age is -1)
-  // We also add a few elements at the beginning and at the end in
-  // an attempt to eliminate cache contention
-  uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
-  uint array_length = PADDING_ELEM_NUM +
-                      real_length +
-                      PADDING_ELEM_NUM;
-  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
-  if (_surviving_young_words_base == NULL)
-    vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
-                          "Not enough space for young surv histo.");
-  _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
-  memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
-
-  _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
-  _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
-
-  _start = os::elapsedTime();
-}
-
-void
-G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
-{
-  st->print_raw_cr("GC Termination Stats");
-  st->print_raw_cr("     elapsed  --strong roots-- -------termination-------"
-                   " ------waste (KiB)------");
-  st->print_raw_cr("thr     ms        ms      %        ms      %    attempts"
-                   "  total   alloc    undo");
-  st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
-                   " ------- ------- -------");
-}
-
-void
-G1ParScanThreadState::print_termination_stats(int i,
-                                              outputStream* const st) const
-{
-  const double elapsed_ms = elapsed_time() * 1000.0;
-  const double s_roots_ms = strong_roots_time() * 1000.0;
-  const double term_ms    = term_time() * 1000.0;
-  st->print_cr("%3d %9.2f %9.2f %6.2f "
-               "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
-               SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
-               i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
-               term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
-               (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
-               alloc_buffer_waste() * HeapWordSize / K,
-               undo_waste() * HeapWordSize / K);
-}
-
-#ifdef ASSERT
-bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
-  assert(ref != NULL, "invariant");
-  assert(UseCompressedOops, "sanity");
-  assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
-  oop p = oopDesc::load_decode_heap_oop(ref);
-  assert(_g1h->is_in_g1_reserved(p),
-         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
-  return true;
-}
-
-bool G1ParScanThreadState::verify_ref(oop* ref) const {
-  assert(ref != NULL, "invariant");
-  if (has_partial_array_mask(ref)) {
-    // Must be in the collection set--it's already been copied.
-    oop p = clear_partial_array_mask(ref);
-    assert(_g1h->obj_in_cs(p),
-           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
-  } else {
-    oop p = oopDesc::load_decode_heap_oop(ref);
-    assert(_g1h->is_in_g1_reserved(p),
-           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
-  }
-  return true;
-}
-
-bool G1ParScanThreadState::verify_task(StarTask ref) const {
-  if (ref.is_narrow()) {
-    return verify_ref((narrowOop*) ref);
-  } else {
-    return verify_ref((oop*) ref);
-  }
-}
-#endif // ASSERT
-
-void G1ParScanThreadState::trim_queue() {
-  assert(_evac_failure_cl != NULL, "not set");
-
-  StarTask ref;
-  do {
-    // Drain the overflow stack first, so other threads can steal.
-    while (refs()->pop_overflow(ref)) {
-      deal_with_reference(ref);
-    }
-
-    while (refs()->pop_local(ref)) {
-      deal_with_reference(ref);
-    }
-  } while (!refs()->is_empty());
-}
-
-G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
-                                     G1ParScanThreadState* par_scan_state) :
-  _g1(g1), _par_scan_state(par_scan_state),
-  _worker_id(par_scan_state->queue_num()) { }
-
 void G1ParCopyHelper::mark_object(oop obj) {
-#ifdef ASSERT
-  HeapRegion* hr = _g1->heap_region_containing(obj);
-  assert(hr != NULL, "sanity");
-  assert(!hr->in_collection_set(), "should not mark objects in the CSet");
-#endif // ASSERT
+  assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
 
   // We know that the object is not moving so it's safe to read its size.
   _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
 }
 
 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
-#ifdef ASSERT
   assert(from_obj->is_forwarded(), "from obj should be forwarded");
   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
   assert(from_obj != to_obj, "should not be self-forwarded");
 
-  HeapRegion* from_hr = _g1->heap_region_containing(from_obj);
-  assert(from_hr != NULL, "sanity");
-  assert(from_hr->in_collection_set(), "from obj should be in the CSet");
-
-  HeapRegion* to_hr = _g1->heap_region_containing(to_obj);
-  assert(to_hr != NULL, "sanity");
-  assert(!to_hr->in_collection_set(), "should not mark objects in the CSet");
-#endif // ASSERT
+  assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
+  assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
 
   // The object might be in the process of being copied by another
   // worker so we cannot trust that its to-space image is
@@ -4758,107 +4456,6 @@
   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
 }
 
-oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
-  size_t word_sz = old->size();
-  HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
-  // +1 to make the -1 indexes valid...
-  int       young_index = from_region->young_index_in_cset()+1;
-  assert( (from_region->is_young() && young_index >  0) ||
-         (!from_region->is_young() && young_index == 0), "invariant" );
-  G1CollectorPolicy* g1p = _g1h->g1_policy();
-  markOop m = old->mark();
-  int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
-                                           : m->age();
-  GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
-                                                             word_sz);
-  HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
-#ifndef PRODUCT
-  // Should this evacuation fail?
-  if (_g1h->evacuation_should_fail()) {
-    if (obj_ptr != NULL) {
-      undo_allocation(alloc_purpose, obj_ptr, word_sz);
-      obj_ptr = NULL;
-    }
-  }
-#endif // !PRODUCT
-
-  if (obj_ptr == NULL) {
-    // This will either forward-to-self, or detect that someone else has
-    // installed a forwarding pointer.
-    return _g1h->handle_evacuation_failure_par(this, old);
-  }
-
-  oop obj = oop(obj_ptr);
-
-  // We're going to allocate linearly, so might as well prefetch ahead.
-  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
-
-  oop forward_ptr = old->forward_to_atomic(obj);
-  if (forward_ptr == NULL) {
-    Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
-
-    // alloc_purpose is just a hint to allocate() above, recheck the type of region
-    // we actually allocated from and update alloc_purpose accordingly
-    HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr);
-    alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
-
-    if (g1p->track_object_age(alloc_purpose)) {
-      // We could simply do obj->incr_age(). However, this causes a
-      // performance issue. obj->incr_age() will first check whether
-      // the object has a displaced mark by checking its mark word;
-      // getting the mark word from the new location of the object
-      // stalls. So, given that we already have the mark word and we
-      // are about to install it anyway, it's better to increase the
-      // age on the mark word, when the object does not have a
-      // displaced mark word. We're not expecting many objects to have
-      // a displaced marked word, so that case is not optimized
-      // further (it could be...) and we simply call obj->incr_age().
-
-      if (m->has_displaced_mark_helper()) {
-        // in this case, we have to install the mark word first,
-        // otherwise obj looks to be forwarded (the old mark word,
-        // which contains the forward pointer, was copied)
-        obj->set_mark(m);
-        obj->incr_age();
-      } else {
-        m = m->incr_age();
-        obj->set_mark(m);
-      }
-      age_table()->add(obj, word_sz);
-    } else {
-      obj->set_mark(m);
-    }
-
-    if (G1StringDedup::is_enabled()) {
-      G1StringDedup::enqueue_from_evacuation(from_region->is_young(),
-                                             to_region->is_young(),
-                                             queue_num(),
-                                             obj);
-    }
-
-    size_t* surv_young_words = surviving_young_words();
-    surv_young_words[young_index] += word_sz;
-
-    if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
-      // We keep track of the next start index in the length field of
-      // the to-space object. The actual length can be found in the
-      // length field of the from-space object.
-      arrayOop(obj)->set_length(0);
-      oop* old_p = set_partial_array_mask(old);
-      push_on_queue(old_p);
-    } else {
-      // No point in using the slower heap_region_containing() method,
-      // given that we know obj is in the heap.
-      _scanner.set_region(_g1h->heap_region_containing_raw(obj));
-      obj->oop_iterate_backwards(&_scanner);
-    }
-  } else {
-    undo_allocation(alloc_purpose, obj_ptr, word_sz);
-    obj = forward_ptr;
-  }
-  return obj;
-}
-
 template <class T>
 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
   if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
@@ -4866,7 +4463,7 @@
   }
 }
 
-template <G1Barrier barrier, bool do_mark_object>
+template <G1Barrier barrier, G1Mark do_mark_object>
 template <class T>
 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
@@ -4879,7 +4476,9 @@
 
   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
 
-  if (_g1->in_cset_fast_test(obj)) {
+  G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj);
+
+  if (state == G1CollectedHeap::InCSet) {
     oop forwardee;
     if (obj->is_forwarded()) {
       forwardee = obj->forwardee();
@@ -4888,7 +4487,7 @@
     }
     assert(forwardee != NULL, "forwardee should not be NULL");
     oopDesc::encode_store_heap_oop(p, forwardee);
-    if (do_mark_object && forwardee != obj) {
+    if (do_mark_object != G1MarkNone && forwardee != obj) {
       // If the object is self-forwarded we don't need to explicitly
       // mark it, the evacuation failure protocol will do so.
       mark_forwarded_object(obj, forwardee);
@@ -4898,10 +4497,12 @@
       do_klass_barrier(p, forwardee);
     }
   } else {
+    if (state == G1CollectedHeap::IsHumongous) {
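+      // Finding a live reference to a candidate humongous object keeps it from
+      // being eagerly reclaimed at the end of this pause.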
+      _g1->set_humongous_is_live(obj);
+    }
     // The object is not in collection set. If we're a root scanning
-    // closure during an initial mark pause (i.e. do_mark_object will
-    // be true) then attempt to mark the object.
-    if (do_mark_object) {
+    // closure during an initial mark pause then attempt to mark the object.
+    if (do_mark_object == G1MarkFromRoot) {
       mark_object(obj);
     }
   }
@@ -4911,8 +4512,8 @@
   }
 }
 
-template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
-template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);
+template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(oop* p);
+template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(narrowOop* p);
 
 class G1ParEvacuateFollowersClosure : public VoidClosure {
 protected:
@@ -4948,27 +4549,11 @@
 }
 
 void G1ParEvacuateFollowersClosure::do_void() {
-  StarTask stolen_task;
   G1ParScanThreadState* const pss = par_scan_state();
   pss->trim_queue();
-
   do {
-    while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
-      assert(pss->verify_task(stolen_task), "sanity");
-      if (stolen_task.is_narrow()) {
-        pss->deal_with_reference((narrowOop*) stolen_task);
-      } else {
-        pss->deal_with_reference((oop*) stolen_task);
-      }
-
-      // We've just processed a reference and we might have made
-      // available new entries on the queues. So we have to make sure
-      // we drain the queues as necessary.
-      pss->trim_queue();
-    }
+    pss->steal_and_trim_queue(queues());
   } while (!offer_termination());
-
-  pss->retire_alloc_buffers();
 }
 
 class G1KlassScanClosure : public KlassClosure {
@@ -4997,6 +4582,56 @@
   }
 };
 
+class G1CodeBlobClosure : public CodeBlobClosure {
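+  // G1CodeBlobClosure: applies the wrapped OopClosure to the oops of each live,
+  // not yet visited nmethod and records the nmethod as a strong code root of
+  // every region containing an object it references.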
+  class HeapRegionGatheringOopClosure : public OopClosure {
+    G1CollectedHeap* _g1h;
+    OopClosure* _work;
+    nmethod* _nm;
+
+    template <typename T>
+    void do_oop_work(T* p) {
+      _work->do_oop(p);
+      T oop_or_narrowoop = oopDesc::load_heap_oop(p);
+      if (!oopDesc::is_null(oop_or_narrowoop)) {
+        oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
+        HeapRegion* hr = _g1h->heap_region_containing_raw(o);
+        assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in CS then evacuation failed and nm must already be in the remset");
+        hr->add_strong_code_root(_nm);
+      }
+    }
+
+  public:
+    HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {}
+
+    void do_oop(oop* o) {
+      do_oop_work(o);
+    }
+
+    void do_oop(narrowOop* o) {
+      do_oop_work(o);
+    }
+
+    void set_nm(nmethod* nm) {
+      _nm = nm;
+    }
+  };
+
+  HeapRegionGatheringOopClosure _oc;
+public:
+  G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}
+
+  void do_code_blob(CodeBlob* cb) {
+    nmethod* nm = cb->as_nmethod_or_null();
+    if (nm != NULL) {
+      if (!nm->test_set_oops_do_mark()) {
+        _oc.set_nm(nm);
+        nm->oops_do(&_oc);
+        nm->fix_oop_relocations();
+      }
+    }
+  }
+};
+
 class G1ParTask : public AbstractGangTask {
 protected:
   G1CollectedHeap*       _g1h;
@@ -5007,14 +4642,8 @@
   Mutex _stats_lock;
   Mutex* stats_lock() { return &_stats_lock; }
 
-  size_t getNCards() {
-    return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
-      / G1BlockOffsetSharedArray::N_bytes;
-  }
-
 public:
-  G1ParTask(G1CollectedHeap* g1h,
-            RefToScanQueueSet *task_queues)
+  G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues)
     : AbstractGangTask("G1 collection"),
       _g1h(g1h),
       _queues(task_queues),
@@ -5042,6 +4671,35 @@
     _n_workers = active_workers;
   }
 
+  // Helps out with CLD processing.
+  //
+  // During InitialMark we need to:
+  // 1) Scavenge all CLDs for the young GC.
+  // 2) Mark all objects directly reachable from strong CLDs.
+  template <G1Mark do_mark_object>
+  class G1CLDClosure : public CLDClosure {
+    G1ParCopyClosure<G1BarrierNone,  do_mark_object>* _oop_closure;
+    G1ParCopyClosure<G1BarrierKlass, do_mark_object>  _oop_in_klass_closure;
+    G1KlassScanClosure                                _klass_in_cld_closure;
+    bool                                              _claim;
+
+   public:
+    G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
+                 bool only_young, bool claim)
+        : _oop_closure(oop_closure),
+          _oop_in_klass_closure(oop_closure->g1(),
+                                oop_closure->pss(),
+                                oop_closure->rp()),
+          _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
+          _claim(claim) {
+
+    }
+
+    void do_cld(ClassLoaderData* cld) {
+      cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
+    }
+  };
+
   void work(uint worker_id) {
     if (worker_id >= _n_workers) return;  // no work needed this round
 
@@ -5059,40 +4717,67 @@
 
       pss.set_evac_failure_closure(&evac_failure_cl);
 
-      G1ParScanExtRootClosure        only_scan_root_cl(_g1h, &pss, rp);
-      G1ParScanMetadataClosure       only_scan_metadata_cl(_g1h, &pss, rp);
-
-      G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
-      G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
-
-      bool only_young                 = _g1h->g1_policy()->gcs_are_young();
-      G1KlassScanClosure              scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
-      G1KlassScanClosure              only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);
-
-      OopClosure*                    scan_root_cl = &only_scan_root_cl;
-      G1KlassScanClosure*            scan_klasses_cl = &only_scan_klasses_cl_s;
+      bool only_young = _g1h->g1_policy()->gcs_are_young();
+
+      // Non-IM young GC.
+      G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, &pss, rp);
+      G1CLDClosure<G1MarkNone>                                scan_only_cld_cl(&scan_only_root_cl,
+                                                                               only_young, // Only process dirty klasses.
+                                                                               false);     // No need to claim CLDs.
+      // IM young GC.
+      //    Strong roots closures.
+      G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot>         scan_mark_root_cl(_g1h, &pss, rp);
+      G1CLDClosure<G1MarkFromRoot>                            scan_mark_cld_cl(&scan_mark_root_cl,
+                                                                               false, // Process all klasses.
+                                                                               true); // Need to claim CLDs.
+      //    Weak roots closures.
+      G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
+      G1CLDClosure<G1MarkPromotedFromRoot>                    scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
+                                                                                    false, // Process all klasses.
+                                                                                    true); // Need to claim CLDs.
+
+      G1CodeBlobClosure scan_only_code_cl(&scan_only_root_cl);
+      G1CodeBlobClosure scan_mark_code_cl(&scan_mark_root_cl);
+      // IM Weak code roots are handled later.
+
+      OopClosure* strong_root_cl;
+      OopClosure* weak_root_cl;
+      CLDClosure* strong_cld_cl;
+      CLDClosure* weak_cld_cl;
+      CodeBlobClosure* strong_code_cl;
 
       if (_g1h->g1_policy()->during_initial_mark_pause()) {
         // We also need to mark copied objects.
-        scan_root_cl = &scan_mark_root_cl;
-        scan_klasses_cl = &scan_mark_klasses_cl_s;
+        strong_root_cl = &scan_mark_root_cl;
+        strong_cld_cl  = &scan_mark_cld_cl;
+        strong_code_cl = &scan_mark_code_cl;
+        if (ClassUnloadingWithConcurrentMark) {
+          weak_root_cl = &scan_mark_weak_root_cl;
+          weak_cld_cl  = &scan_mark_weak_cld_cl;
+        } else {
+          weak_root_cl = &scan_mark_root_cl;
+          weak_cld_cl  = &scan_mark_cld_cl;
+        }
+      } else {
+        strong_root_cl = &scan_only_root_cl;
+        weak_root_cl   = &scan_only_root_cl;
+        strong_cld_cl  = &scan_only_cld_cl;
+        weak_cld_cl    = &scan_only_cld_cl;
+        strong_code_cl = &scan_only_code_cl;
       }
 
-      G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
-
-      // Don't scan the scavengable methods in the code cache as part
-      // of strong root scanning. The code roots that point into a
-      // region in the collection set are scanned when we scan the
-      // region's RSet.
-      int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings;
+
+      G1ParPushHeapRSClosure  push_heap_rs_cl(_g1h, &pss);
 
       pss.start_strong_roots();
-      _g1h->g1_process_strong_roots(/* is scavenging */ true,
-                                    SharedHeap::ScanningOption(so),
-                                    scan_root_cl,
-                                    &push_heap_rs_cl,
-                                    scan_klasses_cl,
-                                    worker_id);
+      _g1h->g1_process_roots(strong_root_cl,
+                             weak_root_cl,
+                             &push_heap_rs_cl,
+                             strong_cld_cl,
+                             weak_cld_cl,
+                             strong_code_cl,
+                             worker_id);
+
       pss.end_strong_roots();
 
       {
@@ -5112,7 +4797,7 @@
         pss.print_termination_stats(worker_id);
       }
 
-      assert(pss.refs()->is_empty(), "should be empty");
+      assert(pss.queue_is_empty(), "should be empty");
 
       // Close the inner scope so that the ResourceMark and HandleMark
       // destructors are executed here and are included as part of the
@@ -5130,30 +4815,32 @@
 
 void
 G1CollectedHeap::
-g1_process_strong_roots(bool is_scavenging,
-                        ScanningOption so,
-                        OopClosure* scan_non_heap_roots,
-                        OopsInHeapRegionClosure* scan_rs,
-                        G1KlassScanClosure* scan_klasses,
-                        uint worker_i) {
-
-  // First scan the strong roots
+g1_process_roots(OopClosure* scan_non_heap_roots,
+                 OopClosure* scan_non_heap_weak_roots,
+                 OopsInHeapRegionClosure* scan_rs,
+                 CLDClosure* scan_strong_clds,
+                 CLDClosure* scan_weak_clds,
+                 CodeBlobClosure* scan_strong_code,
+                 uint worker_i) {
+
+  // First scan the shared roots.
   double ext_roots_start = os::elapsedTime();
   double closure_app_time_sec = 0.0;
 
+  bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
+  bool trace_metadata = during_im && ClassUnloadingWithConcurrentMark;
+
   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
-
-  assert(so & SO_CodeCache || scan_rs != NULL, "must scan code roots somehow");
-  // Walk the code cache/strong code roots w/o buffering, because StarTask
-  // cannot handle unaligned oop locations.
-  CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
-
-  process_strong_roots(false, // no scoping; this is parallel code
-                       is_scavenging, so,
-                       &buf_scan_non_heap_roots,
-                       &eager_scan_code_roots,
-                       scan_klasses
-                       );
+  BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
+
+  process_roots(false, // no scoping; this is parallel code
+                SharedHeap::SO_None,
+                &buf_scan_non_heap_roots,
+                &buf_scan_non_heap_weak_roots,
+                scan_strong_clds,
+                // Unloading Initial Marks handle the weak CLDs separately.
+                (trace_metadata ? NULL : scan_weak_clds),
+                scan_strong_code);
 
   // Now the CM ref_processor roots.
   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
@@ -5164,10 +4851,21 @@
     ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
   }
 
+  if (trace_metadata) {
+    // Barrier to make sure all workers passed
+    // the strong CLD and strong nmethods phases.
+    active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads());
+
+    // Now take the complement of the strong CLDs.
+    ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
+  }
+
   // Finish up any enqueued closure apps (attributed as object copy time).
   buf_scan_non_heap_roots.done();
-
-  double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds();
+  buf_scan_non_heap_weak_roots.done();
+
+  double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
+      + buf_scan_non_heap_weak_roots.closure_app_seconds();
 
   g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
 
@@ -5191,32 +4889,14 @@
   }
   g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
 
-  // If this is an initial mark pause, and we're not scanning
-  // the entire code cache, we need to mark the oops in the
-  // strong code root lists for the regions that are not in
-  // the collection set.
-  // Note all threads participate in this set of root tasks.
-  double mark_strong_code_roots_ms = 0.0;
-  if (g1_policy()->during_initial_mark_pause() && !(so & SO_CodeCache)) {
-    double mark_strong_roots_start = os::elapsedTime();
-    mark_strong_code_roots(worker_i);
-    mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
-  }
-  g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms);
-
   // Now scan the complement of the collection set.
-  if (scan_rs != NULL) {
-    g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
-  }
+  G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
+
+  g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
+
   _process_strong_tasks->all_tasks_completed();
 }
 
-void
-G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) {
-  CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
-  SharedHeap::process_weak_roots(root_closure, &roots_in_blobs);
-}
-
 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
 private:
   BoolObjectClosure* _is_alive;
@@ -5234,7 +4914,8 @@
   bool _do_in_parallel;
 public:
   G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
-    AbstractGangTask("Par String/Symbol table unlink"), _is_alive(is_alive),
+    AbstractGangTask("String/Symbol Unlinking"),
+    _is_alive(is_alive),
     _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),
     _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
     _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
@@ -5256,6 +4937,14 @@
     guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
               err_msg("claim value "INT32_FORMAT" after unlink less than initial symbol table size "INT32_FORMAT,
                       SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
+
+    if (G1TraceStringSymbolTableScrubbing) {
+      gclog_or_tty->print_cr("Cleaned string and symbol table, "
+                             "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
+                             "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
+                             strings_processed(), strings_removed(),
+                             symbols_processed(), symbols_removed());
+    }
   }
 
   void work(uint worker_id) {
@@ -5291,12 +4980,300 @@
   size_t symbols_removed()   const { return (size_t)_symbols_removed; }
 };
 
-void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
-                                                     bool process_strings, bool process_symbols) {
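+// Parallel, two-pass cleaning and unloading of nmethods. Workers claim small
+// batches of nmethods for the first pass; nmethods whose cleaning depends on
+// other, not yet processed nmethods are postponed to a second pass that runs
+// once every worker has finished the first one.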
+class G1CodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC {
+private:
+  static Monitor* _lock;
+
+  BoolObjectClosure* const _is_alive;
+  const bool               _unloading_occurred;
+  const uint               _num_workers;
+
+  // Variables used to claim nmethods.
+  nmethod* _first_nmethod;
+  volatile nmethod* _claimed_nmethod;
+
+  // The list of nmethods that need to be processed by the second pass.
+  volatile nmethod* _postponed_list;
+  volatile uint     _num_entered_barrier;
+
+ public:
+  G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
+      _is_alive(is_alive),
+      _unloading_occurred(unloading_occurred),
+      _num_workers(num_workers),
+      _first_nmethod(NULL),
+      _claimed_nmethod(NULL),
+      _postponed_list(NULL),
+      _num_entered_barrier(0)
+  {
+    nmethod::increase_unloading_clock();
+    _first_nmethod = CodeCache::alive_nmethod(CodeCache::first());
+    _claimed_nmethod = (volatile nmethod*)_first_nmethod;
+  }
+
+  ~G1CodeCacheUnloadingTask() {
+    CodeCache::verify_clean_inline_caches();
+
+    CodeCache::set_needs_cache_clean(false);
+    guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");
+
+    CodeCache::verify_icholder_relocations();
+  }
+
+ private:
+  void add_to_postponed_list(nmethod* nm) {
+    nmethod* old;
+    do {
+      old = (nmethod*)_postponed_list;
+      nm->set_unloading_next(old);
+    } while ((nmethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
+  }
+
+  void clean_nmethod(nmethod* nm) {
+    bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);
+
+    if (postponed) {
+      // This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
+      add_to_postponed_list(nm);
+    }
+
+    // Mark that this nmethod has been cleaned/unloaded.
+    // After this call, it will be safe to ask if this nmethod was unloaded or not.
+    nm->set_unloading_clock(nmethod::global_unloading_clock());
+  }
+
+  void clean_nmethod_postponed(nmethod* nm) {
+    nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
+  }
+
+  static const int MaxClaimNmethods = 16;
+
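+  // Atomically claims up to MaxClaimNmethods alive nmethods by advancing the
+  // shared claim pointer with a CAS.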
+  void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) {
+    nmethod* first;
+    nmethod* last;
+
+    do {
+      *num_claimed_nmethods = 0;
+
+      first = last = (nmethod*)_claimed_nmethod;
+
+      if (first != NULL) {
+        for (int i = 0; i < MaxClaimNmethods; i++) {
+          last = CodeCache::alive_nmethod(CodeCache::next(last));
+
+          if (last == NULL) {
+            break;
+          }
+
+          claimed_nmethods[i] = last;
+          (*num_claimed_nmethods)++;
+        }
+      }
+
+    } while ((nmethod*)Atomic::cmpxchg_ptr(last, &_claimed_nmethod, first) != first);
+  }
+
+  nmethod* claim_postponed_nmethod() {
+    nmethod* claim;
+    nmethod* next;
+
+    do {
+      claim = (nmethod*)_postponed_list;
+      if (claim == NULL) {
+        return NULL;
+      }
+
+      next = claim->unloading_next();
+
+    } while ((nmethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
+
+    return claim;
+  }
+
+ public:
+  // Mark that we're done with the first pass of nmethod cleaning.
+  void barrier_mark(uint worker_id) {
+    MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
+    _num_entered_barrier++;
+    if (_num_entered_barrier == _num_workers) {
+      ml.notify_all();
+    }
+  }
+
+  // See if we have to wait for the other workers to
+  // finish their first-pass nmethod cleaning work.
+  void barrier_wait(uint worker_id) {
+    if (_num_entered_barrier < _num_workers) {
+      MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
+      while (_num_entered_barrier < _num_workers) {
+        ml.wait(Mutex::_no_safepoint_check_flag, 0, false);
+      }
+    }
+  }
+
+  // Cleaning and unloading of nmethods. Some work has to be postponed
+  // to the second pass, when we know which nmethods survive.
+  void work_first_pass(uint worker_id) {
+    // The first nmethod is claimed by the first worker.
+    if (worker_id == 0 && _first_nmethod != NULL) {
+      clean_nmethod(_first_nmethod);
+      _first_nmethod = NULL;
+    }
+
+    int num_claimed_nmethods;
+    nmethod* claimed_nmethods[MaxClaimNmethods];
+
+    while (true) {
+      claim_nmethods(claimed_nmethods, &num_claimed_nmethods);
+
+      if (num_claimed_nmethods == 0) {
+        break;
+      }
+
+      for (int i = 0; i < num_claimed_nmethods; i++) {
+        clean_nmethod(claimed_nmethods[i]);
+      }
+    }
+
+    // The nmethod cleaning helps out and does the CodeCache part of MetadataOnStackMark.
+    // Need to retire the buffers now that this thread has stopped cleaning nmethods.
+    MetadataOnStackMark::retire_buffer_for_thread(Thread::current());
+  }
+
+  void work_second_pass(uint worker_id) {
+    nmethod* nm;
+    // Take care of postponed nmethods.
+    while ((nm = claim_postponed_nmethod()) != NULL) {
+      clean_nmethod_postponed(nm);
+    }
+  }
+};
+
+Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock");
+
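+// Parallel klass cleaning: one worker claims the subklass/sibling tree cleaning,
+// while all workers claim and clean individual InstanceKlasses.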
+class G1KlassCleaningTask : public StackObj {
+  BoolObjectClosure*                      _is_alive;
+  volatile jint                           _clean_klass_tree_claimed;
+  ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;
+
+ public:
+  G1KlassCleaningTask(BoolObjectClosure* is_alive) :
+      _is_alive(is_alive),
+      _clean_klass_tree_claimed(0),
+      _klass_iterator() {
+  }
+
+ private:
+  bool claim_clean_klass_tree_task() {
+    if (_clean_klass_tree_claimed) {
+      return false;
+    }
+
+    return Atomic::cmpxchg(1, (jint*)&_clean_klass_tree_claimed, 0) == 0;
+  }
+
+  InstanceKlass* claim_next_klass() {
+    Klass* klass;
+    do {
+      klass =_klass_iterator.next_klass();
+    } while (klass != NULL && !klass->oop_is_instance());
+
+    return (InstanceKlass*)klass;
+  }
+
+public:
+
+  void clean_klass(InstanceKlass* ik) {
+    ik->clean_implementors_list(_is_alive);
+    ik->clean_method_data(_is_alive);
+
+    // G1 specific cleanup work that has
+    // been moved here to be done in parallel.
+    ik->clean_dependent_nmethods();
+    if (JvmtiExport::has_redefined_a_class()) {
+      InstanceKlass::purge_previous_versions(ik);
+    }
+  }
+
+  void work() {
+    ResourceMark rm;
+
+    // One worker will clean the subklass/sibling klass tree.
+    if (claim_clean_klass_tree_task()) {
+      Klass::clean_subklass_tree(_is_alive);
+    }
+
+    // All workers will help clean the classes.
+    InstanceKlass* klass;
+    while ((klass = claim_next_klass()) != NULL) {
+      clean_klass(klass);
+    }
+  }
+};
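claim_clean_klass_tree_task() above is a claim-once flag: exactly one worker wins the transition from 0 to 1 and cleans the subklass/sibling tree, every other worker falls through to claiming individual klasses. The same idea in portable C++ (illustration only, std::atomic instead of Atomic::cmpxchg):

#include <atomic>

// Returns true for exactly one caller; every other caller gets false.
static bool claim_once(std::atomic<int>& claimed) {
  int expected = 0;
  return claimed.compare_exchange_strong(expected, 1);
}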
+
+// To minimize the remark pause times, the tasks below are done in parallel.
+class G1ParallelCleaningTask : public AbstractGangTask {
+private:
+  G1StringSymbolTableUnlinkTask _string_symbol_task;
+  G1CodeCacheUnloadingTask      _code_cache_task;
+  G1KlassCleaningTask           _klass_cleaning_task;
+
+public:
+  // The constructor is run in the VMThread.
+  G1ParallelCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, uint num_workers, bool unloading_occurred) :
+      AbstractGangTask("Parallel Cleaning"),
+      _string_symbol_task(is_alive, process_strings, process_symbols),
+      _code_cache_task(num_workers, is_alive, unloading_occurred),
+      _klass_cleaning_task(is_alive) {
+  }
+
+  void pre_work_verification() {
+    // The VM Thread will have registered Metadata during the single-threaded phase of MetadataOnStackMark.
+    assert(Thread::current()->is_VM_thread()
+           || !MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
+  }
+
+  void post_work_verification() {
+    assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
+  }
+
+  // The parallel work done by all worker threads.
+  void work(uint worker_id) {
+    pre_work_verification();
+
+    // Do first pass of code cache cleaning.
+    _code_cache_task.work_first_pass(worker_id);
+
+    // Let the threads mark that the first pass is done.
+    _code_cache_task.barrier_mark(worker_id);
+
+    // Clean the Strings and Symbols.
+    _string_symbol_task.work(worker_id);
+
+    // Wait for all workers to finish the first code cache cleaning pass.
+    _code_cache_task.barrier_wait(worker_id);
+
+    // Do the second code cache cleaning work, which relies on
+    // the liveness information gathered during the first pass.
+    _code_cache_task.work_second_pass(worker_id);
+
+    // Clean all klasses that were not unloaded.
+    _klass_cleaning_task.work();
+
+    post_work_verification();
+  }
+};
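The per-worker ordering in work() above is: first-pass code cache cleaning, then string/symbol cleaning (useful work that overlaps the rendezvous), then a wait until every worker has finished its first pass, and only then the second pass and klass cleaning. The rendezvous itself (barrier_mark()/barrier_wait() in G1CodeCacheUnloadingTask) is a counter guarded by a monitor; a rough portable-C++ equivalent, sketched with std::condition_variable rather than HotSpot's MonitorLockerEx:

#include <condition_variable>
#include <mutex>

class FirstPassBarrier {
  std::mutex              _mu;
  std::condition_variable _cv;
  unsigned                _entered = 0;
  const unsigned          _num_workers;
 public:
  explicit FirstPassBarrier(unsigned num_workers) : _num_workers(num_workers) {}

  // A worker calls this once it finishes its first-pass work (cf. barrier_mark()).
  void mark() {
    std::lock_guard<std::mutex> lk(_mu);
    if (++_entered == _num_workers) {
      _cv.notify_all();
    }
  }

  // A worker calls this before starting second-pass work (cf. barrier_wait()).
  void wait() {
    std::unique_lock<std::mutex> lk(_mu);
    _cv.wait(lk, [this] { return _entered >= _num_workers; });
  }
};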
+
+
+void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
+                                        bool process_strings,
+                                        bool process_symbols,
+                                        bool class_unloading_occurred) {
   uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
-                   _g1h->workers()->active_workers() : 1);
-
-  G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
+                    workers()->active_workers() : 1);
+
+  G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
+                                        n_workers, class_unloading_occurred);
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     set_par_threads(n_workers);
     workers()->run_task(&g1_unlink_task);
@@ -5304,12 +5281,21 @@
   } else {
     g1_unlink_task.work(0);
   }
-  if (G1TraceStringSymbolTableScrubbing) {
-    gclog_or_tty->print_cr("Cleaned string and symbol table, "
-                           "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
-                           "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
-                           g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(),
-                           g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed());
+}
+
+void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
+                                                     bool process_strings, bool process_symbols) {
+  {
+    uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
+                     _g1h->workers()->active_workers() : 1);
+    G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
+    if (G1CollectedHeap::use_parallel_gc_threads()) {
+      set_par_threads(n_workers);
+      workers()->run_task(&g1_unlink_task);
+      set_par_threads(0);
+    } else {
+      g1_unlink_task.work(0);
+    }
   }
 
   if (G1StringDedup::is_enabled()) {
@@ -5317,21 +5303,43 @@
   }
 }
 
-class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
-public:
-  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
-    *card_ptr = CardTableModRefBS::dirty_card_val();
-    return true;
+class G1RedirtyLoggedCardsTask : public AbstractGangTask {
+ private:
+  DirtyCardQueueSet* _queue;
+ public:
+  G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
+
+  virtual void work(uint worker_id) {
+    double start_time = os::elapsedTime();
+
+    RedirtyLoggedCardTableEntryClosure cl;
+    if (G1CollectedHeap::heap()->use_parallel_gc_threads()) {
+      _queue->par_apply_closure_to_all_completed_buffers(&cl);
+    } else {
+      _queue->apply_closure_to_all_completed_buffers(&cl);
+    }
+
+    G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
+    timer->record_redirty_logged_cards_time_ms(worker_id, (os::elapsedTime() - start_time) * 1000.0);
+    timer->record_redirty_logged_cards_processed_cards(worker_id, cl.num_processed());
   }
 };
 
 void G1CollectedHeap::redirty_logged_cards() {
-  guarantee(G1DeferredRSUpdate, "Must only be called when using deferred RS updates.");
   double redirty_logged_cards_start = os::elapsedTime();
 
-  RedirtyLoggedCardTableEntryFastClosure redirty;
-  dirty_card_queue_set().set_closure(&redirty);
-  dirty_card_queue_set().apply_closure_to_all_completed_buffers();
+  uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
+                   _g1h->workers()->active_workers() : 1);
+
+  G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
+  dirty_card_queue_set().reset_for_par_iteration();
+  if (use_parallel_gc_threads()) {
+    set_par_threads(n_workers);
+    workers()->run_task(&redirty_task);
+    set_par_threads(0);
+  } else {
+    redirty_task.work(0);
+  }
 
   DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
   dcq.merge_bufferlists(&dirty_card_queue_set());
@@ -5370,12 +5378,22 @@
 public:
   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
-  void do_oop(      oop* p) {
+  void do_oop(oop* p) {
     oop obj = *p;
-
-    if (_g1->obj_in_cs(obj)) {
+    assert(obj != NULL, "the caller should have filtered out NULL values");
+
+    G1CollectedHeap::in_cset_state_t cset_state = _g1->in_cset_state(obj);
+    if (cset_state == G1CollectedHeap::InNeither) {
+      return;
+    }
+    if (cset_state == G1CollectedHeap::InCSet) {
       assert( obj->is_forwarded(), "invariant" );
       *p = obj->forwardee();
+    } else {
+      assert(!obj->is_forwarded(), "invariant" );
+      assert(cset_state == G1CollectedHeap::IsHumongous,
+             err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state));
+      _g1->set_humongous_is_live(obj);
     }
   }
 };
@@ -5388,17 +5406,14 @@
 class G1CopyingKeepAliveClosure: public OopClosure {
   G1CollectedHeap*         _g1h;
   OopClosure*              _copy_non_heap_obj_cl;
-  OopsInHeapRegionClosure* _copy_metadata_obj_cl;
   G1ParScanThreadState*    _par_scan_state;
 
 public:
   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
                             OopClosure* non_heap_obj_cl,
-                            OopsInHeapRegionClosure* metadata_obj_cl,
                             G1ParScanThreadState* pss):
     _g1h(g1h),
     _copy_non_heap_obj_cl(non_heap_obj_cl),
-    _copy_metadata_obj_cl(metadata_obj_cl),
     _par_scan_state(pss)
   {}
 
@@ -5408,7 +5423,7 @@
   template <class T> void do_oop_work(T* p) {
     oop obj = oopDesc::load_decode_heap_oop(p);
 
-    if (_g1h->obj_in_cs(obj)) {
+    if (_g1h->is_in_cset_or_humongous(obj)) {
       // If the referent object has been forwarded (either copied
       // to a new location or to itself in the event of an
       // evacuation failure) then we need to update the reference
@@ -5431,12 +5446,12 @@
         _par_scan_state->push_on_queue(p);
       } else {
         assert(!Metaspace::contains((const void*)p),
-               err_msg("Otherwise need to call _copy_metadata_obj_cl->do_oop(p) "
+               err_msg("Unexpectedly found a pointer from metadata: "
                               PTR_FORMAT, p));
-          _copy_non_heap_obj_cl->do_oop(p);
-        }
+        _copy_non_heap_obj_cl->do_oop(p);
       }
     }
+  }
 };
 
 // Serial drain queue closure. Called as the 'complete_gc'
@@ -5526,22 +5541,18 @@
     pss.set_evac_failure_closure(&evac_failure_cl);
 
     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
-    G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
 
     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
-    G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
 
     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
-    OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
 
     if (_g1h->g1_policy()->during_initial_mark_pause()) {
       // We also need to mark copied objects.
       copy_non_heap_cl = &copy_mark_non_heap_cl;
-      copy_metadata_cl = &copy_mark_metadata_cl;
     }
 
     // Keep alive closure.
-    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);
+    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);
 
     // Complete GC closure
     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
@@ -5632,22 +5643,17 @@
 
     pss.set_evac_failure_closure(&evac_failure_cl);
 
-    assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
-
+    assert(pss.queue_is_empty(), "both queue and overflow should be empty");
 
     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
-    G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
 
     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
-    G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
 
     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
-    OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
 
     if (_g1h->g1_policy()->during_initial_mark_pause()) {
       // We also need to mark copied objects.
       copy_non_heap_cl = &copy_mark_non_heap_cl;
-      copy_metadata_cl = &copy_mark_metadata_cl;
     }
 
     // Is alive closure
@@ -5655,7 +5661,7 @@
 
     // Copying keep alive closure. Applied to referent objects that need
     // to be copied.
-    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);
+    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);
 
     ReferenceProcessor* rp = _g1h->ref_processor_cm();
 
@@ -5691,7 +5697,7 @@
     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
     drain_queue.do_void();
     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
-    assert(pss.refs()->is_empty(), "should be");
+    assert(pss.queue_is_empty(), "should be");
   }
 };
 
@@ -5758,25 +5764,21 @@
 
   pss.set_evac_failure_closure(&evac_failure_cl);
 
-  assert(pss.refs()->is_empty(), "pre-condition");
+  assert(pss.queue_is_empty(), "pre-condition");
 
   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
-  G1ParScanMetadataClosure       only_copy_metadata_cl(this, &pss, NULL);
 
   G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
-  G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(this, &pss, NULL);
 
   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
-  OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
 
   if (_g1h->g1_policy()->during_initial_mark_pause()) {
     // We also need to mark copied objects.
     copy_non_heap_cl = &copy_mark_non_heap_cl;
-    copy_metadata_cl = &copy_mark_metadata_cl;
   }
 
   // Keep alive closure.
-  G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_metadata_cl, &pss);
+  G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, &pss);
 
   // Serial Complete GC closure
   G1STWDrainQueueClosure drain_queue(this, &pss);
@@ -5791,7 +5793,8 @@
                                               &keep_alive,
                                               &drain_queue,
                                               NULL,
-                                              _gc_timer_stw);
+                                              _gc_timer_stw,
+                                              _gc_tracer_stw->gc_id());
   } else {
     // Parallel reference processing
     assert(rp->num_q() == no_of_gc_workers, "sanity");
@@ -5802,15 +5805,14 @@
                                               &keep_alive,
                                               &drain_queue,
                                               &par_task_executor,
-                                              _gc_timer_stw);
+                                              _gc_timer_stw,
+                                              _gc_tracer_stw->gc_id());
   }
 
   _gc_tracer_stw->report_gc_reference_stats(stats);
-  // We have completed copying any necessary live referent objects
-  // (that were not copied during the actual pause) so we can
-  // retire any active alloc buffers
-  pss.retire_alloc_buffers();
-  assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
+
+  // We have completed copying any necessary live referent objects.
+  assert(pss.queue_is_empty(), "both queue and overflow should be empty");
 
   double ref_proc_time = os::elapsedTime() - ref_proc_start;
   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
@@ -5895,6 +5897,10 @@
 
   {
     StrongRootsScope srs(this);
+    // InitialMark needs claim bits to keep track of the marked-through CLDs.
+    if (g1_policy()->during_initial_mark_pause()) {
+      ClassLoaderDataGraph::clear_claimed_marks();
+    }
 
     if (G1CollectedHeap::use_parallel_gc_threads()) {
       // The individual threads will set their evac-failure closures.
@@ -5943,7 +5949,7 @@
     }
   }
 
-  release_gc_alloc_regions(n_workers, evacuation_info);
+  _allocator->release_gc_alloc_regions(n_workers, evacuation_info);
   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
 
   // Reset and re-enable the hot card cache.
@@ -5952,12 +5958,6 @@
   hot_card_cache->reset_hot_cache();
   hot_card_cache->set_use_cache(true);
 
-  // Migrate the strong code roots attached to each region in
-  // the collection set. Ideally we would like to do this
-  // after we have finished the scanning/evacuation of the
-  // strong code roots for a particular heap region.
-  migrate_strong_code_roots();
-
   purge_code_root_memory();
 
   if (g1_policy()->during_initial_mark_pause()) {
@@ -5985,9 +5985,7 @@
   // RSets.
   enqueue_discovered_references(n_workers);
 
-  if (G1DeferredRSUpdate) {
-    redirty_logged_cards();
-  }
+  redirty_logged_cards();
   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 }
 
@@ -5995,10 +5993,16 @@
                                   FreeRegionList* free_list,
                                   bool par,
                                   bool locked) {
-  assert(!hr->isHumongous(), "this is only for non-humongous regions");
+  assert(!hr->is_free(), "the region should not be free");
   assert(!hr->is_empty(), "the region should not be empty");
+  assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
   assert(free_list != NULL, "pre-condition");
 
+  if (G1VerifyBitmaps) {
+    MemRegion mr(hr->bottom(), hr->end());
+    concurrent_mark()->clearRangePrevBitmap(mr);
+  }
+
   // Clear the card counts for this region.
   // Note: we only need to do this if the region is not young
   // (since we don't refine cards in young regions).
@@ -6019,14 +6023,14 @@
   // We need to read this before we make the region non-humongous,
   // otherwise the information will be gone.
   uint last_index = hr->last_hc_index();
-  hr->set_notHumongous();
+  hr->clear_humongous();
   free_region(hr, free_list, par);
 
-  uint i = hr->hrs_index() + 1;
+  uint i = hr->hrm_index() + 1;
   while (i < last_index) {
     HeapRegion* curr_hr = region_at(i);
     assert(curr_hr->continuesHumongous(), "invariant");
-    curr_hr->set_notHumongous();
+    curr_hr->clear_humongous();
     free_region(curr_hr, free_list, par);
     i += 1;
   }
@@ -6046,15 +6050,12 @@
   assert(list != NULL, "list can't be null");
   if (!list->is_empty()) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    _free_list.add_ordered(list);
+    _hrm.insert_list_into_free_list(list);
   }
 }
 
 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
-  assert(_summary_bytes_used >= bytes,
-         err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
-                  _summary_bytes_used, bytes));
-  _summary_bytes_used -= bytes;
+  _allocator->decrease_used(bytes);
 }
 
 class G1ParCleanupCTTask : public AbstractGangTask {
@@ -6133,7 +6134,87 @@
 void G1CollectedHeap::verify_dirty_young_regions() {
   verify_dirty_young_list(_young_list->first_region());
 }
-#endif
+
+bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
+                                               HeapWord* tams, HeapWord* end) {
+  guarantee(tams <= end,
+            err_msg("tams: "PTR_FORMAT" end: "PTR_FORMAT, tams, end));
+  HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
+  if (result < end) {
+    gclog_or_tty->cr();
+    gclog_or_tty->print_cr("## wrong marked address on %s bitmap: "PTR_FORMAT,
+                           bitmap_name, result);
+    gclog_or_tty->print_cr("## %s tams: "PTR_FORMAT" end: "PTR_FORMAT,
+                           bitmap_name, tams, end);
+    return false;
+  }
+  return true;
+}
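verify_no_bits_over_tams() checks the marking invariant that no bitmap bit may be set at or above a region's top-at-mark-start (TAMS): objects allocated above TAMS are treated as implicitly live and are never marked explicitly, so any mark found there indicates a bug. A toy illustration of the check over a plain bit vector (assumed layout, not the real CMBitMapRO interface):

#include <cstddef>
#include <vector>

// Toy stand-in: one mark bit per word, index 0 corresponding to the region bottom.
static bool no_bits_over_tams(const std::vector<bool>& mark_bits,
                              size_t tams_index, size_t end_index) {
  for (size_t i = tams_index; i < end_index; ++i) {
    if (mark_bits[i]) {
      return false;   // a mark at or above TAMS violates the invariant
    }
  }
  return true;
}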
+
+bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
+  CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
+  CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
+
+  HeapWord* bottom = hr->bottom();
+  HeapWord* ptams  = hr->prev_top_at_mark_start();
+  HeapWord* ntams  = hr->next_top_at_mark_start();
+  HeapWord* end    = hr->end();
+
+  bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
+
+  bool res_n = true;
+  // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
+  // we do the clearing of the next bitmap concurrently. Thus, we cannot verify the bitmap
+  // if we happen to be in that state.
+  if (mark_in_progress() || !_cmThread->in_progress()) {
+    res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
+  }
+  if (!res_p || !res_n) {
+    gclog_or_tty->print_cr("#### Bitmap verification failed for "HR_FORMAT,
+                           HR_FORMAT_PARAMS(hr));
+    gclog_or_tty->print_cr("#### Caller: %s", caller);
+    return false;
+  }
+  return true;
+}
+
+void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
+  if (!G1VerifyBitmaps) return;
+
+  guarantee(verify_bitmaps(caller, hr), "bitmap verification");
+}
+
+class G1VerifyBitmapClosure : public HeapRegionClosure {
+private:
+  const char* _caller;
+  G1CollectedHeap* _g1h;
+  bool _failures;
+
+public:
+  G1VerifyBitmapClosure(const char* caller, G1CollectedHeap* g1h) :
+    _caller(caller), _g1h(g1h), _failures(false) { }
+
+  bool failures() { return _failures; }
+
+  virtual bool doHeapRegion(HeapRegion* hr) {
+    if (hr->continuesHumongous()) return false;
+
+    bool result = _g1h->verify_bitmaps(_caller, hr);
+    if (!result) {
+      _failures = true;
+    }
+    return false;
+  }
+};
+
+void G1CollectedHeap::check_bitmaps(const char* caller) {
+  if (!G1VerifyBitmaps) return;
+
+  G1VerifyBitmapClosure cl(caller, this);
+  heap_region_iterate(&cl);
+  guarantee(!cl.failures(), "bitmap verification");
+}
+#endif // PRODUCT
 
 void G1CollectedHeap::cleanUpCardTable() {
   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
@@ -6254,9 +6335,9 @@
       if (cur->is_young()) {
         cur->set_young_index_in_cset(-1);
       }
-      cur->set_not_young();
       cur->set_evacuation_failed(false);
       // The region is now considered to be old.
+      cur->set_old();
       _old_set.add(cur);
       evacuation_info.increment_collectionset_used_after(cur->used());
     }
@@ -6282,6 +6363,154 @@
   policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
 }
 
+class G1FreeHumongousRegionClosure : public HeapRegionClosure {
+ private:
+  FreeRegionList* _free_region_list;
+  HeapRegionSet* _proxy_set;
+  HeapRegionSetCount _humongous_regions_removed;
+  size_t _freed_bytes;
+ public:
+
+  G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
+    _free_region_list(free_region_list), _humongous_regions_removed(), _freed_bytes(0) {
+  }
+
+  virtual bool doHeapRegion(HeapRegion* r) {
+    if (!r->startsHumongous()) {
+      return false;
+    }
+
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+    oop obj = (oop)r->bottom();
+    CMBitMap* next_bitmap = g1h->concurrent_mark()->nextMarkBitMap();
+
+    // The following checks for whether the humongous object is live are sufficient.
+    // The main additional check (in addition to having a reference from the roots
+    // or the young gen) is whether the humongous object has a remembered set entry.
+    //
+    // A humongous object cannot be live if there is no remembered set for it
+    // because:
+    // - there can be no references from within humongous starts regions referencing
+    // the object because we never allocate other objects into them.
+    // (I.e. there are no intra-region references that may be missed by the
+    // remembered set)
+    // - as soon as there is a remembered set entry to the humongous starts region
+    // (i.e. it has "escaped" to an old object), that remembered set entry will stay
+    // until the end of a concurrent mark.
+    //
+    // It is not required to check whether the object has been found dead by marking
+    // or not; in fact, doing so would prevent reclamation within a concurrent cycle, as
+    // all objects allocated during that time are considered live.
+    // SATB marking is even more conservative than the remembered set.
+    // So if at this point in the collection there is no remembered set entry,
+    // nobody has a reference to it.
+    // At the start of collection we flush all refinement logs, and remembered sets
+    // are completely up-to-date with respect to references to the humongous object.
+    //
+    // Other implementation considerations:
+    // - never consider object arrays: while they are a valid target, they have not
+    // been observed to be used as temporary objects.
+    // - they would also require considerable effort for cleaning up the remembered
+    // sets.
+    // While this cleanup is not strictly necessary (nor does it need to be done
+    // instantly), object arrays occur very rarely as candidates, so skipping them
+    // saves us this additional complexity.
+    uint region_idx = r->hrm_index();
+    if (g1h->humongous_is_live(region_idx) ||
+        g1h->humongous_region_is_always_live(region_idx)) {
+
+      if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
+        gclog_or_tty->print_cr("Live humongous %d region %d with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
+                               r->isHumongous(),
+                               region_idx,
+                               r->rem_set()->occupied(),
+                               r->rem_set()->strong_code_roots_list_length(),
+                               next_bitmap->isMarked(r->bottom()),
+                               g1h->humongous_is_live(region_idx),
+                               obj->is_objArray()
+                              );
+      }
+
+      return false;
+    }
+
+    guarantee(!obj->is_objArray(),
+              err_msg("Eagerly reclaiming object arrays is not supported, but the object "PTR_FORMAT" is.",
+                      r->bottom()));
+
+    if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
+      gclog_or_tty->print_cr("Reclaim humongous region %d start "PTR_FORMAT" region %d length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
+                             r->isHumongous(),
+                             r->bottom(),
+                             region_idx,
+                             r->region_num(),
+                             r->rem_set()->occupied(),
+                             r->rem_set()->strong_code_roots_list_length(),
+                             next_bitmap->isMarked(r->bottom()),
+                             g1h->humongous_is_live(region_idx),
+                             obj->is_objArray()
+                            );
+    }
+    // Need to clear mark bit of the humongous object if already set.
+    if (next_bitmap->isMarked(r->bottom())) {
+      next_bitmap->clear(r->bottom());
+    }
+    _freed_bytes += r->used();
+    r->set_containing_set(NULL);
+    _humongous_regions_removed.increment(1u, r->capacity());
+    g1h->free_humongous_region(r, _free_region_list, false);
+
+    return false;
+  }
+
+  HeapRegionSetCount& humongous_free_count() {
+    return _humongous_regions_removed;
+  }
+
+  size_t bytes_freed() const {
+    return _freed_bytes;
+  }
+
+  size_t humongous_reclaimed() const {
+    return _humongous_regions_removed.length();
+  }
+};
+
+void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
+  assert_at_safepoint(true);
+
+  if (!G1ReclaimDeadHumongousObjectsAtYoungGC || !_has_humongous_reclaim_candidates) {
+    g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
+    return;
+  }
+
+  double start_time = os::elapsedTime();
+
+  FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
+
+  G1FreeHumongousRegionClosure cl(&local_cleanup_list);
+  heap_region_iterate(&cl);
+
+  HeapRegionSetCount empty_set;
+  remove_from_old_sets(empty_set, cl.humongous_free_count());
+
+  G1HRPrinter* hr_printer = _g1h->hr_printer();
+  if (hr_printer->is_active()) {
+    FreeRegionListIterator iter(&local_cleanup_list);
+    while (iter.more_available()) {
+      HeapRegion* hr = iter.get_next();
+      hr_printer->cleanup(hr);
+    }
+  }
+
+  prepend_to_freelist(&local_cleanup_list);
+  decrement_summary_bytes(cl.bytes_freed());
+
+  g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
+                                                                    cl.humongous_reclaimed());
+}
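Taken together, the closure above and the comment inside it reduce to a single predicate per humongous start region: reclaim eagerly only if no reference to the object was found during this GC, its remembered set and code roots give no reason to keep it, and it is not an object array. A condensed restatement (a sketch with made-up field names, not the HotSpot types):

// Hypothetical condensation of the per-region decision made by
// G1FreeHumongousRegionClosure::doHeapRegion() above.
struct HumongousCandidate {
  bool starts_humongous;    // only "starts humongous" regions drive reclamation
  bool live_by_reference;   // a root or young-gen reference was found during this GC
  bool always_live;         // e.g. non-empty remembered set or strong code roots
  bool is_obj_array;        // object arrays are never reclaimed eagerly (see above)
};

static bool can_eagerly_reclaim(const HumongousCandidate& c) {
  return c.starts_humongous &&
         !c.live_by_reference &&
         !c.always_live &&
         !c.is_obj_array;
}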
+
 // This routine is similar to the above but does not record
 // any policy statistics or update free lists; we are abandoning
 // the current incremental collection set in preparation of a
@@ -6395,16 +6624,15 @@
   TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }
 
   bool doHeapRegion(HeapRegion* r) {
-    if (r->is_empty()) {
-      // We ignore empty regions, we'll empty the free list afterwards
-    } else if (r->is_young()) {
-      // We ignore young regions, we'll empty the young list afterwards
-    } else if (r->isHumongous()) {
+    if (r->is_old()) {
+      _old_set->remove(r);
+    } else {
+      // We ignore free regions, we'll empty the free list afterwards.
+      // We ignore young regions, we'll empty the young list afterwards.
       // We ignore humongous regions, we're not tearing down the
-      // humongous region set
-    } else {
-      // The rest should be old
-      _old_set->remove(r);
+      // humongous regions set.
+      assert(r->is_free() || r->is_young() || r->isHumongous(),
+             "it cannot be another type");
     }
     return false;
   }
@@ -6426,22 +6654,22 @@
     // this is that during a full GC string deduplication needs to know if
     // a collected region was young or old when the full GC was initiated.
   }
-  _free_list.remove_all();
+  _hrm.remove_all_free_regions();
 }
 
 class RebuildRegionSetsClosure : public HeapRegionClosure {
 private:
   bool            _free_list_only;
   HeapRegionSet*   _old_set;
-  FreeRegionList* _free_list;
+  HeapRegionManager*   _hrm;
   size_t          _total_used;
 
 public:
   RebuildRegionSetsClosure(bool free_list_only,
-                           HeapRegionSet* old_set, FreeRegionList* free_list) :
+                           HeapRegionSet* old_set, HeapRegionManager* hrm) :
     _free_list_only(free_list_only),
-    _old_set(old_set), _free_list(free_list), _total_used(0) {
-    assert(_free_list->is_empty(), "pre-condition");
+    _old_set(old_set), _hrm(hrm), _total_used(0) {
+    assert(_hrm->num_free_regions() == 0, "pre-condition");
     if (!free_list_only) {
       assert(_old_set->is_empty(), "pre-condition");
     }
@@ -6454,14 +6682,20 @@
 
     if (r->is_empty()) {
       // Add free regions to the free list
-      _free_list->add_as_tail(r);
+      r->set_free();
+      r->set_allocation_context(AllocationContext::system());
+      _hrm->insert_into_free_list(r);
     } else if (!_free_list_only) {
       assert(!r->is_young(), "we should not come across young regions");
 
       if (r->isHumongous()) {
         // We ignore humongous regions, we left the humongous set unchanged
       } else {
-        // The rest should be old, add them to the old set
+        // Objects that were compacted would have ended up on regions
+        // that were previously old or free.
+        assert(r->is_free() || r->is_old(), "invariant");
+        // We now consider them old, so register as such.
+        r->set_old();
         _old_set->add(r);
       }
       _total_used += r->used();
@@ -6482,16 +6716,16 @@
     _young_list->empty_list();
   }
 
-  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list);
+  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
   heap_region_iterate(&cl);
 
   if (!free_list_only) {
-    _summary_bytes_used = cl.total_used();
-  }
-  assert(_summary_bytes_used == recalculate_used(),
-         err_msg("inconsistent _summary_bytes_used, "
+    _allocator->set_used(cl.total_used());
+  }
+  assert(_allocator->used_unlocked() == recalculate_used(),
+         err_msg("inconsistent _allocator->used_unlocked(), "
                  "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
-                 _summary_bytes_used, recalculate_used()));
+                 _allocator->used_unlocked(), recalculate_used()));
 }
 
 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
@@ -6500,11 +6734,7 @@
 
 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
   HeapRegion* hr = heap_region_containing(p);
-  if (hr == NULL) {
-    return false;
-  } else {
-    return hr->is_in(p);
-  }
+  return hr->is_in(p);
 }
 
 // Methods for the mutator alloc region
@@ -6522,6 +6752,7 @@
     if (new_alloc_region != NULL) {
       set_region_short_lived_locked(new_alloc_region);
       _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
+      check_bitmaps("Mutator Region Allocation", new_alloc_region);
       return new_alloc_region;
     }
   }
@@ -6531,10 +6762,10 @@
 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
                                                   size_t allocated_bytes) {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
-  assert(alloc_region->is_young(), "all mutator alloc regions should be young");
+  assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
 
   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
-  _summary_bytes_used += allocated_bytes;
+  _allocator->increase_used(allocated_bytes);
   _hr_printer.retire(alloc_region);
   // We update the eden sizes here, when the region is retired,
   // instead of when it's allocated, since this is the point that its
@@ -6542,11 +6773,6 @@
   g1mm()->update_eden_size();
 }
 
-HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
-                                                    bool force) {
-  return _g1h->new_mutator_alloc_region(word_size, force);
-}
-
 void G1CollectedHeap::set_par_threads() {
   // Don't change the number of workers.  Use the value previously set
   // in the workgroup.
@@ -6563,11 +6789,6 @@
   set_par_threads(n_workers);
 }
 
-void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
-                                       size_t allocated_bytes) {
-  _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
-}
-
 // Methods for the GC alloc regions
 
 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
@@ -6584,12 +6805,15 @@
       // We really only need to do this for old regions given that we
       // should never scan survivors. But it doesn't hurt to do it
       // for survivors too.
-      new_alloc_region->set_saved_mark();
+      new_alloc_region->record_top_and_timestamp();
       if (survivor) {
         new_alloc_region->set_survivor();
         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
+        check_bitmaps("Survivor Region Allocation", new_alloc_region);
       } else {
+        new_alloc_region->set_old();
         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
+        check_bitmaps("Old Region Allocation", new_alloc_region);
       }
       bool during_im = g1_policy()->during_initial_mark_pause();
       new_alloc_region->note_start_of_copying(during_im);
@@ -6615,36 +6839,13 @@
   _hr_printer.retire(alloc_region);
 }
 
-HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
-                                                       bool force) {
-  assert(!force, "not supported for GC alloc regions");
-  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
-}
-
-void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
-                                          size_t allocated_bytes) {
-  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
-                               GCAllocForSurvived);
-}
-
-HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
-                                                  bool force) {
-  assert(!force, "not supported for GC alloc regions");
-  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
-}
-
-void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
-                                     size_t allocated_bytes) {
-  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
-                               GCAllocForTenured);
-}
 // Heap region set verification
 
 class VerifyRegionListsClosure : public HeapRegionClosure {
 private:
   HeapRegionSet*   _old_set;
   HeapRegionSet*   _humongous_set;
-  FreeRegionList*  _free_list;
+  HeapRegionManager*   _hrm;
 
 public:
   HeapRegionSetCount _old_count;
@@ -6653,8 +6854,8 @@
 
   VerifyRegionListsClosure(HeapRegionSet* old_set,
                            HeapRegionSet* humongous_set,
-                           FreeRegionList* free_list) :
-    _old_set(old_set), _humongous_set(humongous_set), _free_list(free_list),
+                           HeapRegionManager* hrm) :
+    _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
     _old_count(), _humongous_count(), _free_count(){ }
 
   bool doHeapRegion(HeapRegion* hr) {
@@ -6665,19 +6866,21 @@
     if (hr->is_young()) {
       // TODO
     } else if (hr->startsHumongous()) {
-      assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->region_num()));
+      assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u starts humongous but is not in the humongous set.", hr->hrm_index()));
       _humongous_count.increment(1u, hr->capacity());
     } else if (hr->is_empty()) {
-      assert(hr->containing_set() == _free_list, err_msg("Heap region %u is empty but not on the free list.", hr->region_num()));
+      assert(_hrm->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrm_index()));
       _free_count.increment(1u, hr->capacity());
+    } else if (hr->is_old()) {
+      assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
+      _old_count.increment(1u, hr->capacity());
     } else {
-      assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->region_num()));
-      _old_count.increment(1u, hr->capacity());
+      ShouldNotReachHere();
     }
     return false;
   }
 
-  void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, FreeRegionList* free_list) {
+  void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
     guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
     guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
         old_set->total_capacity_bytes(), _old_count.capacity()));
@@ -6686,26 +6889,17 @@
     guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
         humongous_set->total_capacity_bytes(), _humongous_count.capacity()));
 
-    guarantee(free_list->length() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->length(), _free_count.length()));
+    guarantee(free_list->num_free_regions() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length()));
     guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
         free_list->total_capacity_bytes(), _free_count.capacity()));
   }
 };
 
-HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
-                                             HeapWord* bottom) {
-  HeapWord* end = bottom + HeapRegion::GrainWords;
-  MemRegion mr(bottom, end);
-  assert(_g1_reserved.contains(mr), "invariant");
-  // This might return NULL if the allocation fails
-  return new HeapRegion(hrs_index, _bot_shared, mr);
-}
-
 void G1CollectedHeap::verify_region_sets() {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 
   // First, check the explicit lists.
-  _free_list.verify_list();
+  _hrm.verify();
   {
     // Given that a concurrent operation might be adding regions to
     // the secondary free list we have to take the lock before
@@ -6736,9 +6930,9 @@
   // Finally, make sure that the region accounting in the lists is
   // consistent with what we see in the heap.
 
-  VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list);
+  VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrm);
   heap_region_iterate(&cl);
-  cl.verify_counts(&_old_set, &_humongous_set, &_free_list);
+  cl.verify_counts(&_old_set, &_humongous_set, &_hrm);
 }
 
 // Optimized nmethod scanning
@@ -6757,13 +6951,8 @@
                      " starting at "HR_FORMAT,
                      _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
 
-      // HeapRegion::add_strong_code_root() avoids adding duplicate
-      // entries but having duplicates is  OK since we "mark" nmethods
-      // as visited when we scan the strong code root lists during the GC.
-      hr->add_strong_code_root(_nm);
-      assert(hr->rem_set()->strong_code_roots_list_contains(_nm),
-             err_msg("failed to add code root "PTR_FORMAT" to remembered set of region "HR_FORMAT,
-                     _nm, HR_FORMAT_PARAMS(hr)));
+      // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
+      hr->add_strong_code_root_locked(_nm);
     }
   }
 
@@ -6790,9 +6979,6 @@
                      _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
 
       hr->remove_strong_code_root(_nm);
-      assert(!hr->rem_set()->strong_code_roots_list_contains(_nm),
-             err_msg("failed to remove code root "PTR_FORMAT" of region "HR_FORMAT,
-                     _nm, HR_FORMAT_PARAMS(hr)));
     }
   }
 
@@ -6820,132 +7006,13 @@
   nm->oops_do(&reg_cl, true);
 }
 
-class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure {
-public:
-  bool doHeapRegion(HeapRegion *hr) {
-    assert(!hr->isHumongous(),
-           err_msg("humongous region "HR_FORMAT" should not have been added to collection set",
-                   HR_FORMAT_PARAMS(hr)));
-    hr->migrate_strong_code_roots();
-    return false;
-  }
-};
-
-void G1CollectedHeap::migrate_strong_code_roots() {
-  MigrateCodeRootsHeapRegionClosure cl;
-  double migrate_start = os::elapsedTime();
-  collection_set_iterate(&cl);
-  double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0;
-  g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
-}
-
 void G1CollectedHeap::purge_code_root_memory() {
   double purge_start = os::elapsedTime();
-  G1CodeRootSet::purge_chunks(G1CodeRootsChunkCacheKeepPercent);
+  G1CodeRootSet::purge();
   double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
   g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
 }
 
-// Mark all the code roots that point into regions *not* in the
-// collection set.
-//
-// Note we do not want to use a "marking" CodeBlobToOopClosure while
-// walking the the code roots lists of regions not in the collection
-// set. Suppose we have an nmethod (M) that points to objects in two
-// separate regions - one in the collection set (R1) and one not (R2).
-// Using a "marking" CodeBlobToOopClosure here would result in "marking"
-// nmethod M when walking the code roots for R1. When we come to scan
-// the code roots for R2, we would see that M is already marked and it
-// would be skipped and the objects in R2 that are referenced from M
-// would not be evacuated.
-
-class MarkStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
-
-  class MarkStrongCodeRootOopClosure: public OopClosure {
-    ConcurrentMark* _cm;
-    HeapRegion* _hr;
-    uint _worker_id;
-
-    template <class T> void do_oop_work(T* p) {
-      T heap_oop = oopDesc::load_heap_oop(p);
-      if (!oopDesc::is_null(heap_oop)) {
-        oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-        // Only mark objects in the region (which is assumed
-        // to be not in the collection set).
-        if (_hr->is_in(obj)) {
-          _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
-        }
-      }
-    }
-
-  public:
-    MarkStrongCodeRootOopClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id) :
-      _cm(cm), _hr(hr), _worker_id(worker_id) {
-      assert(!_hr->in_collection_set(), "sanity");
-    }
-
-    void do_oop(narrowOop* p) { do_oop_work(p); }
-    void do_oop(oop* p)       { do_oop_work(p); }
-  };
-
-  MarkStrongCodeRootOopClosure _oop_cl;
-
-public:
-  MarkStrongCodeRootCodeBlobClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id):
-    _oop_cl(cm, hr, worker_id) {}
-
-  void do_code_blob(CodeBlob* cb) {
-    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
-    if (nm != NULL) {
-      nm->oops_do(&_oop_cl);
-    }
-  }
-};
-
-class MarkStrongCodeRootsHRClosure: public HeapRegionClosure {
-  G1CollectedHeap* _g1h;
-  uint _worker_id;
-
-public:
-  MarkStrongCodeRootsHRClosure(G1CollectedHeap* g1h, uint worker_id) :
-    _g1h(g1h), _worker_id(worker_id) {}
-
-  bool doHeapRegion(HeapRegion *hr) {
-    HeapRegionRemSet* hrrs = hr->rem_set();
-    if (hr->continuesHumongous()) {
-      // Code roots should never be attached to a continuation of a humongous region
-      assert(hrrs->strong_code_roots_list_length() == 0,
-             err_msg("code roots should never be attached to continuations of humongous region "HR_FORMAT
-                     " starting at "HR_FORMAT", but has "SIZE_FORMAT,
-                     HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()),
-                     hrrs->strong_code_roots_list_length()));
-      return false;
-    }
-
-    if (hr->in_collection_set()) {
-      // Don't mark code roots into regions in the collection set here.
-      // They will be marked when we scan them.
-      return false;
-    }
-
-    MarkStrongCodeRootCodeBlobClosure cb_cl(_g1h->concurrent_mark(), hr, _worker_id);
-    hr->strong_code_roots_do(&cb_cl);
-    return false;
-  }
-};
-
-void G1CollectedHeap::mark_strong_code_roots(uint worker_id) {
-  MarkStrongCodeRootsHRClosure cl(this, worker_id);
-  if (G1CollectedHeap::use_parallel_gc_threads()) {
-    heap_region_par_iterate_chunked(&cl,
-                                    worker_id,
-                                    workers()->active_workers(),
-                                    HeapRegion::ParMarkRootClaimValue);
-  } else {
-    heap_region_iterate(&cl);
-  }
-}
-
 class RebuildStrongCodeRootClosure: public CodeBlobClosure {
   G1CollectedHeap* _g1h;
 
@@ -6959,7 +7026,7 @@
       return;
     }
 
-    if (ScavengeRootsInCode && nm->detect_scavenge_root_oops()) {
+    if (ScavengeRootsInCode) {
       _g1h->register_nmethod(nm);
     }
   }
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -25,15 +25,17 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
 
+#include "gc_implementation/g1/g1AllocationContext.hpp"
+#include "gc_implementation/g1/g1Allocator.hpp"
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/evacuationInfo.hpp"
 #include "gc_implementation/g1/g1AllocRegion.hpp"
+#include "gc_implementation/g1/g1BiasedArray.hpp"
 #include "gc_implementation/g1/g1HRPrinter.hpp"
 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
-#include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
-#include "gc_implementation/g1/heapRegionSeq.hpp"
+#include "gc_implementation/g1/heapRegionManager.hpp"
 #include "gc_implementation/g1/heapRegionSet.hpp"
 #include "gc_implementation/shared/hSpaceCounters.hpp"
 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
@@ -80,12 +82,6 @@
 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
 
-enum GCAllocPurpose {
-  GCAllocForTenured,
-  GCAllocForSurvived,
-  GCAllocPurposeCount
-};
-
 class YoungList : public CHeapObj<mtGC> {
 private:
   G1CollectedHeap* _g1h;
@@ -158,33 +154,6 @@
   void          print();
 };
 
-class MutatorAllocRegion : public G1AllocRegion {
-protected:
-  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
-  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
-public:
-  MutatorAllocRegion()
-    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
-};
-
-class SurvivorGCAllocRegion : public G1AllocRegion {
-protected:
-  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
-  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
-public:
-  SurvivorGCAllocRegion()
-  : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
-};
-
-class OldGCAllocRegion : public G1AllocRegion {
-protected:
-  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
-  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
-public:
-  OldGCAllocRegion()
-  : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
-};
-
 // The G1 STW is alive closure.
 // An instance is embedded into the G1CH and used as the
 // (optional) _is_alive_non_header closure in the STW
@@ -199,7 +168,15 @@
 
 class RefineCardTableEntryClosure;
 
+class G1RegionMappingChangedListener : public G1MappingChangedListener {
+ private:
+  void reset_from_card_cache(uint start_idx, size_t num_regions);
+ public:
+  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
+};
+
 class G1CollectedHeap : public SharedHeap {
+  friend class VM_CollectForMetadataAllocation;
   friend class VM_G1CollectForAllocation;
   friend class VM_G1CollectFull;
   friend class VM_G1IncCollectionPause;
@@ -207,9 +184,12 @@
   friend class MutatorAllocRegion;
   friend class SurvivorGCAllocRegion;
   friend class OldGCAllocRegion;
+  friend class G1Allocator;
+  friend class G1DefaultAllocator;
+  friend class G1ResManAllocator;
 
   // Closures used in implementation.
-  template <G1Barrier barrier, bool do_mark_object>
+  template <G1Barrier barrier, G1Mark do_mark_object>
   friend class G1ParCopyClosure;
   friend class G1IsAliveClosure;
   friend class G1EvacuateFollowersClosure;
@@ -217,6 +197,8 @@
   friend class G1ParScanClosureSuper;
   friend class G1ParEvacuateFollowersClosure;
   friend class G1ParTask;
+  friend class G1ParGCAllocator;
+  friend class G1DefaultParGCAllocator;
   friend class G1FreeGarbageRegionClosure;
   friend class RefineCardTableEntryClosure;
   friend class G1PrepareCompactClosure;
@@ -226,6 +208,7 @@
   friend class EvacPopObjClosure;
   friend class G1ParCleanupCTTask;
 
+  friend class G1FreeHumongousRegionClosure;
   // Other related classes.
   friend class G1MarkSweep;
 
@@ -235,19 +218,9 @@
 
   static size_t _humongous_object_threshold_in_words;
 
-  // Storage for the G1 heap.
-  VirtualSpace _g1_storage;
-  MemRegion    _g1_reserved;
-
-  // The part of _g1_storage that is currently committed.
-  MemRegion _g1_committed;
-
-  // The master free list. It will satisfy all new region allocations.
-  FreeRegionList _free_list;
-
   // The secondary free list which contains regions that have been
-  // freed up during the cleanup process. This will be appended to the
-  // master free list when appropriate.
+  // freed up during the cleanup process. This will be appended to
+  // the master free list when appropriate.
   FreeRegionList _secondary_free_list;
 
   // It keeps track of the old regions.
@@ -256,6 +229,9 @@
   // It keeps track of the humongous regions.
   HeapRegionSet _humongous_set;
 
+  void clear_humongous_is_live_table();
+  void eagerly_reclaim_humongous_regions();
+
   // The number of regions we could create by expansion.
   uint _expansion_regions;
 
@@ -278,47 +254,24 @@
   // after heap shrinking (free_list_only == true).
   void rebuild_region_sets(bool free_list_only);
 
+  // Callback for region mapping changed events.
+  G1RegionMappingChangedListener _listener;
+
   // The sequence of all heap regions in the heap.
-  HeapRegionSeq _hrs;
+  HeapRegionManager _hrm;
 
-  // Alloc region used to satisfy mutator allocation requests.
-  MutatorAllocRegion _mutator_alloc_region;
+  // Class that handles the different kinds of allocations.
+  G1Allocator* _allocator;
 
-  // Alloc region used to satisfy allocation requests by the GC for
-  // survivor objects.
-  SurvivorGCAllocRegion _survivor_gc_alloc_region;
+  // Statistics for each allocation context
+  AllocationContextStats _allocation_context_stats;
 
   // PLAB sizing policy for survivors.
   PLABStats _survivor_plab_stats;
 
-  // Alloc region used to satisfy allocation requests by the GC for
-  // old objects.
-  OldGCAllocRegion _old_gc_alloc_region;
-
   // PLAB sizing policy for tenured objects.
   PLABStats _old_plab_stats;
 
-  PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
-    PLABStats* stats = NULL;
-
-    switch (purpose) {
-    case GCAllocForSurvived:
-      stats = &_survivor_plab_stats;
-      break;
-    case GCAllocForTenured:
-      stats = &_old_plab_stats;
-      break;
-    default:
-      assert(false, "unrecognized GCAllocPurpose");
-    }
-
-    return stats;
-  }
-
-  // The last old region we allocated to during the last GC.
-  // Typically, it is not full so we should re-use it during the next GC.
-  HeapRegion* _retained_old_gc_alloc_region;
-
   // It specifies whether we should attempt to expand the heap after a
   // region allocation failure. If heap expansion fails we set this to
   // false so that we don't re-attempt the heap expansion (it's likely
@@ -346,33 +299,25 @@
   // Helper for monitoring and management support.
   G1MonitoringSupport* _g1mm;
 
-  // Determines PLAB size for a particular allocation purpose.
-  size_t desired_plab_sz(GCAllocPurpose purpose);
-
-  // Outside of GC pauses, the number of bytes used in all regions other
-  // than the current allocation region.
-  size_t _summary_bytes_used;
+  // Records whether the region at the given index is kept live by roots or
+  // references from the young generation.
+  class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> {
+   protected:
+    bool default_value() const { return false; }
+   public:
+    void clear() { G1BiasedMappedArray<bool>::clear(); }
+    void set_live(uint region) {
+      set_by_index(region, true);
+    }
+    bool is_live(uint region) {
+      return get_by_index(region);
+    }
+  };
 
-  // This is used for a quick test on whether a reference points into
-  // the collection set or not. Basically, we have an array, with one
-  // byte per region, and that byte denotes whether the corresponding
-  // region is in the collection set or not. The entry corresponding
-  // the bottom of the heap, i.e., region 0, is pointed to by
-  // _in_cset_fast_test_base.  The _in_cset_fast_test field has been
-  // biased so that it actually points to address 0 of the address
-  // space, to make the test as fast as possible (we can simply shift
-  // the address to address into it, instead of having to subtract the
-  // bottom of the heap from the address before shifting it; basically
-  // it works in the same way the card table works).
-  bool* _in_cset_fast_test;
-
-  // The allocated array used for the fast test on whether a reference
-  // points into the collection set or not. This field is also used to
-  // free the array.
-  bool* _in_cset_fast_test_base;
-
-  // The length of the _in_cset_fast_test_base array.
-  uint _in_cset_fast_test_length;
+  HumongousIsLiveBiasedMappedArray _humongous_is_live;
+  // Stores whether during humongous object registration we found candidate regions.
+  // If not, we can skip a few steps.
+  bool _has_humongous_reclaim_candidates;
 
   volatile unsigned _gc_time_stamp;
 
@@ -415,7 +360,7 @@
 
   // If the HR printer is active, dump the state of the regions in the
   // heap after a compaction.
-  void print_hrs_post_compaction();
+  void print_hrm_post_compaction();
 
   double verify(bool guard, const char* msg);
   void verify_before_gc();
@@ -506,24 +451,17 @@
   // humongous object, set is_old to true. If not, to false.
   HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);
 
-  // Attempt to satisfy a humongous allocation request of the given
-  // size by finding a contiguous set of free regions of num_regions
-  // length and remove them from the master free list. Return the
-  // index of the first region or G1_NULL_HRS_INDEX if the search
-  // was unsuccessful.
-  uint humongous_obj_allocate_find_first(uint num_regions,
-                                         size_t word_size);
-
   // Initialize a contiguous set of free regions of length num_regions
   // and starting at index first so that they appear as a single
   // humongous region.
   HeapWord* humongous_obj_allocate_initialize_regions(uint first,
                                                       uint num_regions,
-                                                      size_t word_size);
+                                                      size_t word_size,
+                                                      AllocationContext_t context);
 
   // Attempt to allocate a humongous object of the given size. Return
   // NULL if unsuccessful.
-  HeapWord* humongous_obj_allocate(size_t word_size);
+  HeapWord* humongous_obj_allocate(size_t word_size, AllocationContext_t context);
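For a concrete (purely illustrative) sizing: with 1 MB regions, a 2.5 MB request rounds up to num_regions = 3 contiguous free regions; the region at index first becomes the starts humongous region that holds the object's header, the next two become continues humongous regions, and the group is handed back as one allocation starting at the first region's bottom.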
 
   // The following two methods, allocate_new_tlab() and
   // mem_allocate(), are the two main entry points from the runtime
@@ -579,6 +517,7 @@
   // retry the allocation attempt, potentially scheduling a GC
   // pause. This should only be used for non-humongous allocations.
   HeapWord* attempt_allocation_slow(size_t word_size,
+                                    AllocationContext_t context,
                                     unsigned int* gc_count_before_ret,
                                     int* gclocker_retry_count_ret);
 
@@ -593,7 +532,8 @@
   // specifies whether the mutator alloc region is expected to be NULL
   // or not.
   HeapWord* attempt_allocation_at_safepoint(size_t word_size,
-                                       bool expect_null_mutator_alloc_region);
+                                            AllocationContext_t context,
+                                            bool expect_null_mutator_alloc_region);
 
   // It dirties the cards that cover the block so that the post
   // write barrier never queues anything when updating objects on this
@@ -605,7 +545,9 @@
   // allocation region, either by picking one or expanding the
   // heap, and then allocate a block of the given size. The block
   // may not be humongous - it must fit into a single heap region.
-  HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
+  HeapWord* par_allocate_during_gc(GCAllocPurpose purpose,
+                                   size_t word_size,
+                                   AllocationContext_t context);
 
   HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
                                     HeapRegion*    alloc_region,
@@ -617,10 +559,12 @@
   void par_allocate_remaining_space(HeapRegion* r);
 
   // Allocation attempt during GC for a survivor object / PLAB.
-  inline HeapWord* survivor_attempt_allocation(size_t word_size);
+  inline HeapWord* survivor_attempt_allocation(size_t word_size,
+                                               AllocationContext_t context);
 
   // Allocation attempt during GC for an old object / PLAB.
-  inline HeapWord* old_attempt_allocation(size_t word_size);
+  inline HeapWord* old_attempt_allocation(size_t word_size,
+                                          AllocationContext_t context);
 
   // These methods are the "callbacks" from the G1AllocRegion class.
 
@@ -659,13 +603,15 @@
   // Callback from VM_G1CollectForAllocation operation.
   // This function does everything necessary/possible to satisfy a
   // failed allocation request (including collection, expansion, etc.)
-  HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);
+  HeapWord* satisfy_failed_allocation(size_t word_size,
+                                      AllocationContext_t context,
+                                      bool* succeeded);
 
   // Attempt to expand the heap sufficiently
   // to support an allocation of the given "word_size".  If
   // successful, perform the allocation and return the address of the
   // allocated block, or else "NULL".
-  HeapWord* expand_and_allocate(size_t word_size);
+  HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
 
   // Process any reference objects discovered during
   // an incremental evacuation pause.
@@ -677,6 +623,10 @@
 
 public:
 
+  G1Allocator* allocator() {
+    return _allocator;
+  }
+
   G1MonitoringSupport* g1mm() {
     assert(_g1mm != NULL, "should have been initialized");
     return _g1mm;
@@ -688,24 +638,51 @@
   // (Rounds up to a HeapRegion boundary.)
   bool expand(size_t expand_bytes);
 
+  // Returns the PLAB statistics given a purpose.
+  PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
+    PLABStats* stats = NULL;
+
+    switch (purpose) {
+    case GCAllocForSurvived:
+      stats = &_survivor_plab_stats;
+      break;
+    case GCAllocForTenured:
+      stats = &_old_plab_stats;
+      break;
+    default:
+      assert(false, "unrecognized GCAllocPurpose");
+    }
+
+    return stats;
+  }
+
+  // Determines PLAB size for a particular allocation purpose.
+  size_t desired_plab_sz(GCAllocPurpose purpose);
+
+  inline AllocationContextStats& allocation_context_stats();
+
   // Do anything common to GC's.
   virtual void gc_prologue(bool full);
   virtual void gc_epilogue(bool full);
 
-  #ifdef GRAAL
-    HeapWord** top_addr() const;
-    HeapWord** end_addr() const;
-  #endif
+  inline void set_humongous_is_live(oop obj);
+
+  bool humongous_is_live(uint region) {
+    return _humongous_is_live.is_live(region);
+  }
 
+  // Returns whether the given region (which must be a humongous (start) region)
+  // is to be considered conservatively live regardless of any other conditions.
+  bool humongous_region_is_always_live(uint index);
+  // Register the given region to be part of the collection set.
+  inline void register_humongous_region_with_in_cset_fast_test(uint index);
+  // Register regions with humongous objects (actually on the start region) in
+  // the in_cset_fast_test table.
+  void register_humongous_regions_with_in_cset_fast_test();
   // We register a region with the fast "in collection set" test. We
   // simply set to true the array slot corresponding to this region.
   void register_region_with_in_cset_fast_test(HeapRegion* r) {
-    assert(_in_cset_fast_test_base != NULL, "sanity");
-    assert(r->in_collection_set(), "invariant");
-    uint index = r->hrs_index();
-    assert(index < _in_cset_fast_test_length, "invariant");
-    assert(!_in_cset_fast_test_base[index], "invariant");
-    _in_cset_fast_test_base[index] = true;
+    _in_cset_fast_test.set_in_cset(r->hrm_index());
   }
 
   // This is a fast test on whether a reference points into the
@@ -714,9 +691,7 @@
   inline bool in_cset_fast_test(oop obj);
 
   void clear_cset_fast_test() {
-    assert(_in_cset_fast_test_base != NULL, "sanity");
-    memset(_in_cset_fast_test_base, false,
-           (size_t) _in_cset_fast_test_length * sizeof(bool));
+    _in_cset_fast_test.clear();
   }
 
   // This is called at the start of either a concurrent cycle or a Full
@@ -845,22 +820,13 @@
   // param is for use with parallel roots processing, and should be
   // the "i" of the calling parallel worker thread's work(i) function.
   // In the sequential case this param will be ignored.
-  void g1_process_strong_roots(bool is_scavenging,
-                               ScanningOption so,
-                               OopClosure* scan_non_heap_roots,
-                               OopsInHeapRegionClosure* scan_rs,
-                               G1KlassScanClosure* scan_klasses,
-                               uint worker_i);
-
-  // Apply "blk" to all the weak roots of the system.  These include
-  // JNI weak roots, the code cache, system dictionary, symbol table,
-  // string table, and referents of reachable weak refs.
-  void g1_process_weak_roots(OopClosure* root_closure);
-
-  // Notifies all the necessary spaces that the committed space has
-  // been updated (either expanded or shrunk). It should be called
-  // after _g1_storage is updated.
-  void update_committed_space(HeapWord* old_end, HeapWord* new_end);
+  void g1_process_roots(OopClosure* scan_non_heap_roots,
+                        OopClosure* scan_non_heap_weak_roots,
+                        OopsInHeapRegionClosure* scan_rs,
+                        CLDClosure* scan_strong_clds,
+                        CLDClosure* scan_weak_clds,
+                        CodeBlobClosure* scan_strong_code,
+                        uint worker_i);
 
   // The concurrent marker (and the thread it runs in.)
   ConcurrentMark* _cm;
@@ -1048,7 +1014,7 @@
   // of G1CollectedHeap::_gc_time_stamp.
   unsigned int* _worker_cset_start_region_time_stamp;
 
-  enum G1H_process_strong_roots_tasks {
+  enum G1H_process_roots_tasks {
     G1H_PS_filter_satb_buffers,
     G1H_PS_refProcessor_oops_do,
     // Leave this one last.
@@ -1129,20 +1095,11 @@
     return _gc_time_stamp;
   }
 
-  void reset_gc_time_stamp() {
-    _gc_time_stamp = 0;
-    OrderAccess::fence();
-    // Clear the cached CSet starting regions and time stamps.
-    // Their validity is dependent on the GC timestamp.
-    clear_cset_start_regions();
-  }
+  inline void reset_gc_time_stamp();
 
   void check_gc_time_stamps() PRODUCT_RETURN;
 
-  void increment_gc_time_stamp() {
-    ++_gc_time_stamp;
-    OrderAccess::fence();
-  }
+  inline void increment_gc_time_stamp();
 
   // Reset the given region's GC timestamp. If it is a starts humongous region,
   // also reset the GC timestamp of its corresponding
@@ -1180,43 +1137,51 @@
   // end fields defining the extent of the contiguous allocation region.)
   // But G1CollectedHeap doesn't yet support this.
 
-  // Return an estimate of the maximum allocation that could be performed
-  // without triggering any collection or expansion activity.  In a
-  // generational collector, for example, this is probably the largest
-  // allocation that could be supported (without expansion) in the youngest
-  // generation.  It is "unsafe" because no locks are taken; the result
-  // should be treated as an approximation, not a guarantee, for use in
-  // heuristic resizing decisions.
-  virtual size_t unsafe_max_alloc();
-
   virtual bool is_maximal_no_gc() const {
-    return _g1_storage.uncommitted_size() == 0;
+    return _hrm.available() == 0;
   }
 
-  // The total number of regions in the heap.
-  uint n_regions() { return _hrs.length(); }
+  // The current number of regions in the heap.
+  uint num_regions() const { return _hrm.length(); }
 
   // The max number of regions in the heap.
-  uint max_regions() { return _hrs.max_length(); }
+  uint max_regions() const { return _hrm.max_length(); }
 
   // The number of regions that are completely free.
-  uint free_regions() { return _free_list.length(); }
+  uint num_free_regions() const { return _hrm.num_free_regions(); }
 
   // The number of regions that are not completely free.
-  uint used_regions() { return n_regions() - free_regions(); }
-
-  // The number of regions available for "regular" expansion.
-  uint expansion_regions() { return _expansion_regions; }
-
-  // Factory method for HeapRegion instances. It will return NULL if
-  // the allocation fails.
-  HeapRegion* new_heap_region(uint hrs_index, HeapWord* bottom);
+  uint num_used_regions() const { return num_regions() - num_free_regions(); }
 
   void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
   void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
   void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
   void verify_dirty_young_regions() PRODUCT_RETURN;
 
+#ifndef PRODUCT
+  // Make sure that the given bitmap has no marked objects in the
+  // range [from,limit). If it does, print an error message and return
+  // false. Otherwise, just return true. bitmap_name should be "prev"
+  // or "next".
+  bool verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
+                                HeapWord* from, HeapWord* limit);
+
+  // Verify that the prev / next bitmap range [tams,end) for the given
+  // region has no marks. Return true if all is well, false if errors
+  // are detected.
+  bool verify_bitmaps(const char* caller, HeapRegion* hr);
+#endif // PRODUCT
+
+  // If G1VerifyBitmaps is set, verify that the marking bitmaps for
+  // the given region do not have any spurious marks. If errors are
+  // detected, print appropriate error messages and crash.
+  void check_bitmaps(const char* caller, HeapRegion* hr) PRODUCT_RETURN;
+
+  // If G1VerifyBitmaps is set, verify that the marking bitmaps do not
+  // have any spurious marks. If errors are detected, print
+  // appropriate error messages and crash.
+  void check_bitmaps(const char* caller) PRODUCT_RETURN;
+
   // verify_region_sets() performs verification over the region
   // lists. It will be compiled in the product code to be used when
   // necessary (i.e., during heap verification).
@@ -1235,7 +1200,7 @@
 
 #ifdef ASSERT
   bool is_on_master_free_list(HeapRegion* hr) {
-    return hr->containing_set() == &_free_list;
+    return _hrm.is_free(hr);
   }
 #endif // ASSERT
 
@@ -1247,7 +1212,7 @@
   }
 
   void append_secondary_free_list() {
-    _free_list.add_ordered(&_secondary_free_list);
+    _hrm.insert_list_into_free_list(&_secondary_free_list);
   }
 
   void append_secondary_free_list_if_not_empty_with_lock() {
@@ -1273,7 +1238,7 @@
   // Determine whether the given region is one that we are using as an
   // old GC alloc region.
   bool is_old_gc_alloc_region(HeapRegion* hr) {
-    return hr == _retained_old_gc_alloc_region;
+    return _allocator->is_retained_old_region(hr);
   }
 
   // Perform a collection of the heap; intended for use in implementing
@@ -1284,6 +1249,11 @@
   // The same as above but assume that the caller holds the Heap_lock.
   void collect_locked(GCCause::Cause cause);
 
+  virtual bool copy_allocation_context_stats(const jint* contexts,
+                                             jlong* totals,
+                                             jbyte* accuracy,
+                                             jint len);
+
   // True iff an evacuation has failed in the most-recent collection.
   bool evacuation_failed() { return _evacuation_failed; }
 
@@ -1293,33 +1263,84 @@
 
   // Returns "TRUE" iff "p" points into the committed areas of the heap.
   virtual bool is_in(const void* p) const;
+#ifdef ASSERT
+  // Returns whether p is in one of the available areas of the heap. Slow but
+  // extensive version.
+  bool is_in_exact(const void* p) const;
+#endif
 
   // Return "TRUE" iff the given object address is within the collection
-  // set.
+  // set. Slow implementation.
   inline bool obj_in_cs(oop obj);
 
+  inline bool is_in_cset(oop obj);
+
+  inline bool is_in_cset_or_humongous(const oop obj);
+
+  enum in_cset_state_t {
+   InNeither,           // neither in collection set nor humongous
+   InCSet,              // region is in collection set only
+   IsHumongous          // region is a humongous start region
+  };
+ private:
+  // Instances of this class are used for quick tests on whether a reference points
+  // into the collection set or is a humongous object (points into a humongous
+  // object).
+  // Each of the array's elements denotes whether the corresponding region is in
+  // the collection set or a humongous region.
+  // We use this to quickly reclaim humongous objects: by making a humongous region
+  // succeed this test, we sort-of add it to the collection set. During the reference
+  // iteration closures, when we see a humongous region, we simply mark it as
+  // referenced, i.e. live.
+  class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray<char> {
+   protected:
+    char default_value() const { return G1CollectedHeap::InNeither; }
+   public:
+    void set_humongous(uintptr_t index) {
+      assert(get_by_index(index) != InCSet, "Should not overwrite InCSet values");
+      set_by_index(index, G1CollectedHeap::IsHumongous);
+    }
+
+    void clear_humongous(uintptr_t index) {
+      set_by_index(index, G1CollectedHeap::InNeither);
+    }
+
+    void set_in_cset(uintptr_t index) {
+      assert(get_by_index(index) != G1CollectedHeap::IsHumongous, "Should not overwrite IsHumongous value");
+      set_by_index(index, G1CollectedHeap::InCSet);
+    }
+
+    bool is_in_cset_or_humongous(HeapWord* addr) const { return get_by_address(addr) != G1CollectedHeap::InNeither; }
+    bool is_in_cset(HeapWord* addr) const { return get_by_address(addr) == G1CollectedHeap::InCSet; }
+    G1CollectedHeap::in_cset_state_t at(HeapWord* addr) const { return (G1CollectedHeap::in_cset_state_t)get_by_address(addr); }
+    void clear() { G1BiasedMappedArray<char>::clear(); }
+  };
+
+  // This array is used for a quick test on whether a reference points into
+  // the collection set or not. Each of the array's elements denotes whether the
+  // corresponding region is in the collection set or not.
+  G1FastCSetBiasedMappedArray _in_cset_fast_test;
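The lookup relies on the same card-table-style biasing that the removed comment above described: the table's base pointer is pre-offset by (heap bottom >> LogOfHRGrainBytes), so a query is a single shift plus index with no subtraction of the heap bottom. A minimal standalone sketch of that idea, assuming a flat address space and made-up types (this is not the G1BiasedMappedArray implementation):

    #include <cstdint>
    #include <vector>

    enum RegionState : char { StateNeither = 0, StateInCSet = 1, StateHumongous = 2 };

    struct BiasedRegionTable {
      std::vector<char> slots;   // one byte per region
      int               shift;   // log2(region size in bytes)
      char*             biased;  // slots.data() offset back by (heap_start >> shift)

      BiasedRegionTable(uintptr_t heap_start, size_t num_regions, int log_region_bytes)
        : slots(num_regions, StateNeither), shift(log_region_bytes),
          biased(slots.data() - (heap_start >> shift)) {}

      // One shift, one load: no need to subtract the heap bottom first.
      char at(uintptr_t addr) const        { return biased[addr >> shift]; }
      void set(uintptr_t addr, char state) { biased[addr >> shift] = state; }
    };

A test like is_in_cset() then boils down to table.at((uintptr_t)obj) == StateInCSet.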
+
+ public:
+
+  inline in_cset_state_t in_cset_state(const oop obj);
+
   // Return "TRUE" iff the given object address is in the reserved
   // region of g1.
   bool is_in_g1_reserved(const void* p) const {
-    return _g1_reserved.contains(p);
+    return _hrm.reserved().contains(p);
   }
 
   // Returns a MemRegion that corresponds to the space that has been
   // reserved for the heap
-  MemRegion g1_reserved() {
-    return _g1_reserved;
-  }
-
-  // Returns a MemRegion that corresponds to the space that has been
-  // committed in the heap
-  MemRegion g1_committed() {
-    return _g1_committed;
+  MemRegion g1_reserved() const {
+    return _hrm.reserved();
   }
 
   virtual bool is_in_closed_subset(const void* p) const;
 
-  G1SATBCardTableModRefBS* g1_barrier_set() {
-    return (G1SATBCardTableModRefBS*) barrier_set();
+  G1SATBCardTableLoggingModRefBS* g1_barrier_set() {
+    return (G1SATBCardTableLoggingModRefBS*) barrier_set();
   }
 
   // This resets the card table to all zeros.  It is used after
@@ -1332,9 +1353,6 @@
   // "cl.do_oop" on each.
   virtual void oop_iterate(ExtendedOopClosure* cl);
 
-  // Same as above, restricted to a memory region.
-  void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
-
   // Iterate over all objects, calling "cl.do_object" on each.
   virtual void object_iterate(ObjectClosure* cl);
 
@@ -1352,6 +1370,12 @@
   // Return the region with the given index. It assumes the index is valid.
   inline HeapRegion* region_at(uint index) const;
 
+  // Calculate the region index of the given address. Given address must be
+  // within the heap.
+  inline uint addr_to_region(HeapWord* addr) const;
+
+  inline HeapWord* bottom_addr_for_region(uint index) const;
+
   // Divide the heap region sequence into "chunks" of some size (the number
   // of regions divided by the number of parallel threads times some
   // overpartition factor, currently 4).  Assumes that this will be called
@@ -1365,10 +1389,10 @@
   // setting the claim value of the second and subsequent regions of the
   // chunk.)  For now requires that "doHeapRegion" always returns "false",
   // i.e., that a closure never attempt to abort a traversal.
-  void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
-                                       uint worker,
-                                       uint no_of_par_workers,
-                                       jint claim_value);
+  void heap_region_par_iterate_chunked(HeapRegionClosure* cl,
+                                       uint worker_id,
+                                       uint num_workers,
+                                       jint claim_value) const;
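To make the overpartitioning concrete (the counts are illustrative): with 2048 regions, 8 parallel workers and the overpartition factor of 4, the chunk size is 2048 / (8 * 4) = 64 regions, so each worker claims roughly four chunks on average; the surplus chunks let fast workers keep pulling work instead of idling behind a single fixed slice.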
 
   // It resets all the region claim values to the default.
   void reset_heap_region_claim_values();
@@ -1393,35 +1417,27 @@
   // starting region for iterating over the current collection set.
   HeapRegion* start_cset_region_for_worker(uint worker_i);
 
-  // This is a convenience method that is used by the
-  // HeapRegionIterator classes to calculate the starting region for
-  // each worker so that they do not all start from the same region.
-  HeapRegion* start_region_for_worker(uint worker_i, uint no_of_par_workers);
-
   // Iterate over the regions (if any) in the current collection set.
   void collection_set_iterate(HeapRegionClosure* blk);
 
   // As above but starting from region r
   void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
 
-  // Returns the first (lowest address) compactible space in the heap.
-  virtual CompactibleSpace* first_compactible_space();
+  HeapRegion* next_compaction_region(const HeapRegion* from) const;
 
   // A CollectedHeap will contain some number of spaces.  This finds the
   // space containing a given address, or else returns NULL.
   virtual Space* space_containing(const void* addr) const;
 
-  // A G1CollectedHeap will contain some number of heap regions.  This
-  // finds the region containing a given address, or else returns NULL.
+  // Returns the HeapRegion that contains addr. addr must not be NULL.
+  template <class T>
+  inline HeapRegion* heap_region_containing_raw(const T addr) const;
+
+  // Returns the HeapRegion that contains addr. addr must not be NULL.
+  // If addr is within a continues humongous region, it returns its humongous start region.
   template <class T>
   inline HeapRegion* heap_region_containing(const T addr) const;
 
-  // Like the above, but requires "addr" to be in the heap (to avoid a
-  // null-check), and unlike the above, may return an continuing humongous
-  // region.
-  template <class T>
-  inline HeapRegion* heap_region_containing_raw(const T addr) const;
-
   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
   // each address in the (reserved) heap is a member of exactly
   // one block.  The defining characteristic of a block is that it is
@@ -1563,7 +1579,6 @@
   // the region to which the object belongs. An object is dead
   // iff a) it was not allocated since the last mark and b) it
   // is not marked.
-
   bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
     return
       !hr->obj_allocated_since_prev_marking(obj) &&
@@ -1573,7 +1588,6 @@
   // This function returns true when an object has been
   // around since the previous marking and hasn't yet
   // been marked during this marking.
-
   bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
     return
       !hr->obj_allocated_since_next_marking(obj) &&
@@ -1619,19 +1633,9 @@
   // Unregister the given nmethod from the G1 heap
   virtual void unregister_nmethod(nmethod* nm);
 
-  // Migrate the nmethods in the code root lists of the regions
-  // in the collection set to regions in to-space. In the event
-  // of an evacuation failure, nmethods that reference objects
-  // that were not successfullly evacuated are not migrated.
-  void migrate_strong_code_roots();
-
   // Free up superfluous code root memory.
   void purge_code_root_memory();
 
-  // During an initial mark pause, mark all the code roots that
-  // point into regions *not* in the collection set.
-  void mark_strong_code_roots(uint worker_id);
-
   // Rebuild the strong code root lists for each region
   // after a full GC
   void rebuild_strong_code_roots();
@@ -1640,6 +1644,9 @@
   // in symbol table, possibly in parallel.
   void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
 
+  // Parallel phase of unloading/cleaning after G1 concurrent mark.
+  void parallel_cleaning(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, bool class_unloading_occurred);
+
   // Redirty logged cards in the refinement queue.
   void redirty_logged_cards();
   // Verification
@@ -1711,274 +1718,4 @@
   size_t _max_heap_capacity;
 };
 
-class G1ParGCAllocBuffer: public ParGCAllocBuffer {
-private:
-  bool        _retired;
-
-public:
-  G1ParGCAllocBuffer(size_t gclab_word_size);
-
-  void set_buf(HeapWord* buf) {
-    ParGCAllocBuffer::set_buf(buf);
-    _retired = false;
-  }
-
-  void retire(bool end_of_gc, bool retain) {
-    if (_retired)
-      return;
-    ParGCAllocBuffer::retire(end_of_gc, retain);
-    _retired = true;
-  }
-};
-
-class G1ParScanThreadState : public StackObj {
-protected:
-  G1CollectedHeap* _g1h;
-  RefToScanQueue*  _refs;
-  DirtyCardQueue   _dcq;
-  G1SATBCardTableModRefBS* _ct_bs;
-  G1RemSet* _g1_rem;
-
-  G1ParGCAllocBuffer  _surviving_alloc_buffer;
-  G1ParGCAllocBuffer  _tenured_alloc_buffer;
-  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
-  ageTable            _age_table;
-
-  G1ParScanClosure    _scanner;
-
-  size_t           _alloc_buffer_waste;
-  size_t           _undo_waste;
-
-  OopsInHeapRegionClosure*      _evac_failure_cl;
-
-  int  _hash_seed;
-  uint _queue_num;
-
-  size_t _term_attempts;
-
-  double _start;
-  double _start_strong_roots;
-  double _strong_roots_time;
-  double _start_term;
-  double _term_time;
-
-  // Map from young-age-index (0 == not young, 1 is youngest) to
-  // surviving words. base is what we get back from the malloc call
-  size_t* _surviving_young_words_base;
-  // this points into the array, as we use the first few entries for padding
-  size_t* _surviving_young_words;
-
-#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
-
-  void   add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
-
-  void   add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
-
-  DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
-  G1SATBCardTableModRefBS* ctbs()                { return _ct_bs; }
-
-  template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);
-
-  template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
-    // If the new value of the field points to the same region or
-    // is the to-space, we don't need to include it in the Rset updates.
-    if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
-      size_t card_index = ctbs()->index_for(p);
-      // If the card hasn't been added to the buffer, do it.
-      if (ctbs()->mark_card_deferred(card_index)) {
-        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
-      }
-    }
-  }
-
-public:
-  G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
-
-  ~G1ParScanThreadState() {
-    FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
-  }
-
-  RefToScanQueue*   refs()            { return _refs;             }
-  ageTable*         age_table()       { return &_age_table;       }
-
-  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
-    return _alloc_buffers[purpose];
-  }
-
-  size_t alloc_buffer_waste() const              { return _alloc_buffer_waste; }
-  size_t undo_waste() const                      { return _undo_waste; }
-
-#ifdef ASSERT
-  bool verify_ref(narrowOop* ref) const;
-  bool verify_ref(oop* ref) const;
-  bool verify_task(StarTask ref) const;
-#endif // ASSERT
-
-  template <class T> void push_on_queue(T* ref) {
-    assert(verify_ref(ref), "sanity");
-    refs()->push(ref);
-  }
-
-  template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
-
-  HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
-    HeapWord* obj = NULL;
-    size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
-    if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
-      G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
-      add_to_alloc_buffer_waste(alloc_buf->words_remaining());
-      alloc_buf->retire(false /* end_of_gc */, false /* retain */);
-
-      HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
-      if (buf == NULL) return NULL; // Let caller handle allocation failure.
-      // Otherwise.
-      alloc_buf->set_word_size(gclab_word_size);
-      alloc_buf->set_buf(buf);
-
-      obj = alloc_buf->allocate(word_sz);
-      assert(obj != NULL, "buffer was definitely big enough...");
-    } else {
-      obj = _g1h->par_allocate_during_gc(purpose, word_sz);
-    }
-    return obj;
-  }
-
-  HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
-    HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
-    if (obj != NULL) return obj;
-    return allocate_slow(purpose, word_sz);
-  }
-
-  void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
-    if (alloc_buffer(purpose)->contains(obj)) {
-      assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
-             "should contain whole object");
-      alloc_buffer(purpose)->undo_allocation(obj, word_sz);
-    } else {
-      CollectedHeap::fill_with_object(obj, word_sz);
-      add_to_undo_waste(word_sz);
-    }
-  }
-
-  void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
-    _evac_failure_cl = evac_failure_cl;
-  }
-  OopsInHeapRegionClosure* evac_failure_closure() {
-    return _evac_failure_cl;
-  }
-
-  int* hash_seed() { return &_hash_seed; }
-  uint queue_num() { return _queue_num; }
-
-  size_t term_attempts() const  { return _term_attempts; }
-  void note_term_attempt() { _term_attempts++; }
-
-  void start_strong_roots() {
-    _start_strong_roots = os::elapsedTime();
-  }
-  void end_strong_roots() {
-    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
-  }
-  double strong_roots_time() const { return _strong_roots_time; }
-
-  void start_term_time() {
-    note_term_attempt();
-    _start_term = os::elapsedTime();
-  }
-  void end_term_time() {
-    _term_time += (os::elapsedTime() - _start_term);
-  }
-  double term_time() const { return _term_time; }
-
-  double elapsed_time() const {
-    return os::elapsedTime() - _start;
-  }
-
-  static void
-    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
-  void
-    print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
-
-  size_t* surviving_young_words() {
-    // We add on to hide entry 0 which accumulates surviving words for
-    // age -1 regions (i.e. non-young ones)
-    return _surviving_young_words;
-  }
-
-  void retire_alloc_buffers() {
-    for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-      size_t waste = _alloc_buffers[ap]->words_remaining();
-      add_to_alloc_buffer_waste(waste);
-      _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
-                                                 true /* end_of_gc */,
-                                                 false /* retain */);
-    }
-  }
-private:
-  #define G1_PARTIAL_ARRAY_MASK 0x2
-
-  inline bool has_partial_array_mask(oop* ref) const {
-    return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
-  }
-
-  // We never encode partial array oops as narrowOop*, so return false immediately.
-  // This allows the compiler to create optimized code when popping references from
-  // the work queue.
-  inline bool has_partial_array_mask(narrowOop* ref) const {
-    assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
-    return false;
-  }
-
-  // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
-  // We always encode partial arrays as regular oop, to allow the
-  // specialization for has_partial_array_mask() for narrowOops above.
-  // This means that unintentional use of this method with narrowOops are caught
-  // by the compiler.
-  inline oop* set_partial_array_mask(oop obj) const {
-    assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
-    return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
-  }
-
-  inline oop clear_partial_array_mask(oop* ref) const {
-    return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
-  }
-
-  inline void do_oop_partial_array(oop* p);
-
-  // This method is applied to the fields of the objects that have just been copied.
-  template <class T> void do_oop_evac(T* p, HeapRegion* from) {
-    assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
-           "Reference should not be NULL here as such are never pushed to the task queue.");
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
-
-    // Although we never intentionally push references outside of the collection
-    // set, due to (benign) races in the claim mechanism during RSet scanning more
-    // than one thread might claim the same card. So the same card may be
-    // processed multiple times. So redo this check.
-    if (_g1h->in_cset_fast_test(obj)) {
-      oop forwardee;
-      if (obj->is_forwarded()) {
-        forwardee = obj->forwardee();
-      } else {
-        forwardee = copy_to_survivor_space(obj);
-      }
-      assert(forwardee != NULL, "forwardee should not be NULL");
-      oopDesc::encode_store_heap_oop(p, forwardee);
-    }
-
-    assert(obj != NULL, "Must be");
-    update_rs(from, p, queue_num());
-  }
-public:
-
-  oop copy_to_survivor_space(oop const obj);
-
-  template <class T> inline void deal_with_reference(T* ref_to_scan);
-
-  inline void deal_with_reference(StarTask ref);
-
-public:
-  void trim_queue();
-};
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -29,34 +29,61 @@
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
-#include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "utilities/taskqueue.hpp"
 
 // Inline functions for G1CollectedHeap
 
+inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
+  return _allocation_context_stats;
+}
+
 // Return the region with the given index. It assumes the index is valid.
-inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); }
+inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }
+
+inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
+  assert(is_in_reserved(addr),
+         err_msg("Cannot calculate region index for address "PTR_FORMAT" that is outside of the heap ["PTR_FORMAT", "PTR_FORMAT")",
+                 p2i(addr), p2i(_reserved.start()), p2i(_reserved.end())));
+  return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
+}
+
+inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
+  return _hrm.reserved().start() + index * HeapRegion::GrainWords;
+}
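As a concrete check (1 MB regions chosen only for illustration, so LogOfHRGrainBytes is 20): an address 5 MB + 64 bytes past the reserved start shifts down to region index 5, and bottom_addr_for_region(5) moves the reserved start forward by 5 * GrainWords HeapWords, i.e. exactly 5 MB, back to that region's first word. The two functions are inverses up to the offset within the region.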
 
 template <class T>
-inline HeapRegion*
-G1CollectedHeap::heap_region_containing(const T addr) const {
-  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
-  // hr can be null if addr in perm_gen
-  if (hr != NULL && hr->continuesHumongous()) {
-    hr = hr->humongous_start_region();
+inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const {
+  assert(addr != NULL, "invariant");
+  assert(is_in_g1_reserved((const void*) addr),
+      err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")",
+          p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end())));
+  return _hrm.addr_to_region((HeapWord*) addr);
+}
+
+template <class T>
+inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
+  HeapRegion* hr = heap_region_containing_raw(addr);
+  if (hr->continuesHumongous()) {
+    return hr->humongous_start_region();
   }
   return hr;
 }
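For example, a reference into the middle of a multi-region humongous object resolves via heap_region_containing_raw() to one of its continues humongous regions; the extra check above then redirects to the starts humongous region, where the object actually begins and its header lives.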
 
-template <class T>
-inline HeapRegion*
-G1CollectedHeap::heap_region_containing_raw(const T addr) const {
-  assert(_g1_reserved.contains((const void*) addr), "invariant");
-  HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
-  return res;
+inline void G1CollectedHeap::reset_gc_time_stamp() {
+  _gc_time_stamp = 0;
+  OrderAccess::fence();
+  // Clear the cached CSet starting regions and time stamps.
+  // Their validity is dependent on the GC timestamp.
+  clear_cset_start_regions();
+}
+
+inline void G1CollectedHeap::increment_gc_time_stamp() {
+  ++_gc_time_stamp;
+  OrderAccess::fence();
 }
 
 inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
@@ -64,22 +91,23 @@
 }
 
 inline bool G1CollectedHeap::obj_in_cs(oop obj) {
-  HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
+  HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
   return r != NULL && r->in_collection_set();
 }
 
-inline HeapWord*
-G1CollectedHeap::attempt_allocation(size_t word_size,
-                                    unsigned int* gc_count_before_ret,
-                                    int* gclocker_retry_count_ret) {
+inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
+                                                     unsigned int* gc_count_before_ret,
+                                                     int* gclocker_retry_count_ret) {
   assert_heap_not_locked_and_not_at_safepoint();
   assert(!isHumongous(word_size), "attempt_allocation() should not "
          "be called for humongous allocation requests");
 
-  HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
-                                                      false /* bot_updates */);
+  AllocationContext_t context = AllocationContext::current();
+  HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
+                                                                                   false /* bot_updates */);
   if (result == NULL) {
     result = attempt_allocation_slow(word_size,
+                                     context,
                                      gc_count_before_ret,
                                      gclocker_retry_count_ret);
   }
@@ -90,17 +118,17 @@
   return result;
 }
 
-inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
-                                                              word_size) {
+inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
+                                                              AllocationContext_t context) {
   assert(!isHumongous(word_size),
          "we should not be seeing humongous-size allocations in this path");
 
-  HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
-                                                      false /* bot_updates */);
+  HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
+                                                                                       false /* bot_updates */);
   if (result == NULL) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
-                                                      false /* bot_updates */);
+    result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
+                                                                                      false /* bot_updates */);
   }
   if (result != NULL) {
     dirty_young_block(result, word_size);
@@ -108,16 +136,17 @@
   return result;
 }
 
-inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
+inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size,
+                                                         AllocationContext_t context) {
   assert(!isHumongous(word_size),
          "we should not be seeing humongous-size allocations in this path");
 
-  HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
-                                                       true /* bot_updates */);
+  HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size,
+                                                                                  true /* bot_updates */);
   if (result == NULL) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
-                                                       true /* bot_updates */);
+    result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
+                                                                                 true /* bot_updates */);
   }
   return result;
 }
@@ -134,8 +163,7 @@
   // have to keep calling heap_region_containing_raw() in the
   // asserts below.
   DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
-  assert(containing_hr != NULL && start != NULL && word_size > 0,
-         "pre-condition");
+  assert(word_size > 0, "pre-condition");
   assert(containing_hr->is_in(start), "it should contain start");
   assert(containing_hr->is_young(), "it should be young");
   assert(!containing_hr->isHumongous(), "it should not be humongous");
@@ -159,17 +187,11 @@
   return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
 }
 
-
 // This is a fast test on whether a reference points into the
 // collection set or not. Assume that the reference
 // points into the heap.
-inline bool G1CollectedHeap::in_cset_fast_test(oop obj) {
-  assert(_in_cset_fast_test != NULL, "sanity");
-  assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, p2i((HeapWord*)obj)));
-  // no need to subtract the bottom of the heap from obj,
-  // _in_cset_fast_test is biased
-  uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
-  bool ret = _in_cset_fast_test[index];
+inline bool G1CollectedHeap::is_in_cset(oop obj) {
+  bool ret = _in_cset_fast_test.is_in_cset((HeapWord*)obj);
   // let's make sure the result is consistent with what the slower
   // test returns
   assert( ret || !obj_in_cs(obj), "sanity");
@@ -177,6 +199,18 @@
   return ret;
 }
 
+bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
+  return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj);
+}
+
+G1CollectedHeap::in_cset_state_t G1CollectedHeap::in_cset_state(const oop obj) {
+  return _in_cset_fast_test.at((HeapWord*)obj);
+}
+
+void G1CollectedHeap::register_humongous_region_with_in_cset_fast_test(uint index) {
+  _in_cset_fast_test.set_humongous(index);
+}
+
 #ifndef PRODUCT
 // Support for G1EvacuationFailureALot
 
@@ -226,8 +260,7 @@
   }
 }
 
-inline bool
-G1CollectedHeap::evacuation_should_fail() {
+inline bool G1CollectedHeap::evacuation_should_fail() {
   if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
     return false;
   }
@@ -251,8 +284,10 @@
 #endif  // #ifndef PRODUCT
 
 inline bool G1CollectedHeap::is_in_young(const oop obj) {
-  HeapRegion* hr = heap_region_containing(obj);
-  return hr != NULL && hr->is_young();
+  if (obj == NULL) {
+    return false;
+  }
+  return heap_region_containing(obj)->is_young();
 }
 
 // We don't need barriers for initializing stores to objects
@@ -265,105 +300,34 @@
 }
 
 inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
-  const HeapRegion* hr = heap_region_containing(obj);
-  if (hr == NULL) {
-    if (obj == NULL) return false;
-    else return true;
+  if (obj == NULL) {
+    return false;
   }
-  else return is_obj_dead(obj, hr);
+  return is_obj_dead(obj, heap_region_containing(obj));
 }
 
 inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
-  const HeapRegion* hr = heap_region_containing(obj);
-  if (hr == NULL) {
-    if (obj == NULL) return false;
-    else return true;
+  if (obj == NULL) {
+    return false;
   }
-  else return is_obj_ill(obj, hr);
-}
-
-template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
-  if (!from->is_survivor()) {
-    _g1_rem->par_write_ref(from, p, tid);
-  }
-}
-
-template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
-  if (G1DeferredRSUpdate) {
-    deferred_rs_update(from, p, tid);
-  } else {
-    immediate_rs_update(from, p, tid);
-  }
+  return is_obj_ill(obj, heap_region_containing(obj));
 }
 
-
-inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
-  assert(has_partial_array_mask(p), "invariant");
-  oop from_obj = clear_partial_array_mask(p);
-
-  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
-  assert(from_obj->is_objArray(), "must be obj array");
-  objArrayOop from_obj_array = objArrayOop(from_obj);
-  // The from-space object contains the real length.
-  int length                 = from_obj_array->length();
-
-  assert(from_obj->is_forwarded(), "must be forwarded");
-  oop to_obj                 = from_obj->forwardee();
-  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
-  objArrayOop to_obj_array   = objArrayOop(to_obj);
-  // We keep track of the next start index in the length field of the
-  // to-space object.
-  int next_index             = to_obj_array->length();
-  assert(0 <= next_index && next_index < length,
-         err_msg("invariant, next index: %d, length: %d", next_index, length));
-
-  int start                  = next_index;
-  int end                    = length;
-  int remainder              = end - start;
-  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
-  if (remainder > 2 * ParGCArrayScanChunk) {
-    end = start + ParGCArrayScanChunk;
-    to_obj_array->set_length(end);
-    // Push the remainder before we process the range in case another
-    // worker has run out of things to do and can steal it.
-    oop* from_obj_p = set_partial_array_mask(from_obj);
-    push_on_queue(from_obj_p);
-  } else {
-    assert(length == end, "sanity");
-    // We'll process the final range for this object. Restore the length
-    // so that the heap remains parsable in case of evacuation failure.
-    to_obj_array->set_length(end);
-  }
-  _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
-  // Process indexes [start,end). It will also process the header
-  // along with the first chunk (i.e., the chunk with start == 0).
-  // Note that at this point the length field of to_obj_array is not
-  // correct given that we are using it to keep track of the next
-  // start index. oop_iterate_range() (thankfully!) ignores the length
-  // field and only relies on the start / end parameters.  It does
-  // however return the size of the object which will be incorrect. So
-  // we have to ignore it even if we wanted to use it.
-  to_obj_array->oop_iterate_range(&_scanner, start, end);
-}
-
-template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
-  if (!has_partial_array_mask(ref_to_scan)) {
-    // Note: we can use "raw" versions of "region_containing" because
-    // "obj_to_scan" is definitely in the heap, and is not in a
-    // humongous region.
-    HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
-    do_oop_evac(ref_to_scan, r);
-  } else {
-    do_oop_partial_array((oop*)ref_to_scan);
-  }
-}
-
-inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
-  assert(verify_task(ref), "sanity");
-  if (ref.is_narrow()) {
-    deal_with_reference((narrowOop*)ref);
-  } else {
-    deal_with_reference((oop*)ref);
+inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
+  uint region = addr_to_region((HeapWord*)obj);
+  // We not only set the "live" flag in the humongous_is_live table, but also
+  // reset the entry in the _in_cset_fast_test table so that subsequent references
+  // to the same humongous object do not go into the slow path again.
+  // This is racy, as multiple threads may at the same time enter here, but this
+  // is benign.
+  // During collection we only ever set the "live" flag, and only ever clear the
+  // entry in the _in_cset_fast_test table.
+  // We only ever evaluate the contents of these tables (in the VM thread) after
+  // having synchronized the worker threads with the VM thread, or in the same
+  // thread (i.e. within the VM thread).
+  if (!_humongous_is_live.is_live(region)) {
+    _humongous_is_live.set_live(region);
+    _in_cset_fast_test.clear_humongous(region);
   }
 }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap_ext.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+
+bool G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
+                                                    jlong* totals,
+                                                    jbyte* accuracy,
+                                                    jint len) {
+  return false;
+}
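This appears to be the default stub for builds without allocation-context support: it reports no statistics and returns false. The _ext file naming, together with copy_allocation_context_stats() being virtual on G1CollectedHeap, suggests a downstream source tree can supply its own g1CollectedHeap_ext.cpp that fills in totals and accuracy per context instead.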
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -455,7 +455,7 @@
   } else {
     _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
   }
-  _free_regions_at_end_of_collection = _g1->free_regions();
+  _free_regions_at_end_of_collection = _g1->num_free_regions();
   update_young_list_target_length();
 
   // We may immediately start allocating regions and placing them on the
@@ -828,7 +828,7 @@
 
   record_survivor_regions(0, NULL, NULL);
 
-  _free_regions_at_end_of_collection = _g1->free_regions();
+  _free_regions_at_end_of_collection = _g1->num_free_regions();
   // Reset survivors SurvRateGroup.
   _survivor_surv_rate_group->reset();
   update_young_list_target_length();
@@ -1046,7 +1046,7 @@
 
   bool new_in_marking_window = _in_marking_window;
   bool new_in_marking_window_im = false;
-  if (during_initial_mark_pause()) {
+  if (last_pause_included_initial_mark) {
     new_in_marking_window = true;
     new_in_marking_window_im = true;
   }
@@ -1180,7 +1180,7 @@
 
   _in_marking_window = new_in_marking_window;
   _in_marking_window_im = new_in_marking_window_im;
-  _free_regions_at_end_of_collection = _g1->free_regions();
+  _free_regions_at_end_of_collection = _g1->num_free_regions();
   update_young_list_target_length();
 
   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
@@ -1202,7 +1202,7 @@
   _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
   _heap_capacity_bytes_before_gc = _g1->capacity();
   _heap_used_bytes_before_gc = _g1->used();
-  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
+  _cur_collection_pause_used_regions_at_start = _g1->num_used_regions();
 
   _eden_capacity_bytes_before_gc =
          (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
@@ -1425,6 +1425,18 @@
 #endif // PRODUCT
 }
 
+bool G1CollectorPolicy::is_young_list_full() {
+  uint young_list_length = _g1->young_list()->length();
+  uint young_list_target_length = _young_list_target_length;
+  return young_list_length >= young_list_target_length;
+}
+
+bool G1CollectorPolicy::can_expand_young_list() {
+  uint young_list_length = _g1->young_list()->length();
+  uint young_list_max_length = _young_list_max_length;
+  return young_list_length < young_list_max_length;
+}
+
 uint G1CollectorPolicy::max_regions(int purpose) {
   switch (purpose) {
     case GCAllocForSurvived:
@@ -1617,7 +1629,7 @@
 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
   _collectionSetChooser->clear();
 
-  uint region_num = _g1->n_regions();
+  uint region_num = _g1->num_regions();
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     const uint OverpartitionFactor = 4;
     uint WorkUnit;
@@ -1638,7 +1650,7 @@
         MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
              MinWorkUnit);
     }
-    _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
+    _collectionSetChooser->prepare_for_par_region_addition(_g1->num_regions(),
                                                            WorkUnit);
     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
                                             (int) WorkUnit);
@@ -1664,7 +1676,7 @@
 // Add the heap region at the head of the non-incremental collection set
 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
   assert(_inc_cset_build_state == Active, "Precondition");
-  assert(!hr->is_young(), "non-incremental add of young region");
+  assert(hr->is_old(), "the region should be old");
 
   assert(!hr->in_collection_set(), "should not already be in the CSet");
   hr->set_in_collection_set(true);
@@ -1810,7 +1822,7 @@
 // Add the region at the RHS of the incremental cset
 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
   // We should only ever be appending survivors at the end of a pause
-  assert( hr->is_survivor(), "Logic");
+  assert(hr->is_survivor(), "Logic");
 
   // Do the 'common' stuff
   add_region_to_incremental_cset_common(hr);
@@ -1828,7 +1840,7 @@
 // Add the region to the LHS of the incremental cset
 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
   // Survivors should be added to the RHS at the end of a pause
-  assert(!hr->is_survivor(), "Logic");
+  assert(hr->is_eden(), "Logic");
 
   // Do the 'common' stuff
   add_region_to_incremental_cset_common(hr);
@@ -1935,7 +1947,7 @@
   // of them are available.
 
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  const size_t region_num = g1h->n_regions();
+  const size_t region_num = g1h->num_regions();
   const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
   size_t result = region_num * perc / 100;
   // emulate ceiling
@@ -1988,7 +2000,11 @@
   HeapRegion* hr = young_list->first_survivor_region();
   while (hr != NULL) {
     assert(hr->is_survivor(), "badly formed young list");
-    hr->set_young();
+    // There is a convention that all the young regions in the CSet
+    // are tagged as "eden", so we do this for the survivors here. We
+    // use the special set_eden_pre_gc() as it doesn't check that the
+    // region is free (which is not the case here).
+    hr->set_eden_pre_gc();
     hr = hr->get_next_young_region();
   }
 
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
 
 #include "gc_implementation/g1/collectionSetChooser.hpp"
+#include "gc_implementation/g1/g1Allocator.hpp"
 #include "gc_implementation/g1/g1MMUTracker.hpp"
 #include "memory/collectorPolicy.hpp"
 
@@ -299,13 +300,13 @@
   // Accessors
 
   void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
-    hr->set_young();
+    hr->set_eden();
     hr->install_surv_rate_group(_short_lived_surv_rate_group);
     hr->set_young_index_in_cset(young_index_in_cset);
   }
 
   void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
-    assert(hr->is_young() && hr->is_survivor(), "pre-condition");
+    assert(hr->is_survivor(), "pre-condition");
     hr->install_surv_rate_group(_survivor_surv_rate_group);
     hr->set_young_index_in_cset(young_index_in_cset);
   }
@@ -803,7 +804,7 @@
 
   // If an expansion would be appropriate, because recent GC overhead had
   // exceeded the desired limit, return an amount to expand by.
-  size_t expansion_amount();
+  virtual size_t expansion_amount();
 
   // Print tracing information.
   void print_tracing_info() const;
@@ -822,17 +823,9 @@
 
   size_t young_list_target_length() const { return _young_list_target_length; }
 
-  bool is_young_list_full() {
-    uint young_list_length = _g1->young_list()->length();
-    uint young_list_target_length = _young_list_target_length;
-    return young_list_length >= young_list_target_length;
-  }
+  bool is_young_list_full();
 
-  bool can_expand_young_list() {
-    uint young_list_length = _g1->young_list()->length();
-    uint young_list_max_length = _young_list_max_length;
-    return young_list_length < young_list_max_length;
-  }
+  bool can_expand_young_list();
 
   uint young_list_max_length() {
     return _young_list_max_length;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy_ext.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP
+
+#include "gc_implementation/g1/g1CollectorPolicy.hpp"
+
+class G1CollectorPolicyExt : public G1CollectorPolicy { };
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP
--- a/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -71,6 +71,9 @@
   bool _during_initial_mark;
   bool _during_conc_mark;
   uint _worker_id;
+  HeapWord* _end_of_last_gap;
+  HeapWord* _last_gap_threshold;
+  HeapWord* _last_obj_threshold;
 
 public:
   RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
@@ -83,7 +86,10 @@
     _update_rset_cl(update_rset_cl),
     _during_initial_mark(during_initial_mark),
     _during_conc_mark(during_conc_mark),
-    _worker_id(worker_id) { }
+    _worker_id(worker_id),
+    _end_of_last_gap(hr->bottom()),
+    _last_gap_threshold(hr->bottom()),
+    _last_obj_threshold(hr->bottom()) { }
 
   size_t marked_bytes() { return _marked_bytes; }
 
@@ -107,7 +113,12 @@
     HeapWord* obj_addr = (HeapWord*) obj;
     assert(_hr->is_in(obj_addr), "sanity");
     size_t obj_size = obj->size();
-    _hr->update_bot_for_object(obj_addr, obj_size);
+    HeapWord* obj_end = obj_addr + obj_size;
+
+    if (_end_of_last_gap != obj_addr) {
+      // there was a gap before obj_addr
+      _last_gap_threshold = _hr->cross_threshold(_end_of_last_gap, obj_addr);
+    }
 
     if (obj->is_forwarded() && obj->forwardee() == obj) {
       // The object failed to move.
@@ -115,7 +126,9 @@
       // We consider all objects that we find self-forwarded to be
       // live. What we'll do is that we'll update the prev marking
       // info so that they are all under PTAMS and explicitly marked.
-      _cm->markPrev(obj);
+      if (!_cm->isPrevMarked(obj)) {
+        _cm->markPrev(obj);
+      }
       if (_during_initial_mark) {
         // For the next marking info we'll only mark the
         // self-forwarded objects explicitly if we are during
@@ -145,28 +158,35 @@
       // remembered set entries missing given that we skipped cards on
       // the collection set. So, we'll recreate such entries now.
       obj->oop_iterate(_update_rset_cl);
-      assert(_cm->isPrevMarked(obj), "Should be marked!");
     } else {
+
       // The object has been either evacuated or is dead. Fill it with a
       // dummy object.
-      MemRegion mr((HeapWord*) obj, obj_size);
+      MemRegion mr(obj_addr, obj_size);
       CollectedHeap::fill_with_object(mr);
+
+      // must nuke all dead objects which we skipped when iterating over the region
+      _cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end));
     }
+    _end_of_last_gap = obj_end;
+    _last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end);
   }
 };
 
 class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
   G1CollectedHeap* _g1h;
   ConcurrentMark* _cm;
-  OopsInHeapRegionClosure *_update_rset_cl;
   uint _worker_id;
 
+  DirtyCardQueue _dcq;
+  UpdateRSetDeferred _update_rset_cl;
+
 public:
   RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
-                                OopsInHeapRegionClosure* update_rset_cl,
                                 uint worker_id) :
-    _g1h(g1h), _update_rset_cl(update_rset_cl),
-    _worker_id(worker_id), _cm(_g1h->concurrent_mark()) { }
+    _g1h(g1h), _dcq(&g1h->dirty_card_queue_set()), _update_rset_cl(g1h, &_dcq),
+    _worker_id(worker_id), _cm(_g1h->concurrent_mark()) {
+    }
 
   bool doHeapRegion(HeapRegion *hr) {
     bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
@@ -177,20 +197,14 @@
 
     if (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)) {
       if (hr->evacuation_failed()) {
-        RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, _update_rset_cl,
+        RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, &_update_rset_cl,
                                             during_initial_mark,
                                             during_conc_mark,
                                             _worker_id);
 
-        MemRegion mr(hr->bottom(), hr->end());
-        // We'll recreate the prev marking info so we'll first clear
-        // the prev bitmap range for this region. We never mark any
-        // CSet objects explicitly so the next bitmap range should be
-        // cleared anyway.
-        _cm->clearRangePrevBitmap(mr);
-
         hr->note_self_forwarding_removal_start(during_initial_mark,
                                                during_conc_mark);
+        _g1h->check_bitmaps("Self-Forwarding Ptr Removal", hr);
 
         // In the common case (i.e. when there is no evacuation
         // failure) we make sure that the following is done when
@@ -202,9 +216,11 @@
         // whenever this might be required in the future.
         hr->rem_set()->reset_for_par_iteration();
         hr->reset_bot();
-        _update_rset_cl->set_region(hr);
+        _update_rset_cl.set_region(hr);
         hr->object_iterate(&rspc);
 
+        hr->rem_set()->clean_strong_code_roots(hr);
+
         hr->note_self_forwarding_removal_end(during_initial_mark,
                                              during_conc_mark,
                                              rspc.marked_bytes());
@@ -224,16 +240,7 @@
     _g1h(g1h) { }
 
   void work(uint worker_id) {
-    UpdateRSetImmediate immediate_update(_g1h->g1_rem_set());
-    DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
-    UpdateRSetDeferred deferred_update(_g1h, &dcq);
-
-    OopsInHeapRegionClosure *update_rset_cl = &deferred_update;
-    if (!G1DeferredRSUpdate) {
-      update_rset_cl = &immediate_update;
-    }
-
-    RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, update_rset_cl, worker_id);
+    RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, worker_id);
 
     HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
     _g1h->collection_set_iterate_from(hr, &rsfp_cl);
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -166,13 +166,14 @@
   _last_update_rs_processed_buffers(_max_gc_threads, "%d"),
   _last_scan_rs_times_ms(_max_gc_threads, "%.1lf"),
   _last_strong_code_root_scan_times_ms(_max_gc_threads, "%.1lf"),
-  _last_strong_code_root_mark_times_ms(_max_gc_threads, "%.1lf"),
   _last_obj_copy_times_ms(_max_gc_threads, "%.1lf"),
   _last_termination_times_ms(_max_gc_threads, "%.1lf"),
   _last_termination_attempts(_max_gc_threads, SIZE_FORMAT),
   _last_gc_worker_end_times_ms(_max_gc_threads, "%.1lf", false),
   _last_gc_worker_times_ms(_max_gc_threads, "%.1lf"),
   _last_gc_worker_other_times_ms(_max_gc_threads, "%.1lf"),
+  _last_redirty_logged_cards_time_ms(_max_gc_threads, "%.1lf"),
+  _last_redirty_logged_cards_processed_cards(_max_gc_threads, SIZE_FORMAT),
   _cur_string_dedup_queue_fixup_worker_times_ms(_max_gc_threads, "%.1lf"),
   _cur_string_dedup_table_fixup_worker_times_ms(_max_gc_threads, "%.1lf")
 {
@@ -191,13 +192,16 @@
   _last_update_rs_processed_buffers.reset();
   _last_scan_rs_times_ms.reset();
   _last_strong_code_root_scan_times_ms.reset();
-  _last_strong_code_root_mark_times_ms.reset();
   _last_obj_copy_times_ms.reset();
   _last_termination_times_ms.reset();
   _last_termination_attempts.reset();
   _last_gc_worker_end_times_ms.reset();
   _last_gc_worker_times_ms.reset();
   _last_gc_worker_other_times_ms.reset();
+
+  _last_redirty_logged_cards_time_ms.reset();
+  _last_redirty_logged_cards_processed_cards.reset();
+
 }
 
 void G1GCPhaseTimes::note_gc_end() {
@@ -208,7 +212,6 @@
   _last_update_rs_processed_buffers.verify();
   _last_scan_rs_times_ms.verify();
   _last_strong_code_root_scan_times_ms.verify();
-  _last_strong_code_root_mark_times_ms.verify();
   _last_obj_copy_times_ms.verify();
   _last_termination_times_ms.verify();
   _last_termination_attempts.verify();
@@ -223,7 +226,6 @@
                                _last_update_rs_times_ms.get(i) +
                                _last_scan_rs_times_ms.get(i) +
                                _last_strong_code_root_scan_times_ms.get(i) +
-                               _last_strong_code_root_mark_times_ms.get(i) +
                                _last_obj_copy_times_ms.get(i) +
                                _last_termination_times_ms.get(i);
 
@@ -233,6 +235,9 @@
 
   _last_gc_worker_times_ms.verify();
   _last_gc_worker_other_times_ms.verify();
+
+  _last_redirty_logged_cards_time_ms.verify();
+  _last_redirty_logged_cards_processed_cards.verify();
 }
 
 void G1GCPhaseTimes::note_string_dedup_fixup_start() {
@@ -249,6 +254,10 @@
   LineBuffer(level).append_and_print_cr("[%s: %.1lf ms]", str, value);
 }
 
+void G1GCPhaseTimes::print_stats(int level, const char* str, size_t value) {
+  LineBuffer(level).append_and_print_cr("[%s: "SIZE_FORMAT"]", str, value);
+}
+
 void G1GCPhaseTimes::print_stats(int level, const char* str, double value, uint workers) {
   LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: " UINT32_FORMAT "]", str, value, workers);
 }
@@ -263,9 +272,6 @@
     // Now subtract the time taken to fix up roots in generated code
     misc_time_ms += _cur_collection_code_root_fixup_time_ms;
 
-    // Strong code root migration time
-    misc_time_ms += _cur_strong_code_root_migration_time_ms;
-
     // Strong code root purge time
     misc_time_ms += _cur_strong_code_root_purge_time_ms;
 
@@ -292,9 +298,6 @@
     if (_last_satb_filtering_times_ms.sum() > 0.0) {
       _last_satb_filtering_times_ms.print(2, "SATB Filtering (ms)");
     }
-    if (_last_strong_code_root_mark_times_ms.sum() > 0.0) {
-     _last_strong_code_root_mark_times_ms.print(2, "Code Root Marking (ms)");
-    }
     _last_update_rs_times_ms.print(2, "Update RS (ms)");
       _last_update_rs_processed_buffers.print(3, "Processed Buffers");
     _last_scan_rs_times_ms.print(2, "Scan RS (ms)");
@@ -312,9 +315,6 @@
     if (_last_satb_filtering_times_ms.sum() > 0.0) {
       _last_satb_filtering_times_ms.print(1, "SATB Filtering (ms)");
     }
-    if (_last_strong_code_root_mark_times_ms.sum() > 0.0) {
-      _last_strong_code_root_mark_times_ms.print(1, "Code Root Marking (ms)");
-    }
     _last_update_rs_times_ms.print(1, "Update RS (ms)");
       _last_update_rs_processed_buffers.print(2, "Processed Buffers");
     _last_scan_rs_times_ms.print(1, "Scan RS (ms)");
@@ -322,7 +322,6 @@
     _last_obj_copy_times_ms.print(1, "Object Copy (ms)");
   }
   print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
-  print_stats(1, "Code Root Migration", _cur_strong_code_root_migration_time_ms);
   print_stats(1, "Code Root Purge", _cur_strong_code_root_purge_time_ms);
   if (G1StringDedup::is_enabled()) {
     print_stats(1, "String Dedup Fixup", _cur_string_dedup_fixup_time_ms, _active_gc_threads);
@@ -350,8 +349,18 @@
     _recorded_non_young_cset_choice_time_ms));
   print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
   print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
-  if (G1DeferredRSUpdate) {
-    print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms);
+  print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms);
+  if (G1Log::finest()) {
+    _last_redirty_logged_cards_time_ms.print(3, "Parallel Redirty");
+    _last_redirty_logged_cards_processed_cards.print(3, "Redirtied Cards");
+  }
+  if (G1ReclaimDeadHumongousObjectsAtYoungGC) {
+    print_stats(2, "Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
+    if (G1Log::finest()) {
+      print_stats(3, "Humongous Total", _cur_fast_reclaim_humongous_total);
+      print_stats(3, "Humongous Candidate", _cur_fast_reclaim_humongous_candidates);
+      print_stats(3, "Humongous Reclaimed", _cur_fast_reclaim_humongous_reclaimed);
+    }
   }
   print_stats(2, "Free CSet",
     (_recorded_young_free_cset_time_ms +
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -120,7 +120,6 @@
   WorkerDataArray<int>    _last_update_rs_processed_buffers;
   WorkerDataArray<double> _last_scan_rs_times_ms;
   WorkerDataArray<double> _last_strong_code_root_scan_times_ms;
-  WorkerDataArray<double> _last_strong_code_root_mark_times_ms;
   WorkerDataArray<double> _last_obj_copy_times_ms;
   WorkerDataArray<double> _last_termination_times_ms;
   WorkerDataArray<size_t> _last_termination_attempts;
@@ -130,7 +129,6 @@
 
   double _cur_collection_par_time_ms;
   double _cur_collection_code_root_fixup_time_ms;
-  double _cur_strong_code_root_migration_time_ms;
   double _cur_strong_code_root_purge_time_ms;
 
   double _cur_evac_fail_recalc_used;
@@ -151,16 +149,24 @@
   double _recorded_young_cset_choice_time_ms;
   double _recorded_non_young_cset_choice_time_ms;
 
+  WorkerDataArray<double> _last_redirty_logged_cards_time_ms;
+  WorkerDataArray<size_t> _last_redirty_logged_cards_processed_cards;
   double _recorded_redirty_logged_cards_time_ms;
 
   double _recorded_young_free_cset_time_ms;
   double _recorded_non_young_free_cset_time_ms;
 
+  double _cur_fast_reclaim_humongous_time_ms;
+  size_t _cur_fast_reclaim_humongous_total;
+  size_t _cur_fast_reclaim_humongous_candidates;
+  size_t _cur_fast_reclaim_humongous_reclaimed;
+
   double _cur_verify_before_time_ms;
   double _cur_verify_after_time_ms;
 
   // Helper methods for detailed logging
   void print_stats(int level, const char* str, double value);
+  void print_stats(int level, const char* str, size_t value);
   void print_stats(int level, const char* str, double value, uint workers);
 
  public:
@@ -197,10 +203,6 @@
     _last_strong_code_root_scan_times_ms.set(worker_i, ms);
   }
 
-  void record_strong_code_root_mark_time(uint worker_i, double ms) {
-    _last_strong_code_root_mark_times_ms.set(worker_i, ms);
-  }
-
   void record_obj_copy_time(uint worker_i, double ms) {
     _last_obj_copy_times_ms.set(worker_i, ms);
   }
@@ -230,10 +232,6 @@
     _cur_collection_code_root_fixup_time_ms = ms;
   }
 
-  void record_strong_code_root_migration_time(double ms) {
-    _cur_strong_code_root_migration_time_ms = ms;
-  }
-
   void record_strong_code_root_purge_time(double ms) {
     _cur_strong_code_root_purge_time_ms = ms;
   }
@@ -285,6 +283,16 @@
     _recorded_non_young_free_cset_time_ms = time_ms;
   }
 
+  void record_fast_reclaim_humongous_stats(size_t total, size_t candidates) {
+    _cur_fast_reclaim_humongous_total = total;
+    _cur_fast_reclaim_humongous_candidates = candidates;
+  }
+
+  void record_fast_reclaim_humongous_time_ms(double value, size_t reclaimed) {
+    _cur_fast_reclaim_humongous_time_ms = value;
+    _cur_fast_reclaim_humongous_reclaimed = reclaimed;
+  }
+
   void record_young_cset_choice_time_ms(double time_ms) {
     _recorded_young_cset_choice_time_ms = time_ms;
   }
@@ -293,6 +301,14 @@
     _recorded_non_young_cset_choice_time_ms = time_ms;
   }
 
+  void record_redirty_logged_cards_time_ms(uint worker_i, double time_ms) {
+    _last_redirty_logged_cards_time_ms.set(worker_i, time_ms);
+  }
+
+  void record_redirty_logged_cards_processed_cards(uint worker_i, size_t processed_buffers) {
+    _last_redirty_logged_cards_processed_cards.set(worker_i, processed_buffers);
+  }
+
   void record_redirty_logged_cards_time_ms(double time_ms) {
     _recorded_redirty_logged_cards_time_ms = time_ms;
   }
@@ -343,6 +359,10 @@
     return _recorded_non_young_free_cset_time_ms;
   }
 
+  double fast_reclaim_humongous_time_ms() {
+    return _cur_fast_reclaim_humongous_time_ms;
+  }
+
   double average_last_update_rs_time() {
     return _last_update_rs_times_ms.average();
   }
@@ -359,10 +379,6 @@
     return _last_strong_code_root_scan_times_ms.average();
   }
 
-  double average_last_strong_code_root_mark_time(){
-    return _last_strong_code_root_mark_times_ms.average();
-  }
-
   double average_last_obj_copy_time() {
     return _last_obj_copy_times_ms.average();
   }
--- a/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -27,13 +27,12 @@
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1HotCardCache.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
-#include "gc_implementation/g1/heapRegion.hpp"
 #include "runtime/atomic.hpp"
 
 G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
   _g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {}
 
-void G1HotCardCache::initialize() {
+void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
   if (default_use_cache()) {
     _use_cache = true;
 
@@ -49,7 +48,7 @@
     _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / (int)n_workers);
     _hot_cache_par_claimed_idx = 0;
 
-    _card_counts.initialize();
+    _card_counts.initialize(card_counts_storage);
   }
 }
 
@@ -135,10 +134,6 @@
   // above, are discarded prior to re-enabling the cache near the end of the GC.
 }
 
-void G1HotCardCache::resize_card_counts(size_t heap_capacity) {
-  _card_counts.resize(heap_capacity);
-}
-
 void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
   _card_counts.clear_region(hr);
 }
--- a/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -78,7 +78,7 @@
   G1HotCardCache(G1CollectedHeap* g1h);
   ~G1HotCardCache();
 
-  void initialize();
+  void initialize(G1RegionToSpaceMapper* card_counts_storage);
 
   bool use_cache() { return _use_cache; }
 
@@ -115,9 +115,6 @@
 
   bool hot_cache_is_empty() { return _n_hot == 0; }
 
-  // Resizes the card counts table to match the given capacity
-  void resize_card_counts(size_t heap_capacity);
-
   // Zeros the values in the card counts table for entire committed heap
   void reset_card_counts();
 
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -123,20 +123,20 @@
 void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                     bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
-  GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer());
+  GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
   GenMarkSweep::trace(" 1");
 
   SharedHeap* sh = SharedHeap::heap();
 
-  // Need cleared claim bits for the strong roots processing
+  // Need cleared claim bits for the roots processing
   ClassLoaderDataGraph::clear_claimed_marks();
 
-  sh->process_strong_roots(true,  // activate StrongRootsScope
-                           false, // not scavenging.
-                           SharedHeap::SO_SystemClasses,
+  MarkingCodeBlobClosure follow_code_closure(&GenMarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
+  sh->process_strong_roots(true,   // activate StrongRootsScope
+                           SharedHeap::SO_None,
                            &GenMarkSweep::follow_root_closure,
-                           &GenMarkSweep::follow_code_root_closure,
-                           &GenMarkSweep::follow_klass_closure);
+                           &GenMarkSweep::follow_cld_closure,
+                           &follow_code_closure);
 
   // Process reference objects found during marking
   ReferenceProcessor* rp = GenMarkSweep::ref_processor();
@@ -148,7 +148,8 @@
                                       &GenMarkSweep::keep_alive,
                                       &GenMarkSweep::follow_stack_closure,
                                       NULL,
-                                      gc_timer());
+                                      gc_timer(),
+                                      gc_tracer()->gc_id());
   gc_tracer()->report_gc_reference_stats(stats);
 
 
@@ -193,65 +194,6 @@
   gc_tracer()->report_object_count_after_gc(&GenMarkSweep::is_alive);
 }
 
-class G1PrepareCompactClosure: public HeapRegionClosure {
-  G1CollectedHeap* _g1h;
-  ModRefBarrierSet* _mrbs;
-  CompactPoint _cp;
-  HeapRegionSetCount _humongous_regions_removed;
-
-  void free_humongous_region(HeapRegion* hr) {
-    HeapWord* end = hr->end();
-    FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
-
-    assert(hr->startsHumongous(),
-           "Only the start of a humongous region should be freed.");
-
-    hr->set_containing_set(NULL);
-    _humongous_regions_removed.increment(1u, hr->capacity());
-
-    _g1h->free_humongous_region(hr, &dummy_free_list, false /* par */);
-    hr->prepare_for_compaction(&_cp);
-    // Also clear the part of the card table that will be unused after
-    // compaction.
-    _mrbs->clear(MemRegion(hr->compaction_top(), end));
-    dummy_free_list.remove_all();
-  }
-
-public:
-  G1PrepareCompactClosure(CompactibleSpace* cs)
-  : _g1h(G1CollectedHeap::heap()),
-    _mrbs(_g1h->g1_barrier_set()),
-    _cp(NULL, cs, cs->initialize_threshold()),
-    _humongous_regions_removed() { }
-
-  void update_sets() {
-    // We'll recalculate total used bytes and recreate the free list
-    // at the end of the GC, so no point in updating those values here.
-    HeapRegionSetCount empty_set;
-    _g1h->remove_from_old_sets(empty_set, _humongous_regions_removed);
-  }
-
-  bool doHeapRegion(HeapRegion* hr) {
-    if (hr->isHumongous()) {
-      if (hr->startsHumongous()) {
-        oop obj = oop(hr->bottom());
-        if (obj->is_gc_marked()) {
-          obj->forward_to(obj);
-        } else  {
-          free_humongous_region(hr);
-        }
-      } else {
-        assert(hr->continuesHumongous(), "Invalid humongous.");
-      }
-    } else {
-      hr->prepare_for_compaction(&_cp);
-      // Also clear the part of the card table that will be unused after
-      // compaction.
-      _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
-    }
-    return false;
-  }
-};
 
 void G1MarkSweep::mark_sweep_phase2() {
   // Now all live objects are marked, compute the new object addresses.
@@ -260,21 +202,10 @@
   // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
   // tracking expects us to do so. See comment under phase4.
 
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-  GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer());
+  GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
   GenMarkSweep::trace("2");
 
-  // find the first region
-  HeapRegion* r = g1h->region_at(0);
-  CompactibleSpace* sp = r;
-  if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) {
-    sp = r->next_compaction_space();
-  }
-
-  G1PrepareCompactClosure blk(sp);
-  g1h->heap_region_iterate(&blk);
-  blk.update_sets();
+  prepare_compaction();
 }
 
 class G1AdjustPointersClosure: public HeapRegionClosure {
@@ -299,27 +230,27 @@
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   // Adjust the pointers to reflect the new locations
-  GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer());
+  GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
   GenMarkSweep::trace("3");
 
   SharedHeap* sh = SharedHeap::heap();
 
-  // Need cleared claim bits for the strong roots processing
+  // Need cleared claim bits for the roots processing
   ClassLoaderDataGraph::clear_claimed_marks();
 
-  sh->process_strong_roots(true,  // activate StrongRootsScope
-                           false, // not scavenging.
-                           SharedHeap::SO_AllClasses,
-                           &GenMarkSweep::adjust_pointer_closure,
-                           NULL,  // do not touch code cache here
-                           &GenMarkSweep::adjust_klass_closure);
+  CodeBlobToOopClosure adjust_code_closure(&GenMarkSweep::adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
+  sh->process_all_roots(true,  // activate StrongRootsScope
+                        SharedHeap::SO_AllCodeCache,
+                        &GenMarkSweep::adjust_pointer_closure,
+                        &GenMarkSweep::adjust_cld_closure,
+                        &adjust_code_closure);
 
   assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
   g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure);
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
-  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_pointer_closure);
+  sh->process_weak_roots(&GenMarkSweep::adjust_pointer_closure);
 
   if (G1StringDedup::is_enabled()) {
     G1StringDedup::oops_do(&GenMarkSweep::adjust_pointer_closure);
@@ -362,10 +293,75 @@
   // to use a higher index (saved from phase2) when verifying perm_gen.
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
-  GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer());
+  GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
   GenMarkSweep::trace("4");
 
   G1SpaceCompactClosure blk;
   g1h->heap_region_iterate(&blk);
 
 }
+
+void G1MarkSweep::prepare_compaction_work(G1PrepareCompactClosure* blk) {
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  g1h->heap_region_iterate(blk);
+  blk->update_sets();
+}
+
+void G1PrepareCompactClosure::free_humongous_region(HeapRegion* hr) {
+  HeapWord* end = hr->end();
+  FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
+
+  assert(hr->startsHumongous(),
+         "Only the start of a humongous region should be freed.");
+
+  hr->set_containing_set(NULL);
+  _humongous_regions_removed.increment(1u, hr->capacity());
+
+  _g1h->free_humongous_region(hr, &dummy_free_list, false /* par */);
+  prepare_for_compaction(hr, end);
+  dummy_free_list.remove_all();
+}
+
+void G1PrepareCompactClosure::prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
+  // If this is the first live region that we came across which we can compact,
+  // initialize the CompactPoint.
+  if (!is_cp_initialized()) {
+    _cp.space = hr;
+    _cp.threshold = hr->initialize_threshold();
+  }
+  prepare_for_compaction_work(&_cp, hr, end);
+}
+
+void G1PrepareCompactClosure::prepare_for_compaction_work(CompactPoint* cp,
+                                                          HeapRegion* hr,
+                                                          HeapWord* end) {
+  hr->prepare_for_compaction(cp);
+  // Also clear the part of the card table that will be unused after
+  // compaction.
+  _mrbs->clear(MemRegion(hr->compaction_top(), end));
+}
+
+void G1PrepareCompactClosure::update_sets() {
+  // We'll recalculate total used bytes and recreate the free list
+  // at the end of the GC, so no point in updating those values here.
+  HeapRegionSetCount empty_set;
+  _g1h->remove_from_old_sets(empty_set, _humongous_regions_removed);
+}
+
+bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) {
+  if (hr->isHumongous()) {
+    if (hr->startsHumongous()) {
+      oop obj = oop(hr->bottom());
+      if (obj->is_gc_marked()) {
+        obj->forward_to(obj);
+      } else  {
+        free_humongous_region(hr);
+      }
+    } else {
+      assert(hr->continuesHumongous(), "Invalid humongous.");
+    }
+  } else {
+    prepare_for_compaction(hr, hr->end());
+  }
+  return false;
+}
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -43,7 +43,7 @@
 // compaction.
 //
 // Class unloading will only occur when a full gc is invoked.
-
+class G1PrepareCompactClosure;
 
 class G1MarkSweep : AllStatic {
   friend class VM_G1MarkSweep;
@@ -70,6 +70,30 @@
   static void mark_sweep_phase4();
 
   static void allocate_stacks();
+  static void prepare_compaction();
+  static void prepare_compaction_work(G1PrepareCompactClosure* blk);
+};
+
+class G1PrepareCompactClosure : public HeapRegionClosure {
+ protected:
+  G1CollectedHeap* _g1h;
+  ModRefBarrierSet* _mrbs;
+  CompactPoint _cp;
+  HeapRegionSetCount _humongous_regions_removed;
+
+  virtual void prepare_for_compaction(HeapRegion* hr, HeapWord* end);
+  void prepare_for_compaction_work(CompactPoint* cp, HeapRegion* hr, HeapWord* end);
+  void free_humongous_region(HeapRegion* hr);
+  bool is_cp_initialized() const { return _cp.space != NULL; }
+
+ public:
+  G1PrepareCompactClosure() :
+    _g1h(G1CollectedHeap::heap()),
+    _mrbs(_g1h->g1_barrier_set()),
+    _humongous_regions_removed() { }
+
+  void update_sets();
+  bool doHeapRegion(HeapRegion* hr);
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1MARKSWEEP_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep_ext.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1MarkSweep.hpp"
+
+void G1MarkSweep::prepare_compaction() {
+  G1PrepareCompactClosure blk;
+  G1MarkSweep::prepare_compaction_work(&blk);
+}
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -25,7 +25,28 @@
 #include "precompiled.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
+#include "gc_implementation/g1/g1ParScanThreadState.hpp"
 
 G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1,  G1ParScanThreadState* par_scan_state) :
   G1ParClosureSuper(g1, par_scan_state), _scanned_klass(NULL),
   _cm(_g1->concurrent_mark()) {}
+
+G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1) :
+  _g1(g1), _par_scan_state(NULL), _worker_id(UINT_MAX) { }
+
+G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
+  _g1(g1), _par_scan_state(NULL),
+  _worker_id(UINT_MAX) {
+  set_par_scan_thread_state(par_scan_state);
+}
+
+void G1ParClosureSuper::set_par_scan_thread_state(G1ParScanThreadState* par_scan_state) {
+  assert(_par_scan_state == NULL, "_par_scan_state must only be set once");
+  assert(par_scan_state != NULL, "Must set par_scan_state to non-NULL.");
+
+  _par_scan_state = par_scan_state;
+  _worker_id = par_scan_state->queue_num();
+
+  assert(_worker_id < MAX2((uint)ParallelGCThreads, 1u),
+         err_msg("The given worker id %u must be less than the number of threads %u", _worker_id, MAX2((uint)ParallelGCThreads, 1u)));
+}
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -25,6 +25,8 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
 
+#include "memory/iterator.hpp"
+
 class HeapRegion;
 class G1CollectedHeap;
 class G1RemSet;
@@ -51,8 +53,13 @@
   G1ParScanThreadState* _par_scan_state;
   uint _worker_id;
 public:
+  // Initializes the instance, leaving _par_scan_state uninitialized. Must be done
+  // later using the set_par_scan_thread_state() method.
+  G1ParClosureSuper(G1CollectedHeap* g1);
   G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
   bool apply_to_weak_ref_discovered_field() { return true; }
+
+  void set_par_scan_thread_state(G1ParScanThreadState* par_scan_state);
 };
 
 class G1ParPushHeapRSClosure : public G1ParClosureSuper {
@@ -68,9 +75,8 @@
 
 class G1ParScanClosure : public G1ParClosureSuper {
 public:
-  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
-    G1ParClosureSuper(g1, par_scan_state)
-  {
+  G1ParScanClosure(G1CollectedHeap* g1, ReferenceProcessor* rp) :
+    G1ParClosureSuper(g1) {
     assert(_ref_processor == NULL, "sanity");
     _ref_processor = rp;
   }
@@ -102,7 +108,7 @@
   template <class T> void do_klass_barrier(T* p, oop new_obj);
 };
 
-template <G1Barrier barrier, bool do_mark_object>
+template <G1Barrier barrier, G1Mark do_mark_object>
 class G1ParCopyClosure : public G1ParCopyHelper {
 private:
   template <class T> void do_oop_work(T* p);
@@ -117,19 +123,19 @@
   template <class T> void do_oop_nv(T* p) { do_oop_work(p); }
   virtual void do_oop(oop* p)       { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
+
+  G1CollectedHeap*      g1()  { return _g1; };
+  G1ParScanThreadState* pss() { return _par_scan_state; }
+  ReferenceProcessor*   rp()  { return _ref_processor; };
 };
 
-typedef G1ParCopyClosure<G1BarrierNone, false> G1ParScanExtRootClosure;
-typedef G1ParCopyClosure<G1BarrierKlass, false> G1ParScanMetadataClosure;
-
-
-typedef G1ParCopyClosure<G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
-typedef G1ParCopyClosure<G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure;
-
+typedef G1ParCopyClosure<G1BarrierNone,  G1MarkNone>             G1ParScanExtRootClosure;
+typedef G1ParCopyClosure<G1BarrierNone,  G1MarkFromRoot>         G1ParScanAndMarkExtRootClosure;
+typedef G1ParCopyClosure<G1BarrierNone,  G1MarkPromotedFromRoot> G1ParScanAndMarkWeakExtRootClosure;
 // We use a separate closure to handle references during evacuation
 // failure processing.
 
-typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
+typedef G1ParCopyClosure<G1BarrierEvac, G1MarkNone> G1ParScanHeapEvacFailureClosure;
 
 class FilterIntoCSClosure: public ExtendedOopClosure {
   G1CollectedHeap* _g1;
@@ -160,10 +166,11 @@
 };
 
 // Closure for iterating over object fields during concurrent marking
-class G1CMOopClosure : public ExtendedOopClosure {
+class G1CMOopClosure : public MetadataAwareOopClosure {
+protected:
+  ConcurrentMark*    _cm;
 private:
   G1CollectedHeap*   _g1h;
-  ConcurrentMark*    _cm;
   CMTask*            _task;
 public:
   G1CMOopClosure(G1CollectedHeap* g1h, ConcurrentMark* cm, CMTask* task);
@@ -173,7 +180,7 @@
 };
 
 // Closure to scan the root regions during concurrent marking
-class G1RootRegionScanClosure : public ExtendedOopClosure {
+class G1RootRegionScanClosure : public MetadataAwareOopClosure {
 private:
   G1CollectedHeap* _g1h;
   ConcurrentMark*  _cm;
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -28,9 +28,12 @@
 #include "gc_implementation/g1/concurrentMark.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1OopClosures.hpp"
+#include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
+#include "memory/iterator.inline.hpp"
+#include "runtime/prefetch.inline.hpp"
 
 /*
  * This really ought to be an inline function, but apparently the C++
@@ -41,7 +44,7 @@
 inline void FilterIntoCSClosure::do_oop_nv(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop) &&
-      _g1->obj_in_cs(oopDesc::decode_heap_oop_not_null(heap_oop))) {
+      _g1->is_in_cset_or_humongous(oopDesc::decode_heap_oop_not_null(heap_oop))) {
     _oc->do_oop(p);
   }
 }
@@ -64,7 +67,8 @@
 
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (_g1->in_cset_fast_test(obj)) {
+    G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj);
+    if (state == G1CollectedHeap::InCSet) {
       // We're not going to even bother checking whether the object is
       // already forwarded or not, as this usually causes an immediate
       // stall. We'll try to prefetch the object (for write, given that
@@ -83,6 +87,9 @@
 
       _par_scan_state->push_on_queue(p);
     } else {
+      if (state == G1CollectedHeap::IsHumongous) {
+        _g1->set_humongous_is_live(obj);
+      }
       _par_scan_state->update_rs(_from, p, _worker_id);
     }
   }
@@ -94,22 +101,20 @@
 
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (_g1->in_cset_fast_test(obj)) {
+    if (_g1->is_in_cset_or_humongous(obj)) {
       Prefetch::write(obj->mark_addr(), 0);
       Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
 
       // Place on the references queue
       _par_scan_state->push_on_queue(p);
+    } else {
+      assert(!_g1->obj_in_cs(obj), "checking");
     }
   }
 }
 
 template <class T>
 inline void G1CMOopClosure::do_oop_nv(T* p) {
-  assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
-  assert(!_g1h->is_on_master_free_list(
-                    _g1h->heap_region_containing((HeapWord*) p)), "invariant");
-
   oop obj = oopDesc::load_decode_heap_oop(p);
   if (_cm->verbose_high()) {
     gclog_or_tty->print_cr("[%u] we're looking at location "
@@ -125,9 +130,7 @@
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
     HeapRegion* hr = _g1h->heap_region_containing((HeapWord*) obj);
-    if (hr != NULL) {
-      _cm->grayRoot(obj, obj->size(), _worker_id, hr);
-    }
+    _cm->grayRoot(obj, obj->size(), _worker_id, hr);
   }
 }
 
@@ -154,57 +157,61 @@
 template <class T>
 inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) {
   oop obj = oopDesc::load_decode_heap_oop(p);
+  if (obj == NULL) {
+    return;
+  }
 #ifdef ASSERT
   // can't do because of races
   // assert(obj == NULL || obj->is_oop(), "expected an oop");
 
   // Do the safe subset of is_oop
-  if (obj != NULL) {
 #ifdef CHECK_UNHANDLED_OOPS
-    oopDesc* o = obj.obj();
+  oopDesc* o = obj.obj();
 #else
-    oopDesc* o = obj;
+  oopDesc* o = obj;
 #endif // CHECK_UNHANDLED_OOPS
-    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
-    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
-  }
+  assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
+  assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
 #endif // ASSERT
 
   assert(_from != NULL, "from region must be non-NULL");
   assert(_from->is_in_reserved(p), "p is not in from");
 
   HeapRegion* to = _g1->heap_region_containing(obj);
-  if (to != NULL && _from != to) {
-    // The _record_refs_into_cset flag is true during the RSet
-    // updating part of an evacuation pause. It is false at all
-    // other times:
-    //  * rebuilding the rembered sets after a full GC
-    //  * during concurrent refinement.
-    //  * updating the remembered sets of regions in the collection
-    //    set in the event of an evacuation failure (when deferred
-    //    updates are enabled).
+  if (_from == to) {
+    // Normally this closure should only be called with cross-region references.
+    // But since Java threads are manipulating the references concurrently and we
+    // reload the values things may have changed.
+    return;
+  }
+  // The _record_refs_into_cset flag is true during the RSet
+  // updating part of an evacuation pause. It is false at all
+  // other times:
+  //  * rebuilding the remembered sets after a full GC
+  //  * during concurrent refinement.
+  //  * updating the remembered sets of regions in the collection
+  //    set in the event of an evacuation failure (when deferred
+  //    updates are enabled).
 
-    if (_record_refs_into_cset && to->in_collection_set()) {
-      // We are recording references that point into the collection
-      // set and this particular reference does exactly that...
-      // If the referenced object has already been forwarded
-      // to itself, we are handling an evacuation failure and
-      // we have already visited/tried to copy this object
-      // there is no need to retry.
-      if (!self_forwarded(obj)) {
-        assert(_push_ref_cl != NULL, "should not be null");
-        // Push the reference in the refs queue of the G1ParScanThreadState
-        // instance for this worker thread.
-        _push_ref_cl->do_oop(p);
-      }
+  if (_record_refs_into_cset && to->in_collection_set()) {
+    // We are recording references that point into the collection
+    // set and this particular reference does exactly that...
+    // If the referenced object has already been forwarded
+    // to itself, we are handling an evacuation failure and
+    // we have already visited/tried to copy this object
+    // there is no need to retry.
+    if (!self_forwarded(obj)) {
+      assert(_push_ref_cl != NULL, "should not be null");
+      // Push the reference in the refs queue of the G1ParScanThreadState
+      // instance for this worker thread.
+      _push_ref_cl->do_oop(p);
+     }
 
-      // Deferred updates to the CSet are either discarded (in the normal case),
-      // or processed (if an evacuation failure occurs) at the end
-      // of the collection.
-      // See G1RemSet::cleanup_after_oops_into_collection_set_do().
-      return;
-    }
-
+    // Deferred updates to the CSet are either discarded (in the normal case),
+    // or processed (if an evacuation failure occurs) at the end
+    // of the collection.
+    // See G1RemSet::cleanup_after_oops_into_collection_set_do().
+  } else {
     // We either don't care about pushing references that point into the
     // collection set (i.e. we're not during an evacuation pause) _or_
     // the reference doesn't point into the collection set. Either way
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1PageBasedVirtualSpace.hpp"
+#include "oops/markOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "services/memTracker.hpp"
+#ifdef TARGET_OS_FAMILY_linux
+# include "os_linux.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_solaris
+# include "os_solaris.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_windows
+# include "os_windows.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_bsd
+# include "os_bsd.inline.hpp"
+#endif
+#include "utilities/bitMap.inline.hpp"
+
+G1PageBasedVirtualSpace::G1PageBasedVirtualSpace() : _low_boundary(NULL),
+  _high_boundary(NULL), _committed(), _page_size(0), _special(false),
+  _dirty(), _executable(false) {
+}
+
+bool G1PageBasedVirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t page_size) {
+  if (!rs.is_reserved()) {
+    return false;  // Allocation failed.
+  }
+  assert(_low_boundary == NULL, "VirtualSpace already initialized");
+  assert(page_size > 0, "Granularity must be non-zero.");
+
+  _low_boundary  = rs.base();
+  _high_boundary = _low_boundary + rs.size();
+
+  _special = rs.special();
+  _executable = rs.executable();
+
+  _page_size = page_size;
+
+  assert(_committed.size() == 0, "virtual space initialized more than once");
+  uintx size_in_bits = rs.size() / page_size;
+  _committed.resize(size_in_bits, /* in_resource_area */ false);
+  if (_special) {
+    _dirty.resize(size_in_bits, /* in_resource_area */ false);
+  }
+
+  return true;
+}
+
+
+G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() {
+  release();
+}
+
+void G1PageBasedVirtualSpace::release() {
+  // This does not release memory it never reserved.
+  // Caller must release via rs.release();
+  _low_boundary           = NULL;
+  _high_boundary          = NULL;
+  _special                = false;
+  _executable             = false;
+  _page_size              = 0;
+  _committed.resize(0, false);
+  _dirty.resize(0, false);
+}
+
+size_t G1PageBasedVirtualSpace::committed_size() const {
+  return _committed.count_one_bits() * _page_size;
+}
+
+size_t G1PageBasedVirtualSpace::reserved_size() const {
+  return pointer_delta(_high_boundary, _low_boundary, sizeof(char));
+}
+
+size_t G1PageBasedVirtualSpace::uncommitted_size()  const {
+  return reserved_size() - committed_size();
+}
+
+uintptr_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
+  return (addr - _low_boundary) / _page_size;
+}
+
+bool G1PageBasedVirtualSpace::is_area_committed(uintptr_t start, size_t size_in_pages) const {
+  uintptr_t end = start + size_in_pages;
+  return _committed.get_next_zero_offset(start, end) >= end;
+}
+
+bool G1PageBasedVirtualSpace::is_area_uncommitted(uintptr_t start, size_t size_in_pages) const {
+  uintptr_t end = start + size_in_pages;
+  return _committed.get_next_one_offset(start, end) >= end;
+}
+
+char* G1PageBasedVirtualSpace::page_start(uintptr_t index) {
+  return _low_boundary + index * _page_size;
+}
+
+size_t G1PageBasedVirtualSpace::byte_size_for_pages(size_t num) {
+  return num * _page_size;
+}
+
+bool G1PageBasedVirtualSpace::commit(uintptr_t start, size_t size_in_pages) {
+  // We need to make sure to commit all pages covered by the given area.
+  guarantee(is_area_uncommitted(start, size_in_pages), "Specified area is not uncommitted");
+
+  bool zero_filled = true;
+  uintptr_t end = start + size_in_pages;
+
+  if (_special) {
+    // Check for dirty pages and update zero_filled if any found.
+    if (_dirty.get_next_one_offset(start,end) < end) {
+      zero_filled = false;
+      _dirty.clear_range(start, end);
+    }
+  } else {
+    os::commit_memory_or_exit(page_start(start), byte_size_for_pages(size_in_pages), _executable,
+                              err_msg("Failed to commit pages from "SIZE_FORMAT" of length "SIZE_FORMAT, start, size_in_pages));
+  }
+  _committed.set_range(start, end);
+
+  return zero_filled;
+}
+
+void G1PageBasedVirtualSpace::uncommit(uintptr_t start, size_t size_in_pages) {
+  guarantee(is_area_committed(start, size_in_pages), "checking");
+
+  if (_special) {
+    // Mark that memory is dirty. If committed again the memory might
+    // need to be cleared explicitly.
+    _dirty.set_range(start, start + size_in_pages);
+  } else {
+    os::uncommit_memory(page_start(start), byte_size_for_pages(size_in_pages));
+  }
+
+  _committed.clear_range(start, start + size_in_pages);
+}
+
+bool G1PageBasedVirtualSpace::contains(const void* p) const {
+  return _low_boundary <= (const char*) p && (const char*) p < _high_boundary;
+}
+
+#ifndef PRODUCT
+void G1PageBasedVirtualSpace::print_on(outputStream* out) {
+  out->print   ("Virtual space:");
+  if (_special) out->print(" (pinned in memory)");
+  out->cr();
+  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
+  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
+  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_low_boundary), p2i(_high_boundary));
+}
+
+void G1PageBasedVirtualSpace::print() {
+  print_on(tty);
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/memRegion.hpp"
+#include "runtime/virtualspace.hpp"
+#include "utilities/bitMap.hpp"
+
+// Virtual space management helper for a virtual space with an OS page allocation
+// granularity.
+// (De-)Allocation requests are always OS page aligned by passing a page index
+// and multiples of pages.
+// The implementation gives an error when trying to commit or uncommit pages that
+// have already been committed or uncommitted.
+class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+ private:
+  // Reserved area addresses.
+  char* _low_boundary;
+  char* _high_boundary;
+
+  // The commit/uncommit granularity in bytes.
+  size_t _page_size;
+
+  // Bitmap used for verification of commit/uncommit operations.
+  BitMap _committed;
+
+  // Bitmap used to keep track of which pages are dirty or not for _special
+  // spaces. This is needed because for those spaces the underlying memory
+  // will only be zero filled the first time it is committed. Calls to commit
+  // will use this bitmap and return whether or not the memory is zero filled.
+  BitMap _dirty;
+
+  // Indicates that the entire space has been committed and pinned in memory,
+  // os::commit_memory() or os::uncommit_memory() have no function.
+  bool _special;
+
+  // Indicates whether the committed space should be executable.
+  bool _executable;
+
+  // Returns the index of the page which contains the given address.
+  uintptr_t  addr_to_page_index(char* addr) const;
+  // Returns the address of the given page index.
+  char*  page_start(uintptr_t index);
+  // Returns the byte size of the given number of pages.
+  size_t byte_size_for_pages(size_t num);
+
+  // Returns true if the entire area is backed by committed memory.
+  bool is_area_committed(uintptr_t start, size_t size_in_pages) const;
+  // Returns true if the entire area is not backed by committed memory.
+  bool is_area_uncommitted(uintptr_t start, size_t size_in_pages) const;
+
+ public:
+
+  // Commit the given area of pages starting at start being size_in_pages large.
+  // Returns true if the given area is zero filled upon completion.
+  bool commit(uintptr_t start, size_t size_in_pages);
+
+  // Uncommit the given area of pages starting at start being size_in_pages large.
+  void uncommit(uintptr_t start, size_t size_in_pages);
+
+  // Initialization
+  G1PageBasedVirtualSpace();
+  bool initialize_with_granularity(ReservedSpace rs, size_t page_size);
+
+  // Destruction
+  ~G1PageBasedVirtualSpace();
+
+  // Amount of reserved memory.
+  size_t reserved_size() const;
+  // Memory used in this virtual space.
+  size_t committed_size() const;
+  // Memory left to use/expand in this virtual space.
+  size_t uncommitted_size() const;
+
+  bool contains(const void* p) const;
+
+  MemRegion reserved() {
+    MemRegion x((HeapWord*)_low_boundary, reserved_size() / HeapWordSize);
+    return x;
+  }
+
+  void release();
+
+  void check_for_contiguity() PRODUCT_RETURN;
+
+  // Debugging
+  void print_on(outputStream* out) PRODUCT_RETURN;
+  void print();
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/g1OopClosures.inline.hpp"
+#include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "oops/oop.pcgc.inline.hpp"
+#include "runtime/prefetch.inline.hpp"
+
+G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
+  : _g1h(g1h),
+    _refs(g1h->task_queue(queue_num)),
+    _dcq(&g1h->dirty_card_queue_set()),
+    _ct_bs(g1h->g1_barrier_set()),
+    _g1_rem(g1h->g1_rem_set()),
+    _hash_seed(17), _queue_num(queue_num),
+    _term_attempts(0),
+    _age_table(false), _scanner(g1h, rp),
+    _strong_roots_time(0), _term_time(0) {
+  _scanner.set_par_scan_thread_state(this);
+  // We allocate one entry per young CSet region plus one, since we
+  // "sacrifice" entry 0 to keep track of surviving bytes for
+  // non-young regions (where the age is -1).
+  // We also add a few elements at the beginning and at the end in
+  // an attempt to eliminate cache contention.
+  uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
+  uint array_length = PADDING_ELEM_NUM +
+                      real_length +
+                      PADDING_ELEM_NUM;
+  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
+  if (_surviving_young_words_base == NULL)
+    vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
+                          "Not enough space for young surv histo.");
+  _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
+  memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
+
+  _g1_par_allocator = G1ParGCAllocator::create_allocator(_g1h);
+
+  _start = os::elapsedTime();
+}
+
+G1ParScanThreadState::~G1ParScanThreadState() {
+  _g1_par_allocator->retire_alloc_buffers();
+  delete _g1_par_allocator;
+  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
+}
+
+void
+G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
+{
+  st->print_raw_cr("GC Termination Stats");
+  st->print_raw_cr("     elapsed  --strong roots-- -------termination-------"
+                   " ------waste (KiB)------");
+  st->print_raw_cr("thr     ms        ms      %        ms      %    attempts"
+                   "  total   alloc    undo");
+  st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
+                   " ------- ------- -------");
+}
+
+void
+G1ParScanThreadState::print_termination_stats(int i,
+                                              outputStream* const st) const
+{
+  const double elapsed_ms = elapsed_time() * 1000.0;
+  const double s_roots_ms = strong_roots_time() * 1000.0;
+  const double term_ms    = term_time() * 1000.0;
+  const size_t alloc_buffer_waste = _g1_par_allocator->alloc_buffer_waste();
+  const size_t undo_waste         = _g1_par_allocator->undo_waste();
+  st->print_cr("%3d %9.2f %9.2f %6.2f "
+               "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
+               SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
+               i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
+               term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
+               (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
+               alloc_buffer_waste * HeapWordSize / K,
+               undo_waste * HeapWordSize / K);
+}
+
+#ifdef ASSERT
+bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
+  assert(ref != NULL, "invariant");
+  assert(UseCompressedOops, "sanity");
+  assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, p2i(ref)));
+  oop p = oopDesc::load_decode_heap_oop(ref);
+  assert(_g1h->is_in_g1_reserved(p),
+         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
+  return true;
+}
+
+bool G1ParScanThreadState::verify_ref(oop* ref) const {
+  assert(ref != NULL, "invariant");
+  if (has_partial_array_mask(ref)) {
+    // Must be in the collection set--it's already been copied.
+    oop p = clear_partial_array_mask(ref);
+    assert(_g1h->obj_in_cs(p),
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
+  } else {
+    oop p = oopDesc::load_decode_heap_oop(ref);
+    assert(_g1h->is_in_g1_reserved(p),
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
+  }
+  return true;
+}
+
+bool G1ParScanThreadState::verify_task(StarTask ref) const {
+  if (ref.is_narrow()) {
+    return verify_ref((narrowOop*) ref);
+  } else {
+    return verify_ref((oop*) ref);
+  }
+}
+#endif // ASSERT
+
+void G1ParScanThreadState::trim_queue() {
+  assert(_evac_failure_cl != NULL, "not set");
+
+  StarTask ref;
+  do {
+    // Drain the overflow stack first, so other threads can steal.
+    while (_refs->pop_overflow(ref)) {
+      dispatch_reference(ref);
+    }
+
+    while (_refs->pop_local(ref)) {
+      dispatch_reference(ref);
+    }
+  } while (!_refs->is_empty());
+}
+
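The trim_queue() loop above drains the shared overflow list before the bounded local queue, so that entries left in the local queue remain available for stealing by other workers. A minimal standalone sketch of that drain order follows; the task type, the std::deque/std::vector containers and the dispatch callback are stand-ins, not G1's RefToScanQueue or dispatch_reference().

#include <deque>
#include <vector>

// Illustrative drain loop under the assumptions stated above.
template <typename Task, typename Dispatch>
void trim_queues(std::deque<Task>& overflow, std::vector<Task>& local, Dispatch dispatch) {
  do {
    // Drain the shared overflow list first, so entries left in the bounded
    // local queue remain available for other workers to steal.
    while (!overflow.empty()) {
      Task t = overflow.front();
      overflow.pop_front();
      dispatch(t);
    }
    // Then drain the local queue; dispatch() may push follow-up work.
    while (!local.empty()) {
      Task t = local.back();
      local.pop_back();
      dispatch(t);
    }
  } while (!overflow.empty() || !local.empty());
}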
+oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
+  size_t word_sz = old->size();
+  HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
+  // +1 to make the -1 indexes valid...
+  int       young_index = from_region->young_index_in_cset()+1;
+  assert( (from_region->is_young() && young_index >  0) ||
+         (!from_region->is_young() && young_index == 0), "invariant" );
+  G1CollectorPolicy* g1p = _g1h->g1_policy();
+  markOop m = old->mark();
+  int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
+                                           : m->age();
+  GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
+                                                             word_sz);
+  AllocationContext_t context = from_region->allocation_context();
+  HeapWord* obj_ptr = _g1_par_allocator->allocate(alloc_purpose, word_sz, context);
+#ifndef PRODUCT
+  // Should this evacuation fail?
+  if (_g1h->evacuation_should_fail()) {
+    if (obj_ptr != NULL) {
+      _g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
+      obj_ptr = NULL;
+    }
+  }
+#endif // !PRODUCT
+
+  if (obj_ptr == NULL) {
+    // This will either forward-to-self, or detect that someone else has
+    // installed a forwarding pointer.
+    return _g1h->handle_evacuation_failure_par(this, old);
+  }
+
+  oop obj = oop(obj_ptr);
+
+  // We're going to allocate linearly, so might as well prefetch ahead.
+  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
+
+  oop forward_ptr = old->forward_to_atomic(obj);
+  if (forward_ptr == NULL) {
+    Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
+
+    // alloc_purpose is just a hint to allocate() above; recheck the type of region
+    // we actually allocated from and update alloc_purpose accordingly.
+    HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr);
+    alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
+
+    if (g1p->track_object_age(alloc_purpose)) {
+      // We could simply do obj->incr_age(). However, this causes a
+      // performance issue. obj->incr_age() will first check whether
+      // the object has a displaced mark by checking its mark word;
+      // getting the mark word from the new location of the object
+      // stalls. So, given that we already have the mark word and we
+      // are about to install it anyway, it's better to increase the
+      // age on the mark word, when the object does not have a
+      // displaced mark word. We're not expecting many objects to have
+      // a displaced mark word, so that case is not optimized
+      // further (it could be...) and we simply call obj->incr_age().
+
+      if (m->has_displaced_mark_helper()) {
+        // in this case, we have to install the mark word first,
+        // otherwise obj looks to be forwarded (the old mark word,
+        // which contains the forward pointer, was copied)
+        obj->set_mark(m);
+        obj->incr_age();
+      } else {
+        m = m->incr_age();
+        obj->set_mark(m);
+      }
+      age_table()->add(obj, word_sz);
+    } else {
+      obj->set_mark(m);
+    }
+
+    if (G1StringDedup::is_enabled()) {
+      G1StringDedup::enqueue_from_evacuation(from_region->is_young(),
+                                             to_region->is_young(),
+                                             queue_num(),
+                                             obj);
+    }
+
+    size_t* surv_young_words = surviving_young_words();
+    surv_young_words[young_index] += word_sz;
+
+    if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
+      // We keep track of the next start index in the length field of
+      // the to-space object. The actual length can be found in the
+      // length field of the from-space object.
+      arrayOop(obj)->set_length(0);
+      oop* old_p = set_partial_array_mask(old);
+      push_on_queue(old_p);
+    } else {
+      // No point in using the slower heap_region_containing() method,
+      // given that we know obj is in the heap.
+      _scanner.set_region(_g1h->heap_region_containing_raw(obj));
+      obj->oop_iterate_backwards(&_scanner);
+    }
+  } else {
+    _g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
+    obj = forward_ptr;
+  }
+  return obj;
+}
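The constructor comment earlier in this file explains why the surviving-young-words array is padded at both ends and why index 0 is reserved for non-young regions. The following standalone sketch shows that layout; the cache line size, the names and the std::vector backing store are assumptions for illustration, not the HotSpot types.

#include <cstddef>
#include <vector>

// Assumed cache line size for the sketch; HotSpot uses DEFAULT_CACHE_LINE_SIZE.
static const std::size_t kCacheLineBytes = 64;
static const std::size_t kPaddingElems = kCacheLineBytes / sizeof(std::size_t);

struct SurvivorWordCounts {
  std::vector<std::size_t> storage;   // leading padding + (1 + young regions) + trailing padding
  std::size_t* counts;                // points just past the leading padding

  explicit SurvivorWordCounts(std::size_t young_regions)
    : storage(kPaddingElems + (1 + young_regions) + kPaddingElems, 0),
      counts(&storage[kPaddingElems]) {}

  // young_index_in_cset is -1 for non-young regions; the +1 bias maps it to entry 0.
  void add(int young_index_in_cset, std::size_t words) {
    counts[young_index_in_cset + 1] += words;
  }
};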
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_HPP
+
+#include "gc_implementation/g1/dirtyCardQueue.hpp"
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+#include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1OopClosures.hpp"
+#include "gc_implementation/g1/g1RemSet.hpp"
+#include "gc_implementation/shared/ageTable.hpp"
+#include "memory/allocation.hpp"
+#include "oops/oop.hpp"
+
+class HeapRegion;
+class outputStream;
+
+class G1ParScanThreadState : public StackObj {
+ private:
+  G1CollectedHeap* _g1h;
+  RefToScanQueue*  _refs;
+  DirtyCardQueue   _dcq;
+  G1SATBCardTableModRefBS* _ct_bs;
+  G1RemSet* _g1_rem;
+
+  G1ParGCAllocator*   _g1_par_allocator;
+
+  ageTable            _age_table;
+
+  G1ParScanClosure    _scanner;
+
+  size_t           _alloc_buffer_waste;
+  size_t           _undo_waste;
+
+  OopsInHeapRegionClosure*      _evac_failure_cl;
+
+  int  _hash_seed;
+  uint _queue_num;
+
+  size_t _term_attempts;
+
+  double _start;
+  double _start_strong_roots;
+  double _strong_roots_time;
+  double _start_term;
+  double _term_time;
+
+  // Map from young-age-index (0 == not young, 1 == youngest) to
+  // surviving words. The base pointer is what we get back from the malloc call.
+  size_t* _surviving_young_words_base;
+  // This points into the array, as we use the first few entries for padding.
+  size_t* _surviving_young_words;
+
+#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
+
+  void   add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
+  void   add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
+
+  DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
+  G1SATBCardTableModRefBS* ctbs()                { return _ct_bs; }
+
+ public:
+  G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
+  ~G1ParScanThreadState();
+
+  ageTable*         age_table()       { return &_age_table;       }
+
+#ifdef ASSERT
+  bool queue_is_empty() const { return _refs->is_empty(); }
+
+  bool verify_ref(narrowOop* ref) const;
+  bool verify_ref(oop* ref) const;
+  bool verify_task(StarTask ref) const;
+#endif // ASSERT
+
+  template <class T> void push_on_queue(T* ref) {
+    assert(verify_ref(ref), "sanity");
+    _refs->push(ref);
+  }
+
+  template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
+    // If the new value of the field points into the same region, or the field
+    // itself is in a survivor (to-space) region, we don't need to include it
+    // in the RSet updates.
+    if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
+      size_t card_index = ctbs()->index_for(p);
+      // If the card hasn't been added to the buffer, do it.
+      if (ctbs()->mark_card_deferred(card_index)) {
+        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
+      }
+    }
+  }
+ public:
+
+  void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
+    _evac_failure_cl = evac_failure_cl;
+  }
+
+  OopsInHeapRegionClosure* evac_failure_closure() { return _evac_failure_cl; }
+
+  int* hash_seed() { return &_hash_seed; }
+  uint queue_num() { return _queue_num; }
+
+  size_t term_attempts() const  { return _term_attempts; }
+  void note_term_attempt() { _term_attempts++; }
+
+  void start_strong_roots() {
+    _start_strong_roots = os::elapsedTime();
+  }
+  void end_strong_roots() {
+    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
+  }
+  double strong_roots_time() const { return _strong_roots_time; }
+
+  void start_term_time() {
+    note_term_attempt();
+    _start_term = os::elapsedTime();
+  }
+  void end_term_time() {
+    _term_time += (os::elapsedTime() - _start_term);
+  }
+  double term_time() const { return _term_time; }
+
+  double elapsed_time() const {
+    return os::elapsedTime() - _start;
+  }
+
+  static void print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
+  void print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
+
+  size_t* surviving_young_words() {
+    // The young index is biased by one to hide entry 0, which accumulates
+    // surviving words for age -1 regions (i.e. non-young ones).
+    return _surviving_young_words;
+  }
+
+ private:
+  #define G1_PARTIAL_ARRAY_MASK 0x2
+
+  inline bool has_partial_array_mask(oop* ref) const {
+    return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
+  }
+
+  // We never encode partial array oops as narrowOop*, so return false immediately.
+  // This allows the compiler to create optimized code when popping references from
+  // the work queue.
+  inline bool has_partial_array_mask(narrowOop* ref) const {
+    assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
+    return false;
+  }
+
+  // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
+  // We always encode partial arrays as regular oops, to allow the
+  // specialization of has_partial_array_mask() for narrowOops above.
+  // This means that unintentional use of this method with narrowOops is caught
+  // by the compiler.
+  inline oop* set_partial_array_mask(oop obj) const {
+    assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
+    return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
+  }
+
+  inline oop clear_partial_array_mask(oop* ref) const {
+    return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
+  }
+
+  inline void do_oop_partial_array(oop* p);
+
+  // This method is applied to the fields of the objects that have just been copied.
+  template <class T> inline void do_oop_evac(T* p, HeapRegion* from);
+
+  template <class T> inline void deal_with_reference(T* ref_to_scan);
+
+  inline void dispatch_reference(StarTask ref);
+ public:
+
+  oop copy_to_survivor_space(oop const obj);
+
+  void trim_queue();
+
+  inline void steal_and_trim_queue(RefToScanQueueSet *task_queues);
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_HPP
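The partial-array mask above is a low-bit pointer tag: object addresses are at least 4-byte aligned, so bit 1 is free to mark queue entries that represent a partially scanned array rather than a plain reference. A minimal standalone sketch of that tagging scheme follows; void* and the constant name are stand-ins for the oop* and G1_PARTIAL_ARRAY_MASK used above.

#include <cassert>
#include <cstdint>

// Illustrative low-bit pointer tag; kPartialArrayMask plays the role of
// G1_PARTIAL_ARRAY_MASK and void* stands in for oop*.
static const std::uintptr_t kPartialArrayMask = 0x2;

inline bool has_partial_array_mask(void* ref) {
  return (reinterpret_cast<std::uintptr_t>(ref) & kPartialArrayMask) != 0;
}

inline void* set_partial_array_mask(void* obj) {
  // Objects are at least 4-byte aligned, so bit 1 is free for the tag.
  assert((reinterpret_cast<std::uintptr_t>(obj) & kPartialArrayMask) == 0 && "information loss");
  return reinterpret_cast<void*>(reinterpret_cast<std::uintptr_t>(obj) | kPartialArrayMask);
}

inline void* clear_partial_array_mask(void* ref) {
  return reinterpret_cast<void*>(reinterpret_cast<std::uintptr_t>(ref) & ~kPartialArrayMask);
}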
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP
+
+#include "gc_implementation/g1/g1ParScanThreadState.hpp"
+#include "gc_implementation/g1/g1RemSet.inline.hpp"
+#include "oops/oop.inline.hpp"
+
+template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) {
+  assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
+         "Reference should not be NULL here as such are never pushed to the task queue.");
+  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+
+  // Although we never intentionally push references outside of the collection
+  // set, due to (benign) races in the claim mechanism during RSet scanning more
+  // than one thread might claim the same card, so the same card may be
+  // processed multiple times. Hence we redo this check here.
+  G1CollectedHeap::in_cset_state_t in_cset_state = _g1h->in_cset_state(obj);
+  if (in_cset_state == G1CollectedHeap::InCSet) {
+    oop forwardee;
+    if (obj->is_forwarded()) {
+      forwardee = obj->forwardee();
+    } else {
+      forwardee = copy_to_survivor_space(obj);
+    }
+    oopDesc::encode_store_heap_oop(p, forwardee);
+  } else if (in_cset_state == G1CollectedHeap::IsHumongous) {
+    _g1h->set_humongous_is_live(obj);
+  } else {
+    assert(in_cset_state == G1CollectedHeap::InNeither,
+           err_msg("In_cset_state must be InNeither here, but is %d", in_cset_state));
+  }
+
+  assert(obj != NULL, "Must be");
+  update_rs(from, p, queue_num());
+}
+
+inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
+  assert(has_partial_array_mask(p), "invariant");
+  oop from_obj = clear_partial_array_mask(p);
+
+  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
+  assert(from_obj->is_objArray(), "must be obj array");
+  objArrayOop from_obj_array = objArrayOop(from_obj);
+  // The from-space object contains the real length.
+  int length                 = from_obj_array->length();
+
+  assert(from_obj->is_forwarded(), "must be forwarded");
+  oop to_obj                 = from_obj->forwardee();
+  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
+  objArrayOop to_obj_array   = objArrayOop(to_obj);
+  // We keep track of the next start index in the length field of the
+  // to-space object.
+  int next_index             = to_obj_array->length();
+  assert(0 <= next_index && next_index < length,
+         err_msg("invariant, next index: %d, length: %d", next_index, length));
+
+  int start                  = next_index;
+  int end                    = length;
+  int remainder              = end - start;
+  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
+  if (remainder > 2 * ParGCArrayScanChunk) {
+    end = start + ParGCArrayScanChunk;
+    to_obj_array->set_length(end);
+    // Push the remainder before we process the range in case another
+    // worker has run out of things to do and can steal it.
+    oop* from_obj_p = set_partial_array_mask(from_obj);
+    push_on_queue(from_obj_p);
+  } else {
+    assert(length == end, "sanity");
+    // We'll process the final range for this object. Restore the length
+    // so that the heap remains parsable in case of evacuation failure.
+    to_obj_array->set_length(end);
+  }
+  _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
+  // Process indexes [start,end). It will also process the header
+  // along with the first chunk (i.e., the chunk with start == 0).
+  // Note that at this point the length field of to_obj_array is not
+  // correct given that we are using it to keep track of the next
+  // start index. oop_iterate_range() (thankfully!) ignores the length
+  // field and only relies on the start / end parameters. It does,
+  // however, return the size of the object, which will be incorrect
+  // here, so its return value must be ignored.
+  to_obj_array->oop_iterate_range(&_scanner, start, end);
+}
+
+template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
+  if (!has_partial_array_mask(ref_to_scan)) {
+    // Note: we can use "raw" versions of "region_containing" because
+    // "ref_to_scan" is definitely in the heap, and is not in a
+    // humongous region.
+    HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
+    do_oop_evac(ref_to_scan, r);
+  } else {
+    do_oop_partial_array((oop*)ref_to_scan);
+  }
+}
+
+inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
+  assert(verify_task(ref), "sanity");
+  if (ref.is_narrow()) {
+    deal_with_reference((narrowOop*)ref);
+  } else {
+    deal_with_reference((oop*)ref);
+  }
+}
+
+void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
+  StarTask stolen_task;
+  while (task_queues->steal(queue_num(), hash_seed(), stolen_task)) {
+    assert(verify_task(stolen_task), "sanity");
+    dispatch_reference(stolen_task);
+
+    // We've just processed a reference and we might have made
+    // available new entries on the queues. So we have to make sure
+    // we drain the queues as necessary.
+    trim_queue();
+  }
+}
+
+#endif /* SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP */
+
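do_oop_partial_array() above processes large object arrays in chunks, re-pushing the remainder so an idle worker can steal it, and records the next start index in the to-space object's length field. The following standalone sketch shows the same chunking idea with an explicit task struct instead of the length-field trick; the container, task type and chunk handling are illustrative assumptions, not G1 code.

#include <cstddef>
#include <deque>
#include <vector>

// Illustrative chunked scan: process at most `chunk` elements per task and
// re-push the remainder so another worker could pick it up.
struct ArrayChunkTask {
  const std::vector<int>* array;
  std::size_t next_index;
};

inline void scan_in_chunks(std::deque<ArrayChunkTask>& queue, std::size_t chunk,
                           void (*process)(int element)) {
  while (!queue.empty()) {
    ArrayChunkTask task = queue.front();
    queue.pop_front();

    std::size_t start = task.next_index;
    std::size_t end = task.array->size();
    if (end - start > 2 * chunk) {
      end = start + chunk;
      // Push the remainder before processing the current range.
      ArrayChunkTask rest = { task.array, end };
      queue.push_back(rest);
    }
    for (std::size_t i = start; i < end; i++) {
      process((*task.array)[i]);
    }
  }
}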
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1BiasedArray.hpp"
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
+#include "memory/allocation.inline.hpp"
+#include "runtime/virtualspace.hpp"
+#include "services/memTracker.hpp"
+#include "utilities/bitMap.inline.hpp"
+
+G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
+                                             size_t commit_granularity,
+                                             size_t region_granularity,
+                                             MemoryType type) :
+  _storage(),
+  _commit_granularity(commit_granularity),
+  _region_granularity(region_granularity),
+  _listener(NULL),
+  _commit_map() {
+  guarantee(is_power_of_2(commit_granularity), "must be");
+  guarantee(is_power_of_2(region_granularity), "must be");
+  _storage.initialize_with_granularity(rs, commit_granularity);
+
+  MemTracker::record_virtual_memory_type((address)rs.base(), type);
+}
+
+// G1RegionToSpaceMapper implementation where the region granularity is larger than
+// or the same as the commit granularity.
+// Basically, the space corresponding to one region spans several OS pages.
+class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
+ private:
+  size_t _pages_per_region;
+
+ public:
+  G1RegionsLargerThanCommitSizeMapper(ReservedSpace rs,
+                                      size_t os_commit_granularity,
+                                      size_t alloc_granularity,
+                                      size_t commit_factor,
+                                      MemoryType type) :
+    G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
+    _pages_per_region(alloc_granularity / (os_commit_granularity * commit_factor)) {
+
+    guarantee(alloc_granularity >= os_commit_granularity, "allocation granularity smaller than commit granularity");
+    _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
+  }
+
+  virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
+    bool zero_filled = _storage.commit(start_idx * _pages_per_region, num_regions * _pages_per_region);
+    _commit_map.set_range(start_idx, start_idx + num_regions);
+    fire_on_commit(start_idx, num_regions, zero_filled);
+  }
+
+  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
+    _storage.uncommit(start_idx * _pages_per_region, num_regions * _pages_per_region);
+    _commit_map.clear_range(start_idx, start_idx + num_regions);
+  }
+};
+
+// G1RegionToSpaceMapper implementation where the region granularity is smaller
+// than the commit granularity.
+// Basically, the contents of one OS page span several regions.
+class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
+ private:
+  class CommitRefcountArray : public G1BiasedMappedArray<uint> {
+   protected:
+     virtual uint default_value() const { return 0; }
+  };
+
+  size_t _regions_per_page;
+
+  CommitRefcountArray _refcounts;
+
+  uintptr_t region_idx_to_page_idx(uint region) const {
+    return region / _regions_per_page;
+  }
+
+ public:
+  G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
+                                       size_t os_commit_granularity,
+                                       size_t alloc_granularity,
+                                       size_t commit_factor,
+                                       MemoryType type) :
+    G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
+    _regions_per_page((os_commit_granularity * commit_factor) / alloc_granularity), _refcounts() {
+
+    guarantee((os_commit_granularity * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
+    _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + rs.size()), os_commit_granularity);
+    _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
+  }
+
+  virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
+    for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
+      assert(!_commit_map.at(i), err_msg("Trying to commit storage at region "INTPTR_FORMAT" that is already committed", i));
+      uintptr_t idx = region_idx_to_page_idx(i);
+      uint old_refcount = _refcounts.get_by_index(idx);
+      bool zero_filled = false;
+      if (old_refcount == 0) {
+        zero_filled = _storage.commit(idx, 1);
+      }
+      _refcounts.set_by_index(idx, old_refcount + 1);
+      _commit_map.set_bit(i);
+      fire_on_commit(i, 1, zero_filled);
+    }
+  }
+
+  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
+    for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
+      assert(_commit_map.at(i), err_msg("Trying to uncommit storage at region "INTPTR_FORMAT" that is not committed", i));
+      uintptr_t idx = region_idx_to_page_idx(i);
+      uint old_refcount = _refcounts.get_by_index(idx);
+      assert(old_refcount > 0, "must be");
+      if (old_refcount == 1) {
+        _storage.uncommit(idx, 1);
+      }
+      _refcounts.set_by_index(idx, old_refcount - 1);
+      _commit_map.clear_bit(i);
+    }
+  }
+};
+
+void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
+  if (_listener != NULL) {
+    _listener->on_commit(start_idx, num_regions, zero_filled);
+  }
+}
+
+G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
+                                                            size_t os_commit_granularity,
+                                                            size_t region_granularity,
+                                                            size_t commit_factor,
+                                                            MemoryType type) {
+
+  if (region_granularity >= (os_commit_granularity * commit_factor)) {
+    return new G1RegionsLargerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
+  } else {
+    return new G1RegionsSmallerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
+  }
+}
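G1RegionsSmallerThanCommitSizeMapper above refcounts each commit page because several regions map onto one OS page: the page is committed when its first region is committed and uncommitted when its last region is uncommitted. A standalone sketch of just that refcount bookkeeping follows; the class name and the caller-driven commit/uncommit protocol are assumptions for illustration.

#include <cassert>
#include <cstddef>
#include <vector>

// Illustrative refcount bookkeeping for commit pages shared by several regions.
class SharedPageRefCounts {
  std::vector<unsigned> _refcounts;   // one counter per commit page
  std::size_t _regions_per_page;
 public:
  SharedPageRefCounts(std::size_t num_pages, std::size_t regions_per_page)
    : _refcounts(num_pages, 0), _regions_per_page(regions_per_page) {}

  // Returns true if the underlying page must actually be committed,
  // i.e. this is the first region on that page.
  bool commit_region(std::size_t region_idx) {
    std::size_t page = region_idx / _regions_per_page;
    bool first = (_refcounts[page] == 0);
    _refcounts[page]++;
    return first;
  }

  // Returns true if the underlying page can actually be uncommitted,
  // i.e. this was the last committed region on that page.
  bool uncommit_region(std::size_t region_idx) {
    std::size_t page = region_idx / _regions_per_page;
    assert(_refcounts[page] > 0 && "page not committed");
    _refcounts[page]--;
    return _refcounts[page] == 0;
  }
};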
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP
+
+#include "gc_implementation/g1/g1PageBasedVirtualSpace.hpp"
+#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
+
+class G1MappingChangedListener VALUE_OBJ_CLASS_SPEC {
+ public:
+  // Fired after commit of the memory, i.e. when the memory this listener is
+  // registered for can be accessed.
+  // The zero_filled parameter indicates whether the memory can be considered as
+  // filled with zero bytes when called.
+  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled) = 0;
+};
+
+// Maps region-based commit/uncommit requests to the underlying page-sized virtual
+// space.
+class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
+ private:
+  G1MappingChangedListener* _listener;
+ protected:
+  // Backing storage.
+  G1PageBasedVirtualSpace _storage;
+  size_t _commit_granularity;
+  size_t _region_granularity;
+  // Mapping management
+  BitMap _commit_map;
+
+  G1RegionToSpaceMapper(ReservedSpace rs, size_t commit_granularity, size_t region_granularity, MemoryType type);
+
+  void fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled);
+ public:
+  MemRegion reserved() { return _storage.reserved(); }
+
+  void set_mapping_changed_listener(G1MappingChangedListener* listener) { _listener = listener; }
+
+  virtual ~G1RegionToSpaceMapper() {
+    _commit_map.resize(0, /* in_resource_area */ false);
+  }
+
+  bool is_committed(uintptr_t idx) const {
+    return _commit_map.at(idx);
+  }
+
+  virtual void commit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
+  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
+
+  // Creates an appropriate G1RegionToSpaceMapper for the given parameters.
+  // The byte_translation_factor defines how many bytes in a region correspond to
+  // a single byte in the data structure this mapper is for.
+  // E.g. for the card table, this value is the number of heap bytes covered by
+  // a single card table entry.
+  static G1RegionToSpaceMapper* create_mapper(ReservedSpace rs,
+                                              size_t os_commit_granularity,
+                                              size_t region_granularity,
+                                              size_t byte_translation_factor,
+                                              MemoryType type);
+};
+
+#endif /* SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP */
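G1MappingChangedListener above is a simple observer: the mapper calls on_commit() once backing memory becomes accessible, so that an auxiliary data structure (such as the card table) can initialize its slice of the newly committed range. A minimal standalone sketch of the pattern follows; the names and the std::vector standing in for the card table are assumptions, not the HotSpot API.

#include <cstddef>
#include <vector>

// Illustrative commit-notification observer under the assumptions stated above.
class MappingChangedListener {
 public:
  virtual ~MappingChangedListener() {}
  virtual void on_commit(unsigned start_idx, std::size_t num_regions, bool zero_filled) = 0;
};

class TableClearingListener : public MappingChangedListener {
  std::vector<signed char>* _table;
  std::size_t _entries_per_region;
 public:
  TableClearingListener(std::vector<signed char>* table, std::size_t entries_per_region)
    : _table(table), _entries_per_region(entries_per_region) {}

  virtual void on_commit(unsigned start_idx, std::size_t num_regions, bool /* zero_filled */) {
    // A clean entry is -1 here, so the zero_filled hint cannot be used.
    std::size_t begin = start_idx * _entries_per_region;
    std::size_t end = begin + num_regions * _entries_per_region;
    for (std::size_t i = begin; i < end; i++) {
      (*_table)[i] = -1;
    }
  }
};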
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -23,7 +23,6 @@
  */
 
 #include "precompiled.hpp"
-#include "gc_implementation/g1/bufferingOopClosure.hpp"
 #include "gc_implementation/g1/concurrentG1Refine.hpp"
 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
@@ -33,7 +32,7 @@
 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "memory/iterator.hpp"
 #include "oops/oop.inline.hpp"
@@ -110,7 +109,7 @@
   G1CollectedHeap* _g1h;
 
   OopsInHeapRegionClosure* _oc;
-  CodeBlobToOopClosure* _code_root_cl;
+  CodeBlobClosure* _code_root_cl;
 
   G1BlockOffsetSharedArray* _bot_shared;
   G1SATBCardTableModRefBS *_ct_bs;
@@ -122,7 +121,7 @@
 
 public:
   ScanRSClosure(OopsInHeapRegionClosure* oc,
-                CodeBlobToOopClosure* code_root_cl,
+                CodeBlobClosure* code_root_cl,
                 uint worker_i) :
     _oc(oc),
     _code_root_cl(code_root_cl),
@@ -212,7 +211,6 @@
 #endif
 
       HeapRegion* card_region = _g1h->heap_region_containing(card_start);
-      assert(card_region != NULL, "Yielding cards not in the heap?");
       _cards++;
 
       if (!card_region->is_on_dirty_cards_region_list()) {
@@ -243,7 +241,7 @@
 };
 
 void G1RemSet::scanRS(OopsInHeapRegionClosure* oc,
-                      CodeBlobToOopClosure* code_root_cl,
+                      CodeBlobClosure* code_root_cl,
                       uint worker_i) {
   double rs_time_start = os::elapsedTime();
   HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
@@ -322,7 +320,7 @@
 }
 
 void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
-                                           CodeBlobToOopClosure* code_root_cl,
+                                           CodeBlobClosure* code_root_cl,
                                            uint worker_i) {
 #if CARD_REPEAT_HISTO
   ct_freq_update_histo_and_reset();
@@ -340,12 +338,8 @@
   // are just discarded (there's no need to update the RSets of regions
   // that were in the collection set - after the pause these regions
   // are wholly 'free' of live objects. In the event of an evacuation
-  // failure the cards/buffers in this queue set are:
-  // * passed to the DirtyCardQueueSet that is used to manage deferred
-  //   RSet updates, or
-  // * scanned for references that point into the collection set
-  //   and the RSet of the corresponding region in the collection set
-  //   is updated immediately.
+  // failure the cards/buffers in this queue set are passed to the
+  // DirtyCardQueueSet that is used to manage RSet updates
   DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
 
   assert((ParallelGCThreads > 0) || worker_i == 0, "invariant");
@@ -374,7 +368,6 @@
 
 void G1RemSet::prepare_for_oops_into_collection_set_do() {
   cleanupHRRS();
-  ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
   _g1->set_refine_cte_cl_concurrency(false);
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   dcqs.concatenate_logs();
@@ -387,67 +380,6 @@
   _total_cards_scanned = 0;
 }
 
-
-// This closure, applied to a DirtyCardQueueSet, is used to immediately
-// update the RSets for the regions in the CSet. For each card it iterates
-// through the oops which coincide with that card. It scans the reference
-// fields in each oop; when it finds an oop that points into the collection
-// set, the RSet for the region containing the referenced object is updated.
-class UpdateRSetCardTableEntryIntoCSetClosure: public CardTableEntryClosure {
-  G1CollectedHeap* _g1;
-  CardTableModRefBS* _ct_bs;
-public:
-  UpdateRSetCardTableEntryIntoCSetClosure(G1CollectedHeap* g1,
-                                          CardTableModRefBS* bs):
-    _g1(g1), _ct_bs(bs)
-  { }
-
-  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
-    // Construct the region representing the card.
-    HeapWord* start = _ct_bs->addr_for(card_ptr);
-    // And find the region containing it.
-    HeapRegion* r = _g1->heap_region_containing(start);
-    assert(r != NULL, "unexpected null");
-
-    // Scan oops in the card looking for references into the collection set
-    // Don't use addr_for(card_ptr + 1) which can ask for
-    // a card beyond the heap.  This is not safe without a perm
-    // gen.
-    HeapWord* end   = start + CardTableModRefBS::card_size_in_words;
-    MemRegion scanRegion(start, end);
-
-    UpdateRSetImmediate update_rs_cl(_g1->g1_rem_set());
-    FilterIntoCSClosure update_rs_cset_oop_cl(NULL, _g1, &update_rs_cl);
-    FilterOutOfRegionClosure filter_then_update_rs_cset_oop_cl(r, &update_rs_cset_oop_cl);
-
-    // We can pass false as the "filter_young" parameter here as:
-    // * we should be in a STW pause,
-    // * the DCQS to which this closure is applied is used to hold
-    //   references that point into the collection set from the prior
-    //   RSet updating,
-    // * the post-write barrier shouldn't be logging updates to young
-    //   regions (but there is a situation where this can happen - see
-    //   the comment in G1RemSet::refine_card() below -
-    //   that should not be applicable here), and
-    // * during actual RSet updating, the filtering of cards in young
-    //   regions in HeapRegion::oops_on_card_seq_iterate_careful is
-    //   employed.
-    // As a result, when this closure is applied to "refs into cset"
-    // DCQS, we shouldn't see any cards in young regions.
-    update_rs_cl.set_region(r);
-    HeapWord* stop_point =
-      r->oops_on_card_seq_iterate_careful(scanRegion,
-                                          &filter_then_update_rs_cset_oop_cl,
-                                          false /* filter_young */,
-                                          NULL  /* card_ptr */);
-
-    // Since this is performed in the event of an evacuation failure, we
-    // we shouldn't see a non-null stop point
-    assert(stop_point == NULL, "saw an unallocated region");
-    return true;
-  }
-};
-
 void G1RemSet::cleanup_after_oops_into_collection_set_do() {
   guarantee( _cards_scanned != NULL, "invariant" );
   _total_cards_scanned = 0;
@@ -468,25 +400,10 @@
     double restore_remembered_set_start = os::elapsedTime();
 
     // Restore remembered sets for the regions pointing into the collection set.
-    if (G1DeferredRSUpdate) {
-      // If deferred RS updates are enabled then we just need to transfer
-      // the completed buffers from (a) the DirtyCardQueueSet used to hold
-      // cards that contain references that point into the collection set
-      // to (b) the DCQS used to hold the deferred RS updates
-      _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs);
-    } else {
-
-      CardTableModRefBS* bs = (CardTableModRefBS*)_g1->barrier_set();
-      UpdateRSetCardTableEntryIntoCSetClosure update_rs_cset_immediate(_g1, bs);
-
-      int n_completed_buffers = 0;
-      while (into_cset_dcqs.apply_closure_to_completed_buffer(&update_rs_cset_immediate,
-                                                    0, 0, true)) {
-        n_completed_buffers++;
-      }
-      assert(n_completed_buffers == into_cset_n_buffers, "missed some buffers");
-    }
-
+    // We just need to transfer the completed buffers from the DirtyCardQueueSet
+    // used to hold cards that contain references that point into the collection set
+    // to the DCQS used to hold the deferred RS updates.
+    _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs);
     _g1->g1_policy()->phase_times()->record_evac_fail_restore_remsets((os::elapsedTime() - restore_remembered_set_start) * 1000.0);
   }
 
@@ -557,6 +474,12 @@
 
 bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
                            bool check_for_refs_into_cset) {
+  assert(_g1->is_in_exact(_ct_bs->addr_for(card_ptr)),
+         err_msg("Card at "PTR_FORMAT" index "SIZE_FORMAT" representing heap at "PTR_FORMAT" (%u) must be in committed heap",
+                 p2i(card_ptr),
+                 _ct_bs->index_for(_ct_bs->addr_for(card_ptr)),
+                 _ct_bs->addr_for(card_ptr),
+                 _g1->addr_to_region(_ct_bs->addr_for(card_ptr))));
 
   // If the card is no longer dirty, nothing to do.
   if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
@@ -569,11 +492,6 @@
   HeapWord* start = _ct_bs->addr_for(card_ptr);
   // And find the region containing it.
   HeapRegion* r = _g1->heap_region_containing(start);
-  if (r == NULL) {
-    // Again no need to return that this card contains refs that
-    // point into the collection set.
-    return false;  // Not in the G1 heap (might be in perm, for example.)
-  }
 
   // Why do we have to check here whether a card is on a young region,
   // given that we dirty young regions and, as a result, the
@@ -626,10 +544,6 @@
 
     start = _ct_bs->addr_for(card_ptr);
     r = _g1->heap_region_containing(start);
-    if (r == NULL) {
-      // Not in the G1 heap
-      return false;
-    }
 
     // Checking whether the region we got back from the cache
     // is young here is inappropriate. The region could have been
--- a/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -96,7 +96,7 @@
   // the "i" passed to the calling thread's work(i) function.
   // In the sequential case this param will be ignored.
   void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
-                                   CodeBlobToOopClosure* code_root_cl,
+                                   CodeBlobClosure* code_root_cl,
                                    uint worker_i);
 
   // Prepare for and cleanup after an oops_into_collection_set_do
@@ -108,7 +108,7 @@
   void cleanup_after_oops_into_collection_set_do();
 
   void scanRS(OopsInHeapRegionClosure* oc,
-              CodeBlobToOopClosure* code_root_cl,
+              CodeBlobClosure* code_root_cl,
               uint worker_i);
 
   void updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i);
@@ -193,18 +193,4 @@
   bool apply_to_weak_ref_discovered_field() { return true; }
 };
 
-class UpdateRSetImmediate: public OopsInHeapRegionClosure {
-private:
-  G1RemSet* _g1_rem_set;
-
-  template <class T> void do_oop_work(T* p);
-public:
-  UpdateRSetImmediate(G1RemSet* rs) :
-    _g1_rem_set(rs) {}
-
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(      oop* p) { do_oop_work(p); }
-};
-
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP
--- a/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_INLINE_HPP
 
 #include "gc_implementation/g1/g1RemSet.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "oops/oop.inline.hpp"
 
@@ -45,26 +46,28 @@
 template <class T>
 inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, int tid) {
   oop obj = oopDesc::load_decode_heap_oop(p);
+  if (obj == NULL) {
+    return;
+  }
+
 #ifdef ASSERT
   // can't do because of races
   // assert(obj == NULL || obj->is_oop(), "expected an oop");
 
   // Do the safe subset of is_oop
-  if (obj != NULL) {
 #ifdef CHECK_UNHANDLED_OOPS
-    oopDesc* o = obj.obj();
+  oopDesc* o = obj.obj();
 #else
-    oopDesc* o = obj;
+  oopDesc* o = obj;
 #endif // CHECK_UNHANDLED_OOPS
-    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
-    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
-  }
+  assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
+  assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
 #endif // ASSERT
 
   assert(from == NULL || from->is_in_reserved(p), "p is not in from");
 
   HeapRegion* to = _g1->heap_region_containing(obj);
-  if (to != NULL && from != to) {
+  if (from != to) {
     assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
     to->rem_set()->add_reference(p, tid);
   }
@@ -76,13 +79,4 @@
   _rs->par_write_ref(_from, p, _worker_i);
 }
 
-template <class T>
-inline void UpdateRSetImmediate::do_oop_work(T* p) {
-  assert(_from->is_in_reserved(p), "paranoia");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) {
-    _g1_rem_set->par_write_ref(_from, p, 0);
-  }
-}
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_INLINE_HPP
--- a/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -253,19 +253,22 @@
     size_t occupied_cards = hrrs->occupied();
     size_t code_root_mem_sz = hrrs->strong_code_roots_mem_size();
     if (code_root_mem_sz > max_code_root_mem_sz()) {
+      _max_code_root_mem_sz = code_root_mem_sz;
       _max_code_root_mem_sz_region = r;
     }
     size_t code_root_elems = hrrs->strong_code_roots_list_length();
 
     RegionTypeCounter* current = NULL;
-    if (r->is_young()) {
+    if (r->is_free()) {
+      current = &_free;
+    } else if (r->is_young()) {
       current = &_young;
     } else if (r->isHumongous()) {
       current = &_humonguous;
-    } else if (r->is_empty()) {
-      current = &_free;
+    } else if (r->is_old()) {
+      current = &_old;
     } else {
-      current = &_old;
+      ShouldNotReachHere();
     }
     current->add(rs_mem_sz, occupied_cards, code_root_mem_sz, code_root_elems);
     _all.add(rs_mem_sz, occupied_cards, code_root_mem_sz, code_root_elems);
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -23,10 +23,12 @@
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/satbQueue.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/thread.inline.hpp"
 
 G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap,
@@ -36,7 +38,6 @@
   _kind = G1SATBCT;
 }
 
-
 void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
   // Nulls should have been already filtered.
   assert(pre_val->is_oop(true), "Error");
@@ -64,6 +65,17 @@
   }
 }
 
+void G1SATBCardTableModRefBS::write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
+  if (!dest_uninitialized) {
+    write_ref_array_pre_work(dst, count);
+  }
+}
+void G1SATBCardTableModRefBS::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
+  if (!dest_uninitialized) {
+    write_ref_array_pre_work(dst, count);
+  }
+}
+
 bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
   jbyte val = _byte_map[card_index];
   // It's already processed
@@ -112,13 +124,53 @@
 }
 #endif
 
+void G1SATBCardTableLoggingModRefBSChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
+  // The default value for a clean card on the card table is -1, so we cannot
+  // take advantage of the zero_filled parameter.
+  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
+  _card_table->clear(mr);
+}
+
 G1SATBCardTableLoggingModRefBS::
 G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
                                int max_covered_regions) :
   G1SATBCardTableModRefBS(whole_heap, max_covered_regions),
-  _dcqs(JavaThread::dirty_card_queue_set())
+  _dcqs(JavaThread::dirty_card_queue_set()),
+  _listener()
 {
   _kind = G1SATBCTLogging;
+  _listener.set_card_table(this);
+}
+
+void G1SATBCardTableLoggingModRefBS::initialize(G1RegionToSpaceMapper* mapper) {
+  mapper->set_mapping_changed_listener(&_listener);
+
+  _byte_map_size = mapper->reserved().byte_size();
+
+  _guard_index = cards_required(_whole_heap.word_size()) - 1;
+  _last_valid_index = _guard_index - 1;
+
+  HeapWord* low_bound  = _whole_heap.start();
+  HeapWord* high_bound = _whole_heap.end();
+
+  _cur_covered_regions = 1;
+  _covered[0] = _whole_heap;
+
+  _byte_map = (jbyte*) mapper->reserved().start();
+  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
+  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
+  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
+
+  if (TraceCardTableModRefBS) {
+    gclog_or_tty->print_cr("G1SATBCardTableModRefBS::G1SATBCardTableModRefBS: ");
+    gclog_or_tty->print_cr("  "
+                  "  &_byte_map[0]: " INTPTR_FORMAT
+                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
+                  p2i(&_byte_map[0]),
+                  p2i(&_byte_map[_last_valid_index]));
+    gclog_or_tty->print_cr("  "
+                  "  byte_map_base: " INTPTR_FORMAT,
+                  p2i(byte_map_base));
+  }
 }
 
 void
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -25,14 +25,14 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP
 
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "memory/cardTableModRefBS.hpp"
 #include "memory/memRegion.hpp"
 #include "oops/oop.inline.hpp"
 #include "utilities/macros.hpp"
 
-#if INCLUDE_ALL_GCS
-
 class DirtyCardQueueSet;
+class G1SATBCardTableLoggingModRefBS;
 
 // This barrier is specialized to use a logging barrier to support
 // snapshot-at-the-beginning marking.
@@ -86,16 +86,8 @@
   }
 
   template <class T> void write_ref_array_pre_work(T* dst, int count);
-  virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
-    if (!dest_uninitialized) {
-      write_ref_array_pre_work(dst, count);
-    }
-  }
-  virtual void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
-    if (!dest_uninitialized) {
-      write_ref_array_pre_work(dst, count);
-    }
-  }
+  virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized);
+  virtual void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized);
 
 /*
    Claimed and deferred bits are used together in G1 during the evacuation
@@ -134,18 +126,40 @@
     jbyte val = _byte_map[card_index];
     return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
   }
+};
 
+class G1SATBCardTableLoggingModRefBSChangedListener : public G1MappingChangedListener {
+ private:
+  G1SATBCardTableLoggingModRefBS* _card_table;
+ public:
+  G1SATBCardTableLoggingModRefBSChangedListener() : _card_table(NULL) { }
+
+  void set_card_table(G1SATBCardTableLoggingModRefBS* card_table) { _card_table = card_table; }
+
+  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 };
 
 // Adds card-table logging to the post-barrier.
 // Usual invariant: all dirty cards are logged in the DirtyCardQueueSet.
 class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
+  friend class G1SATBCardTableLoggingModRefBSChangedListener;
  private:
+  G1SATBCardTableLoggingModRefBSChangedListener _listener;
   DirtyCardQueueSet& _dcqs;
  public:
+  static size_t compute_size(size_t mem_region_size_in_words) {
+    size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
+    return ReservedSpace::allocation_align_size_up(number_of_slots);
+  }
+
   G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
                                  int max_covered_regions);
 
+  virtual void initialize() { }
+  virtual void initialize(G1RegionToSpaceMapper* mapper);
+
+  virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }
+
   bool is_a(BarrierSet::Name bsn) {
     return bsn == BarrierSet::G1SATBCTLogging ||
       G1SATBCardTableModRefBS::is_a(bsn);
@@ -162,11 +176,6 @@
 
   void write_region_work(MemRegion mr)    { invalidate(mr); }
   void write_ref_array_work(MemRegion mr) { invalidate(mr); }
-
-
 };
 
-
-#endif // INCLUDE_ALL_GCS
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP
--- a/src/share/vm/gc_implementation/g1/g1StringDedupThread.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1StringDedupThread.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -77,38 +77,37 @@
       break;
     }
 
-    // Include this thread in safepoints
-    stsJoin();
+    {
+      // Include thread in safepoints
+      SuspendibleThreadSetJoiner sts;
 
-    stat.mark_exec();
+      stat.mark_exec();
 
-    // Process the queue
-    for (;;) {
-      oop java_string = G1StringDedupQueue::pop();
-      if (java_string == NULL) {
-        break;
+      // Process the queue
+      for (;;) {
+        oop java_string = G1StringDedupQueue::pop();
+        if (java_string == NULL) {
+          break;
+        }
+
+        G1StringDedupTable::deduplicate(java_string, stat);
+
+        // Safepoint this thread if needed
+        if (sts.should_yield()) {
+          stat.mark_block();
+          sts.yield();
+          stat.mark_unblock();
+        }
       }
 
-      G1StringDedupTable::deduplicate(java_string, stat);
+      G1StringDedupTable::trim_entry_cache();
 
-      // Safepoint this thread if needed
-      if (stsShouldYield()) {
-        stat.mark_block();
-        stsYield(NULL);
-        stat.mark_unblock();
-      }
-    }
+      stat.mark_done();
 
-    G1StringDedupTable::trim_entry_cache();
-
-    stat.mark_done();
-
-    // Print statistics
-    total_stat.add(stat);
-    print(gclog_or_tty, stat, total_stat);
-
-    // Exclude this thread from safepoints
-    stsLeave();
+      // Print statistics
+      total_stat.add(stat);
+      print(gclog_or_tty, stat, total_stat);
+    }
   }
 
   terminate();
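The hunk above replaces the manual stsJoin()/stsLeave() pairing with a scoped SuspendibleThreadSetJoiner, so every exit path from the block leaves the suspendible thread set. A minimal stand-alone sketch of that RAII idiom follows; the join/leave helpers are assumed stand-ins, not the VM's API.

#include <atomic>

// Assumed stand-ins for the VM's suspendible-thread-set bookkeeping.
static std::atomic<int> g_joined_threads(0);
static void join_set()  { g_joined_threads.fetch_add(1); }
static void leave_set() { g_joined_threads.fetch_sub(1); }

// RAII joiner in the spirit of SuspendibleThreadSetJoiner: constructing it
// includes the thread in safepoints, destroying it excludes the thread again,
// so an early break or return can no longer skip the "leave" side.
class ScopedJoiner {
 public:
  ScopedJoiner()  { join_set(); }
  ~ScopedJoiner() { leave_set(); }
};

int main() {
  for (int i = 0; i < 3; i++) {
    ScopedJoiner sts;               // joined for the duration of this iteration
    if (i == 2) {
      break;                        // destructor still runs on this path too
    }
  }
  return g_joined_threads.load();   // 0: every join was balanced by a leave
}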
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -108,9 +108,6 @@
   develop(bool, G1RSBarrierRegionFilter, true,                              \
           "If true, generate region filtering code in RS barrier")          \
                                                                             \
-  develop(bool, G1DeferredRSUpdate, true,                                   \
-          "If true, use deferred RS updates")                               \
-                                                                            \
   develop(bool, G1RSLogCheckCardTable, false,                               \
           "If true, verify that no dirty cards remain after RS log "        \
           "processing.")                                                    \
@@ -273,21 +270,24 @@
           "Percentage (0-100) of the heap size to use as default "          \
           " maximum young gen size.")                                       \
                                                                             \
-  experimental(uintx, G1MixedGCLiveThresholdPercent, 65,                    \
+  experimental(uintx, G1MixedGCLiveThresholdPercent, 85,                    \
           "Threshold for regions to be considered for inclusion in the "    \
           "collection set of mixed GCs. "                                   \
           "Regions with live bytes exceeding this will not be collected.")  \
                                                                             \
-  product(uintx, G1HeapWastePercent, 10,                                    \
+  product(uintx, G1HeapWastePercent, 5,                                     \
           "Amount of space, expressed as a percentage of the heap size, "   \
           "that G1 is willing not to collect to avoid expensive GCs.")      \
                                                                             \
   product(uintx, G1MixedGCCountTarget, 8,                                   \
           "The target number of mixed GCs after a marking cycle.")          \
                                                                             \
-  experimental(uintx, G1CodeRootsChunkCacheKeepPercent, 10,                 \
-          "The amount of code root chunks that should be kept at most "     \
-          "as percentage of already allocated.")                            \
+  experimental(bool, G1ReclaimDeadHumongousObjectsAtYoungGC, true,          \
+          "Try to reclaim dead large objects at every young GC.")           \
+                                                                            \
+  experimental(bool, G1TraceReclaimDeadHumongousObjectsAtYoungGC, false,    \
+          "Print some information about large object liveness "             \
+          "at every young GC.")                                             \
                                                                             \
   experimental(uintx, G1OldCSetRegionThresholdPercent, 10,                  \
           "An upper bound for the number of old CSet regions expressed "    \
@@ -325,11 +325,14 @@
           "evacuation pauses")                                              \
                                                                             \
   diagnostic(bool, G1VerifyRSetsDuringFullGC, false,                        \
-             "If true, perform verification of each heap region's "         \
-             "remembered set when verifying the heap during a full GC.")    \
+          "If true, perform verification of each heap region's "            \
+          "remembered set when verifying the heap during a full GC.")       \
                                                                             \
   diagnostic(bool, G1VerifyHeapRegionCodeRoots, false,                      \
-             "Verify the code root lists attached to each heap region.")
+          "Verify the code root lists attached to each heap region.")       \
+                                                                            \
+  develop(bool, G1VerifyBitmaps, false,                                     \
+          "Verifies the consistency of the marking bitmaps")
 
 G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
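The table above adjusts G1 behaviour purely by editing entries in the G1_FLAGS X-macro; the DECLARE_* macros passed in at the bottom expand each entry into a flag declaration. A compilable sketch of that pattern with made-up flag names and a simplified declaration macro (the real develop()/product() macros carry additional bookkeeping):

#include <cstdio>

typedef unsigned long uintx;

// Simplified stand-in for the declaration macros the table is instantiated with.
#define SKETCH_DECLARE_FLAG(type, name, value, doc) static type name = value;

// Same shape as a G1_FLAGS entry list; names and values are illustrative only.
#define SKETCH_G1_FLAGS(flag)                                               \
  flag(bool,  SketchVerifyBitmaps,    false, "Verify marking bitmaps")      \
  flag(uintx, SketchHeapWastePercent, 5,     "Heap space G1 may not collect")

SKETCH_G1_FLAGS(SKETCH_DECLARE_FLAG)

int main() {
  printf("%d %lu\n", (int)SketchVerifyBitmaps, SketchHeapWastePercent);
  return 0;
}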
 
--- a/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -30,14 +30,21 @@
 // non-virtually, using a mechanism defined in this file.  Extend these
 // macros in the obvious way to add specializations for new closures.
 
-// Forward declarations.
 enum G1Barrier {
   G1BarrierNone,
   G1BarrierEvac,
   G1BarrierKlass
 };
 
-template<G1Barrier barrier, bool do_mark_object>
+enum G1Mark {
+  G1MarkNone,
+  G1MarkFromRoot,
+  G1MarkPromotedFromRoot
+};
+
+// Forward declarations.
+
+template<G1Barrier barrier, G1Mark do_mark_object>
 class G1ParCopyClosure;
 
 class G1ParScanClosure;
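The new G1Mark enum becomes a template parameter of G1ParCopyClosure, so the marking policy is fixed per instantiation rather than tested for every oop. A small illustrative sketch of that technique (not the HotSpot closure hierarchy):

#include <cstdio>

enum SketchMark { SketchMarkNone, SketchMarkFromRoot };

// The mark policy is a compile-time constant, so the branch below folds away
// in the SketchMarkNone instantiation instead of being tested per oop.
template <SketchMark mark>
struct SketchCopyClosure {
  int marked;
  SketchCopyClosure() : marked(0) {}
  void do_oop(const void* /*obj*/) {
    if (mark != SketchMarkNone) {
      ++marked;
    }
  }
};

int main() {
  SketchCopyClosure<SketchMarkFromRoot> marking;
  SketchCopyClosure<SketchMarkNone>     plain;
  marking.do_oop(0);
  plain.do_oop(0);
  printf("%d %d\n", marking.marked, plain.marked);  // prints: 1 0
  return 0;
}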
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -28,11 +28,15 @@
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/heapRegion.inline.hpp"
+#include "gc_implementation/g1/heapRegionBounds.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
+#include "gc_implementation/shared/liveRange.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/iterator.hpp"
+#include "memory/space.inline.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
@@ -46,7 +50,7 @@
                                  HeapRegion* hr, ExtendedOopClosure* cl,
                                  CardTableModRefBS::PrecisionStyle precision,
                                  FilterKind fk) :
-  ContiguousSpaceDCTOC(hr, cl, precision, NULL),
+  DirtyCardToOopClosure(hr, cl, precision, NULL),
   _hr(hr), _fk(fk), _g1(g1) { }
 
 FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
@@ -58,7 +62,7 @@
                                HeapRegion* hr,
                                HeapWord* cur, HeapWord* top) {
   oop cur_oop = oop(cur);
-  int oop_size = cur_oop->size();
+  size_t oop_size = hr->block_size(cur);
   HeapWord* next_obj = cur + oop_size;
   while (next_obj < top) {
     // Keep filtering the remembered set.
@@ -69,25 +73,24 @@
     }
     cur = next_obj;
     cur_oop = oop(cur);
-    oop_size = cur_oop->size();
+    oop_size = hr->block_size(cur);
     next_obj = cur + oop_size;
   }
   return cur;
 }
 
-void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
-                                              HeapWord* bottom,
-                                              HeapWord* top,
-                                              ExtendedOopClosure* cl) {
+void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
+                                      HeapWord* bottom,
+                                      HeapWord* top) {
   G1CollectedHeap* g1h = _g1;
-  int oop_size;
+  size_t oop_size;
   ExtendedOopClosure* cl2 = NULL;
 
-  FilterIntoCSClosure intoCSFilt(this, g1h, cl);
-  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);
+  FilterIntoCSClosure intoCSFilt(this, g1h, _cl);
+  FilterOutOfRegionClosure outOfRegionFilt(_hr, _cl);
 
   switch (_fk) {
-  case NoFilterKind:          cl2 = cl; break;
+  case NoFilterKind:          cl2 = _cl; break;
   case IntoCSFilterKind:      cl2 = &intoCSFilt; break;
   case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
   default:                    ShouldNotReachHere();
@@ -100,7 +103,7 @@
   if (!g1h->is_obj_dead(oop(bottom), _hr)) {
     oop_size = oop(bottom)->oop_iterate(cl2, mr);
   } else {
-    oop_size = oop(bottom)->size();
+    oop_size = _hr->block_size(bottom);
   }
 
   bottom += oop_size;
@@ -109,17 +112,17 @@
     // We replicate the loop below for several kinds of possible filters.
     switch (_fk) {
     case NoFilterKind:
-      bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
+      bottom = walk_mem_region_loop(_cl, g1h, _hr, bottom, top);
       break;
 
     case IntoCSFilterKind: {
-      FilterIntoCSClosure filt(this, g1h, cl);
+      FilterIntoCSClosure filt(this, g1h, _cl);
       bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
       break;
     }
 
     case OutOfRegionFilterKind: {
-      FilterOutOfRegionClosure filt(_hr, cl);
+      FilterOutOfRegionClosure filt(_hr, _cl);
       bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
       break;
     }
@@ -135,32 +138,16 @@
   }
 }
 
-// Minimum region size; we won't go lower than that.
-// We might want to decrease this in the future, to deal with small
-// heaps a bit more efficiently.
-#define MIN_REGION_SIZE  (      1024 * 1024 )
-
-// Maximum region size; we don't go higher than that. There's a good
-// reason for having an upper bound. We don't want regions to get too
-// large, otherwise cleanup's effectiveness would decrease as there
-// will be fewer opportunities to find totally empty regions after
-// marking.
-#define MAX_REGION_SIZE  ( 32 * 1024 * 1024 )
-
-// The automatic region size calculation will try to have around this
-// many regions in the heap (based on the min heap size).
-#define TARGET_REGION_NUMBER          2048
-
 size_t HeapRegion::max_region_size() {
-  return (size_t)MAX_REGION_SIZE;
+  return HeapRegionBounds::max_size();
 }
 
 void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
   uintx region_size = G1HeapRegionSize;
   if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
     size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
-    region_size = MAX2(average_heap_size / TARGET_REGION_NUMBER,
-                       (uintx) MIN_REGION_SIZE);
+    region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
+                       (uintx) HeapRegionBounds::min_size());
   }
 
   int region_size_log = log2_long((jlong) region_size);
@@ -170,10 +157,10 @@
   region_size = ((uintx)1 << region_size_log);
 
   // Now make sure that we don't go over or under our limits.
-  if (region_size < MIN_REGION_SIZE) {
-    region_size = MIN_REGION_SIZE;
-  } else if (region_size > MAX_REGION_SIZE) {
-    region_size = MAX_REGION_SIZE;
+  if (region_size < HeapRegionBounds::min_size()) {
+    region_size = HeapRegionBounds::min_size();
+  } else if (region_size > HeapRegionBounds::max_size()) {
+    region_size = HeapRegionBounds::max_size();
   }
 
   // And recalculate the log.
@@ -208,8 +195,6 @@
 }
 
 void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
-  assert(_humongous_type == NotHumongous,
-         "we should have already filtered out humongous regions");
   assert(_humongous_start_region == NULL,
          "we should have already filtered out humongous regions");
   assert(_end == _orig_end,
@@ -217,9 +202,10 @@
 
   _in_collection_set = false;
 
+  set_allocation_context(AllocationContext::system());
   set_young_index_in_cset(-1);
   uninstall_surv_rate_group();
-  set_young_type(NotYoung);
+  set_free();
   reset_pre_dummy_top();
 
   if (!par) {
@@ -270,7 +256,7 @@
   assert(top() == bottom(), "should be empty");
   assert(bottom() <= new_top && new_top <= new_end, "pre-condition");
 
-  _humongous_type = StartsHumongous;
+  _type.set_starts_humongous();
   _humongous_start_region = this;
 
   set_end(new_end);
@@ -284,11 +270,11 @@
   assert(top() == bottom(), "should be empty");
   assert(first_hr->startsHumongous(), "pre-condition");
 
-  _humongous_type = ContinuesHumongous;
+  _type.set_continues_humongous();
   _humongous_start_region = first_hr;
 }
 
-void HeapRegion::set_notHumongous() {
+void HeapRegion::clear_humongous() {
   assert(isHumongous(), "pre-condition");
 
   if (startsHumongous()) {
@@ -304,7 +290,6 @@
   }
 
   assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
-  _humongous_type = NotHumongous;
   _humongous_start_region = NULL;
 }
 
@@ -319,46 +304,19 @@
   return false;
 }
 
-HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
-  HeapWord* low = addr;
-  HeapWord* high = end();
-  while (low < high) {
-    size_t diff = pointer_delta(high, low);
-    // Must add one below to bias toward the high amount.  Otherwise, if
-  // "high" were at the desired value, and "low" were one less, we
-    // would not converge on "high".  This is not symmetric, because
-    // we set "high" to a block start, which might be the right one,
-    // which we don't do for "low".
-    HeapWord* middle = low + (diff+1)/2;
-    if (middle == high) return high;
-    HeapWord* mid_bs = block_start_careful(middle);
-    if (mid_bs < addr) {
-      low = middle;
-    } else {
-      high = mid_bs;
-    }
-  }
-  assert(low == high && low >= addr, "Didn't work.");
-  return low;
-}
-
-#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
-#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
-#endif // _MSC_VER
-
-
-HeapRegion::HeapRegion(uint hrs_index,
+HeapRegion::HeapRegion(uint hrm_index,
                        G1BlockOffsetSharedArray* sharedOffsetArray,
                        MemRegion mr) :
     G1OffsetTableContigSpace(sharedOffsetArray, mr),
-    _hrs_index(hrs_index),
-    _humongous_type(NotHumongous), _humongous_start_region(NULL),
+    _hrm_index(hrm_index),
+    _allocation_context(AllocationContext::system()),
+    _humongous_start_region(NULL),
     _in_collection_set(false),
     _next_in_special_set(NULL), _orig_end(NULL),
     _claimed(InitialClaimValue), _evacuation_failed(false),
     _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
-    _young_type(NotYoung), _next_young_region(NULL),
-    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL), _pending_removal(false),
+    _next_young_region(NULL),
+    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
 #ifdef ASSERT
     _containing_set(NULL),
 #endif // ASSERT
@@ -367,55 +325,24 @@
     _predicted_bytes_to_copy(0)
 {
   _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
+  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
+
+  initialize(mr);
+}
+
+void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
+  assert(_rem_set->is_empty(), "Remembered set must be empty");
+
+  G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);
+
   _orig_end = mr.end();
-  // Note that initialize() will set the start of the unmarked area of the
-  // region.
   hr_clear(false /*par*/, false /*clear_space*/);
   set_top(bottom());
-  set_saved_mark();
-
-  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
+  record_top_and_timestamp();
 }
 
 CompactibleSpace* HeapRegion::next_compaction_space() const {
-  // We're not using an iterator given that it will wrap around when
-  // it reaches the last region and this is not what we want here.
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  uint index = hrs_index() + 1;
-  while (index < g1h->n_regions()) {
-    HeapRegion* hr = g1h->region_at(index);
-    if (!hr->isHumongous()) {
-      return hr;
-    }
-    index += 1;
-  }
-  return NULL;
-}
-
-void HeapRegion::save_marks() {
-  set_saved_mark();
-}
-
-void HeapRegion::oops_in_mr_iterate(MemRegion mr, ExtendedOopClosure* cl) {
-  HeapWord* p = mr.start();
-  HeapWord* e = mr.end();
-  oop obj;
-  while (p < e) {
-    obj = oop(p);
-    p += obj->oop_iterate(cl);
-  }
-  assert(p == e, "bad memregion: doesn't end on obj boundary");
-}
-
-#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
-void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
-  ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl);              \
-}
-SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)
-
-
-void HeapRegion::oop_before_save_marks_iterate(ExtendedOopClosure* cl) {
-  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
+  return G1CollectedHeap::heap()->next_compaction_region(this);
 }
 
 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
@@ -423,7 +350,6 @@
   // We always recreate the prev marking info and we'll explicitly
   // mark all objects we find to be self-forwarded on the prev
   // bitmap. So all objects need to be below PTAMS.
-  _prev_top_at_mark_start = top();
   _prev_marked_bytes = 0;
 
   if (during_initial_mark) {
@@ -447,6 +373,7 @@
   assert(0 <= marked_bytes && marked_bytes <= used(),
          err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
                  marked_bytes, used()));
+  _prev_top_at_mark_start = top();
   _prev_marked_bytes = marked_bytes;
 }
 
@@ -477,7 +404,7 @@
     if (cl->abort()) return cur;
     // The check above must occur before the operation below, since an
     // abort might invalidate the "size" operation.
-    cur += obj->size();
+    cur += block_size(cur);
   }
   return NULL;
 }
@@ -549,7 +476,7 @@
       return cur;
     }
     // Otherwise...
-    next = (cur + obj->size());
+    next = cur + block_size(cur);
   }
 
   // If we finish the above loop...We have a parseable object that
@@ -557,10 +484,9 @@
   // inside or spans the entire region.
 
   assert(obj == oop(cur), "sanity");
-  assert(cur <= start &&
-         obj->klass_or_null() != NULL &&
-         (cur + obj->size()) > start,
-         "Loop postcondition");
+  assert(cur <= start, "Loop postcondition");
+  assert(obj->klass_or_null() != NULL, "Loop postcondition");
+  assert((cur + block_size(cur)) > start, "Loop postcondition");
 
   if (!g1h->is_obj_dead(obj)) {
     obj->oop_iterate(cl, mr);
@@ -574,7 +500,7 @@
     };
 
     // Otherwise:
-    next = (cur + obj->size());
+    next = cur + block_size(cur);
 
     if (!g1h->is_obj_dead(obj)) {
       if (next < end || !obj->is_objArray()) {
@@ -600,21 +526,17 @@
   hrrs->add_strong_code_root(nm);
 }
 
+void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  HeapRegionRemSet* hrrs = rem_set();
+  hrrs->add_strong_code_root_locked(nm);
+}
+
 void HeapRegion::remove_strong_code_root(nmethod* nm) {
   HeapRegionRemSet* hrrs = rem_set();
   hrrs->remove_strong_code_root(nm);
 }
 
-void HeapRegion::migrate_strong_code_roots() {
-  assert(in_collection_set(), "only collection set regions");
-  assert(!isHumongous(),
-          err_msg("humongous region "HR_FORMAT" should not have been added to collection set",
-                  HR_FORMAT_PARAMS(this)));
-
-  HeapRegionRemSet* hrrs = rem_set();
-  hrrs->migrate_strong_code_roots();
-}
-
 void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
   HeapRegionRemSet* hrrs = rem_set();
   hrrs->strong_code_roots_do(blk);
@@ -750,26 +672,12 @@
 
 void HeapRegion::print() const { print_on(gclog_or_tty); }
 void HeapRegion::print_on(outputStream* st) const {
-  if (isHumongous()) {
-    if (startsHumongous())
-      st->print(" HS");
-    else
-      st->print(" HC");
-  } else {
-    st->print("   ");
-  }
+  st->print("AC%4u", allocation_context());
+  st->print(" %2s", get_short_type_str());
   if (in_collection_set())
     st->print(" CS");
   else
     st->print("   ");
-  if (is_young())
-    st->print(is_survivor() ? " SU" : " Y ");
-  else
-    st->print("   ");
-  if (is_empty())
-    st->print(" F");
-  else
-    st->print("  ");
   st->print(" TS %5d", _gc_time_stamp);
   st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
             prev_top_at_mark_start(), next_top_at_mark_start());
@@ -929,10 +837,11 @@
   size_t object_num = 0;
   while (p < top()) {
     oop obj = oop(p);
-    size_t obj_size = obj->size();
+    size_t obj_size = block_size(p);
     object_num += 1;
 
-    if (is_humongous != g1->isHumongous(obj_size)) {
+    if (is_humongous != g1->isHumongous(obj_size) &&
+        !g1->is_obj_dead(obj, this)) { // Dead objects may have bigger block_size since they span several objects.
       gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
                              SIZE_FORMAT" words) in a %shumongous region",
                              p, g1->isHumongous(obj_size) ? "" : "non-",
@@ -942,8 +851,10 @@
     }
 
     // If it returns false, verify_for_object() will output the
-    // appropriate messasge.
-    if (do_bot_verify && !_offsets.verify_for_object(p, obj_size)) {
+    // appropriate message.
+    if (do_bot_verify &&
+        !g1->is_obj_dead(obj, this) &&
+        !_offsets.verify_for_object(p, obj_size)) {
       *failures = true;
       return;
     }
@@ -951,7 +862,10 @@
     if (!g1->is_obj_dead_cond(obj, this, vo)) {
       if (obj->is_oop()) {
         Klass* klass = obj->klass();
-        if (!klass->is_metaspace_object()) {
+        bool is_metaspace_object = Metaspace::contains(klass) ||
+                                   (vo == VerifyOption_G1UsePrevMarking &&
+                                   ClassLoaderDataGraph::unload_list_contains(klass));
+        if (!is_metaspace_object) {
           gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                  "not metadata", klass, (void *)obj);
           *failures = true;
@@ -1065,9 +979,10 @@
 // away eventually.
 
 void G1OffsetTableContigSpace::clear(bool mangle_space) {
-  ContiguousSpace::clear(mangle_space);
-  _offsets.zero_bottom_entry();
-  _offsets.initialize_threshold();
+  set_top(bottom());
+  set_saved_mark_word(bottom());
+  CompactibleSpace::clear(mangle_space);
+  reset_bot();
 }
 
 void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
@@ -1100,13 +1015,16 @@
 HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
-  if (_gc_time_stamp < g1h->get_gc_time_stamp())
-    return top();
-  else
-    return ContiguousSpace::saved_mark_word();
+  HeapWord* local_top = top();
+  OrderAccess::loadload();
+  if (_gc_time_stamp < g1h->get_gc_time_stamp()) {
+    return local_top;
+  } else {
+    return Space::saved_mark_word();
+  }
 }
 
-void G1OffsetTableContigSpace::set_saved_mark() {
+void G1OffsetTableContigSpace::record_top_and_timestamp() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
 
@@ -1118,7 +1036,7 @@
     // of region. If it does so after _gc_time_stamp = ..., then it
     // will pick up the right saved_mark_word() as the high water mark
     // of the region. Either way, the behaviour will be correct.
-    ContiguousSpace::set_saved_mark();
+    Space::set_saved_mark_word(top());
     OrderAccess::storestore();
     _gc_time_stamp = curr_gc_time_stamp;
     // No need to do another barrier to flush the writes above. If
@@ -1129,6 +1047,26 @@
   }
 }
 
+void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
+  object_iterate(blk);
+}
+
+void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
+  HeapWord* p = bottom();
+  while (p < top()) {
+    if (block_is_obj(p)) {
+      blk->do_object(oop(p));
+    }
+    p += block_size(p);
+  }
+}
+
+#define block_is_always_obj(q) true
+void G1OffsetTableContigSpace::prepare_for_compaction(CompactPoint* cp) {
+  SCAN_AND_FORWARD(cp, top, block_is_always_obj, block_size);
+}
+#undef block_is_always_obj
+
 G1OffsetTableContigSpace::
 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                          MemRegion mr) :
@@ -1137,8 +1075,11 @@
   _gc_time_stamp(0)
 {
   _offsets.set_space(this);
-  // false ==> we'll do the clearing if there's clearing to be done.
-  ContiguousSpace::initialize(mr, false, SpaceDecorator::Mangle);
-  _offsets.zero_bottom_entry();
-  _offsets.initialize_threshold();
 }
+
+void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
+  CompactibleSpace::initialize(mr, clear_space, mangle_space);
+  _top = bottom();
+  reset_bot();
+}
+
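setup_heap_region_size() above now takes its limits from HeapRegionBounds: the average of the initial and maximum heap sizes is divided by the target region count, floored to a power of two, and clamped to the 1 MiB..32 MiB bounds. A stand-alone sketch of that calculation, using the constants from heapRegionBounds.hpp introduced later in this changeset:

#include <cstddef>
#include <cstdio>

// Constants copied from HeapRegionBounds, expressed in MiB to keep the math small.
static const size_t kMinRegionMiB    = 1;
static const size_t kMaxRegionMiB    = 32;
static const size_t kTargetRegionNum = 2048;

// Sketch of setup_heap_region_size() for the default-G1HeapRegionSize case:
// average heap size / target region count, floored to a power of two, clamped.
static size_t sketch_region_size_mib(size_t initial_heap_mib, size_t max_heap_mib) {
  size_t average = (initial_heap_mib + max_heap_mib) / 2;
  size_t region  = average / kTargetRegionNum;
  if (region < kMinRegionMiB) region = kMinRegionMiB;

  size_t pow2 = kMinRegionMiB;              // floor to a power of two, like the
  while (pow2 * 2 <= region) pow2 *= 2;     // log2_long/shift pair in the source
  region = pow2;

  if (region > kMaxRegionMiB) region = kMaxRegionMiB;
  return region;
}

int main() {
  // e.g. -Xms2g -Xmx8g: average 5120 MiB over 2048 regions gives 2 MiB regions.
  printf("%zu MiB\n", sketch_region_size_mib(2048, 8192));
  return 0;
}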
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -25,8 +25,10 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
 
-#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
+#include "gc_implementation/g1/g1AllocationContext.hpp"
+#include "gc_implementation/g1/g1BlockOffsetTable.hpp"
 #include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
+#include "gc_implementation/g1/heapRegionType.hpp"
 #include "gc_implementation/g1/survRateGroup.hpp"
 #include "gc_implementation/shared/ageTable.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
@@ -34,8 +36,6 @@
 #include "memory/watermark.hpp"
 #include "utilities/macros.hpp"
 
-#if INCLUDE_ALL_GCS
-
 // A HeapRegion is the smallest piece of a G1CollectedHeap that
 // can be collected independently.
 
@@ -46,8 +46,6 @@
 // The solution is to remove this method from the definition
 // of a Space.
 
-class CompactibleSpace;
-class ContiguousSpace;
 class HeapRegionRemSet;
 class HeapRegionRemSetIterator;
 class HeapRegion;
@@ -56,22 +54,19 @@
 
 #define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
 #define HR_FORMAT_PARAMS(_hr_) \
-                (_hr_)->hrs_index(), \
-                (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : \
-                (_hr_)->startsHumongous() ? "HS" : \
-                (_hr_)->continuesHumongous() ? "HC" : \
-                !(_hr_)->is_empty() ? "O" : "F", \
+                (_hr_)->hrm_index(), \
+                (_hr_)->get_short_type_str(), \
                 p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end())
 
-// sentinel value for hrs_index
-#define G1_NULL_HRS_INDEX ((uint) -1)
+// sentinel value for hrm_index
+#define G1_NO_HRM_INDEX ((uint) -1)
 
 // A dirty card to oop closure for heap regions. It
 // knows how to get the G1 heap and how to use the bitmap
 // in the concurrent marker used by G1 to filter remembered
 // sets.
 
-class HeapRegionDCTOC : public ContiguousSpaceDCTOC {
+class HeapRegionDCTOC : public DirtyCardToOopClosure {
 public:
   // Specification of possible DirtyCardToOopClosure filtering.
   enum FilterKind {
@@ -85,39 +80,13 @@
   FilterKind _fk;
   G1CollectedHeap* _g1;
 
-  void walk_mem_region_with_cl(MemRegion mr,
-                               HeapWord* bottom, HeapWord* top,
-                               ExtendedOopClosure* cl);
-
-  // We don't specialize this for FilteringClosure; filtering is handled by
-  // the "FilterKind" mechanism.  But we provide this to avoid a compiler
-  // warning.
-  void walk_mem_region_with_cl(MemRegion mr,
-                               HeapWord* bottom, HeapWord* top,
-                               FilteringClosure* cl) {
-    HeapRegionDCTOC::walk_mem_region_with_cl(mr, bottom, top,
-                                             (ExtendedOopClosure*)cl);
-  }
-
-  // Get the actual top of the area on which the closure will
-  // operate, given where the top is assumed to be (the end of the
-  // memory region passed to do_MemRegion) and where the object
-  // at the top is assumed to start. For example, an object may
-  // start at the top but actually extend past the assumed top,
-  // in which case the top becomes the end of the object.
-  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj) {
-    return ContiguousSpaceDCTOC::get_actual_top(top, top_obj);
-  }
-
   // Walk the given memory region from bottom to (actual) top
   // looking for objects and applying the oop closure (_cl) to
   // them. The base implementation of this treats the area as
   // blocks, where a block may or may not be an object. Sub-
   // classes should override this to provide more accurate
   // or possibly more efficient walking.
-  void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top) {
-    Filtering_DCTOC::walk_mem_region(mr, bottom, top);
-  }
+  void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);
 
 public:
   HeapRegionDCTOC(G1CollectedHeap* g1,
@@ -151,9 +120,9 @@
 // the regions anyway) and at the end of a Full GC. The current scheme
 // that uses sequential unsigned ints will fail only if we have 4b
 // evacuation pauses between two cleanups, which is _highly_ unlikely.
-
-class G1OffsetTableContigSpace: public ContiguousSpace {
+class G1OffsetTableContigSpace: public CompactibleSpace {
   friend class VMStructs;
+  HeapWord* _top;
  protected:
   G1BlockOffsetArrayContigSpace _offsets;
   Mutex _par_alloc_lock;
@@ -170,11 +139,35 @@
   G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                            MemRegion mr);
 
+  void set_top(HeapWord* value) { _top = value; }
+  HeapWord* top() const { return _top; }
+
+ protected:
+  // Reset the G1OffsetTableContigSpace.
+  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
+
+  HeapWord** top_addr() { return &_top; }
+  // Allocation helpers (return NULL if full).
+  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
+  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
+
+ public:
+  void reset_after_compaction() { set_top(compaction_top()); }
+
+  size_t used() const { return byte_size(bottom(), top()); }
+  size_t free() const { return byte_size(top(), end()); }
+  bool is_free_block(const HeapWord* p) const { return p >= top(); }
+
+  MemRegion used_region() const { return MemRegion(bottom(), top()); }
+
+  void object_iterate(ObjectClosure* blk);
+  void safe_object_iterate(ObjectClosure* blk);
+
   void set_bottom(HeapWord* value);
   void set_end(HeapWord* value);
 
   virtual HeapWord* saved_mark_word() const;
-  virtual void set_saved_mark();
+  void record_top_and_timestamp();
   void reset_gc_time_stamp() { _gc_time_stamp = 0; }
   unsigned get_gc_time_stamp() { return _gc_time_stamp; }
 
@@ -194,6 +187,8 @@
   HeapWord* block_start(const void* p);
   HeapWord* block_start_const(const void* p) const;
 
+  void prepare_for_compaction(CompactPoint* cp);
+
   // Add offset table update.
   virtual HeapWord* allocate(size_t word_size);
   HeapWord* par_allocate(size_t word_size);
@@ -205,12 +200,7 @@
   virtual void print() const;
 
   void reset_bot() {
-    _offsets.zero_bottom_entry();
-    _offsets.initialize_threshold();
-  }
-
-  void update_bot_for_object(HeapWord* start, size_t word_size) {
-    _offsets.alloc_block(start, word_size);
+    _offsets.reset_bot();
   }
 
   void print_bot_on(outputStream* out) {
@@ -222,16 +212,6 @@
   friend class VMStructs;
  private:
 
-  enum HumongousType {
-    NotHumongous = 0,
-    StartsHumongous,
-    ContinuesHumongous
-  };
-
-  // Requires that the region "mr" be dense with objects, and begin and end
-  // with an object.
-  void oops_in_mr_iterate(MemRegion mr, ExtendedOopClosure* cl);
-
   // The remembered set for this region.
   // (Might want to make this "inline" later, to avoid some alloc failure
   // issues.)
@@ -241,9 +221,12 @@
 
  protected:
   // The index of this region in the heap region sequence.
-  uint  _hrs_index;
+  uint  _hrm_index;
 
-  HumongousType _humongous_type;
+  AllocationContext_t _allocation_context;
+
+  HeapRegionType _type;
+
   // For a humongous region, region in which it starts.
   HeapRegion* _humongous_start_region;
   // For the start region of a humongous sequence, it's original end().
@@ -256,11 +239,9 @@
   bool _evacuation_failed;
 
   // A heap region may be a member one of a number of special subsets, each
-  // represented as linked lists through the field below.  Currently, these
-  // sets include:
+  // represented as linked lists through the field below.  Currently, there
+  // is only one set:
   //   The collection set.
-  //   The set of allocation regions used in a collection pause.
-  //   Spaces that may contain gray objects.
   HeapRegion* _next_in_special_set;
 
   // next region in the young "generation" region set
@@ -275,7 +256,6 @@
 #ifdef ASSERT
   HeapRegionSetBase* _containing_set;
 #endif // ASSERT
-  bool _pending_removal;
 
   // For parallel heapRegion traversal.
   jint _claimed;
@@ -288,13 +268,6 @@
   // The calculated GC efficiency of the region.
   double _gc_efficiency;
 
-  enum YoungType {
-    NotYoung,                   // a region is not young
-    Young,                      // a region is young
-    Survivor                    // a region is young and it contains survivors
-  };
-
-  volatile YoungType _young_type;
   int  _young_index_in_cset;
   SurvRateGroup* _surv_rate_group;
   int  _age_index;
@@ -319,12 +292,6 @@
     _next_top_at_mark_start = bot;
   }
 
-  void set_young_type(YoungType new_type) {
-    //assert(_young_type != new_type, "setting the same type" );
-    // TODO: add more assertions here
-    _young_type = new_type;
-  }
-
   // Cached attributes used in the collection set policy information
 
   // The RSet length that was added to the total value
@@ -340,10 +307,16 @@
   size_t _predicted_bytes_to_copy;
 
  public:
-  HeapRegion(uint hrs_index,
+  HeapRegion(uint hrm_index,
              G1BlockOffsetSharedArray* sharedOffsetArray,
              MemRegion mr);
 
+  // Initializing the HeapRegion not only resets the data structure, but also
+  // resets the BOT for that heap region.
+  // The default value for clear_space means that we will do the clearing
+  // ourselves if there is clearing to be done. We also always mangle the space.
+  virtual void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);
+
   static int    LogOfHRGrainBytes;
   static int    LogOfHRGrainWords;
 
@@ -379,18 +352,19 @@
     ParMarkRootClaimValue      = 9
   };
 
-  inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
-    assert(is_young(), "we can only skip BOT updates on young regions");
-    return ContiguousSpace::par_allocate(word_size);
-  }
-  inline HeapWord* allocate_no_bot_updates(size_t word_size) {
-    assert(is_young(), "we can only skip BOT updates on young regions");
-    return ContiguousSpace::allocate(word_size);
-  }
+  // All allocated blocks are occupied by objects in a HeapRegion
+  bool block_is_obj(const HeapWord* p) const;
+
+  // Returns the object size for all valid block starts
+  // and the amount of unallocated words if called on top()
+  size_t block_size(const HeapWord* p) const;
 
-  // If this region is a member of a HeapRegionSeq, the index in that
+  inline HeapWord* par_allocate_no_bot_updates(size_t word_size);
+  inline HeapWord* allocate_no_bot_updates(size_t word_size);
+
+  // If this region is a member of a HeapRegionManager, the index in that
   // sequence, otherwise -1.
-  uint hrs_index() const { return _hrs_index; }
+  uint hrm_index() const { return _hrm_index; }
 
   // The number of bytes marked live in the region in the last marking phase.
   size_t marked_bytes()    { return _prev_marked_bytes; }
@@ -437,9 +411,21 @@
     _prev_marked_bytes = _next_marked_bytes = 0;
   }
 
-  bool isHumongous() const { return _humongous_type != NotHumongous; }
-  bool startsHumongous() const { return _humongous_type == StartsHumongous; }
-  bool continuesHumongous() const { return _humongous_type == ContinuesHumongous; }
+  const char* get_type_str() const { return _type.get_str(); }
+  const char* get_short_type_str() const { return _type.get_short_str(); }
+
+  bool is_free() const { return _type.is_free(); }
+
+  bool is_young()    const { return _type.is_young();    }
+  bool is_eden()     const { return _type.is_eden();     }
+  bool is_survivor() const { return _type.is_survivor(); }
+
+  bool isHumongous() const { return _type.is_humongous(); }
+  bool startsHumongous() const { return _type.is_starts_humongous(); }
+  bool continuesHumongous() const { return _type.is_continues_humongous();   }
+
+  bool is_old() const { return _type.is_old(); }
+
   // For a humongous region, region in which it starts.
   HeapRegion* humongous_start_region() const {
     return _humongous_start_region;
@@ -461,7 +447,7 @@
   // with this HS region.
   uint last_hc_index() const {
     assert(startsHumongous(), "don't call this otherwise");
-    return hrs_index() + region_num();
+    return hrm_index() + region_num();
   }
 
   // Same as Space::is_in_reserved, but will use the original size of the region.
@@ -503,7 +489,7 @@
   void set_continuesHumongous(HeapRegion* first_hr);
 
   // Unsets the humongous-related fields on the region.
-  void set_notHumongous();
+  void clear_humongous();
 
   // If the region has a remembered set, return a pointer to it.
   HeapRegionRemSet* rem_set() const {
@@ -530,6 +516,14 @@
     _next_in_special_set = r;
   }
 
+  void set_allocation_context(AllocationContext_t context) {
+    _allocation_context = context;
+  }
+
+  AllocationContext_t  allocation_context() const {
+    return _allocation_context;
+  }
+
   // Methods used by the HeapRegionSetBase class and subclasses.
 
   // Getter and setter for the next and prev fields used to link regions into
@@ -563,26 +557,6 @@
   // to provide a dummy version of it.
 #endif // ASSERT
 
-  // If we want to remove regions from a list in bulk we can simply tag
-  // them with the pending_removal tag and call the
-  // remove_all_pending() method on the list.
-
-  bool pending_removal() { return _pending_removal; }
-
-  void set_pending_removal(bool pending_removal) {
-    if (pending_removal) {
-      assert(!_pending_removal && containing_set() != NULL,
-             "can only set pending removal to true if it's false and "
-             "the region belongs to a region set");
-    } else {
-      assert( _pending_removal && containing_set() == NULL,
-              "can only set pending removal to false if it's true and "
-              "the region does not belong to a region set");
-    }
-
-    _pending_removal = pending_removal;
-  }
-
   HeapRegion* get_next_young_region() { return _next_young_region; }
   void set_next_young_region(HeapRegion* hr) {
     _next_young_region = hr;
@@ -593,10 +567,7 @@
   void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
   bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
 
-  HeapWord* orig_end() { return _orig_end; }
-
-  // Allows logical separation between objects allocated before and after.
-  void save_marks();
+  HeapWord* orig_end() const { return _orig_end; }
 
   // Reset HR stuff to default values.
   void hr_clear(bool par, bool clear_space, bool locked = false);
@@ -606,10 +577,6 @@
   HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
   HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
 
-  // Apply "cl->do_oop" to (the addresses of) all reference fields in objects
-  // allocated in the current region before the last call to "save_mark".
-  void oop_before_save_marks_iterate(ExtendedOopClosure* cl);
-
   // Note the start or end of marking. This tells the heap region
   // that the collector is about to start or has finished (concurrently)
   // marking the heap.
@@ -657,9 +624,6 @@
   void calc_gc_efficiency(void);
   double gc_efficiency() { return _gc_efficiency;}
 
-  bool is_young() const     { return _young_type != NotYoung; }
-  bool is_survivor() const  { return _young_type == Survivor; }
-
   int  young_index_in_cset() const { return _young_index_in_cset; }
   void set_young_index_in_cset(int index) {
     assert( (index == -1) || is_young(), "pre-condition" );
@@ -711,11 +675,13 @@
     }
   }
 
-  void set_young() { set_young_type(Young); }
+  void set_free() { _type.set_free(); }
 
-  void set_survivor() { set_young_type(Survivor); }
+  void set_eden()        { _type.set_eden();        }
+  void set_eden_pre_gc() { _type.set_eden_pre_gc(); }
+  void set_survivor()    { _type.set_survivor();    }
 
-  void set_not_young() { set_young_type(NotYoung); }
+  void set_old() { _type.set_old(); }
 
   // Determine if an object has been allocated since the last
   // mark performed by the collector. This returns true iff the object
@@ -767,18 +733,6 @@
                                    bool filter_young,
                                    jbyte* card_ptr);
 
-  // A version of block start that is guaranteed to find *some* block
-  // boundary at or before "p", but does not object iteration, and may
-  // therefore be used safely when the heap is unparseable.
-  HeapWord* block_start_careful(const void* p) const {
-    return _offsets.block_start_careful(p);
-  }
-
-  // Requires that "addr" is within the region.  Returns the start of the
-  // first ("careful") block that starts at or after "addr", or else the
-  // "end" of the region if there is no such block.
-  HeapWord* next_block_start_careful(HeapWord* addr);
-
   size_t recorded_rs_length() const        { return _recorded_rs_length; }
   double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
   size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }
@@ -795,10 +749,6 @@
     _predicted_bytes_to_copy = bytes;
   }
 
-#define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
-  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
-  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)
-
   virtual CompactibleSpace* next_compaction_space() const;
 
   virtual void reset_after_compaction();
@@ -806,14 +756,9 @@
   // Routines for managing a list of code roots (attached to the
   // this region's RSet) that point into this heap region.
   void add_strong_code_root(nmethod* nm);
+  void add_strong_code_root_locked(nmethod* nm);
   void remove_strong_code_root(nmethod* nm);
 
-  // During a collection, migrate the successfully evacuated
-  // strong code roots that referenced into this region to the
-  // new regions that they now point into. Unsuccessfully
-  // evacuated code roots are not migrated.
-  void migrate_strong_code_roots();
-
   // Applies blk->do_code_blob() to each of the entries in
   // the strong code roots list for this region
   void strong_code_roots_do(CodeBlobClosure* blk) const;
@@ -847,7 +792,7 @@
 // HeapRegionClosure is used for iterating over regions.
 // Terminates the iteration when the "doHeapRegion" method returns "true".
 class HeapRegionClosure : public StackObj {
-  friend class HeapRegionSeq;
+  friend class HeapRegionManager;
   friend class G1CollectedHeap;
 
   bool _complete;
@@ -864,6 +809,4 @@
   bool complete() { return _complete; }
 };
 
-#endif // INCLUDE_ALL_GCS
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
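The header above replaces the HumongousType and YoungType enums with a single HeapRegionType field (_type) and forwards the old predicates to it. An illustrative sketch of that consolidation, not the actual HeapRegionType class:

#include <cstdio>

// One region-type tag with predicate accessors like those the header now
// forwards to (is_free(), is_young(), isHumongous(), is_old(), ...).
class SketchRegionType {
 public:
  enum Tag { Free, Eden, Survivor, StartsHumongous, ContinuesHumongous, Old };

  SketchRegionType() : _tag(Free) {}

  bool is_free() const      { return _tag == Free; }
  bool is_young() const     { return _tag == Eden || _tag == Survivor; }
  bool is_survivor() const  { return _tag == Survivor; }
  bool is_humongous() const { return _tag == StartsHumongous || _tag == ContinuesHumongous; }
  bool is_old() const       { return _tag == Old; }

  void set_free()     { _tag = Free; }
  void set_eden()     { _tag = Eden; }
  void set_survivor() { _tag = Survivor; }
  void set_old()      { _tag = Old; }

 private:
  Tag _tag;
};

int main() {
  SketchRegionType t;
  t.set_survivor();
  printf("%d %d\n", t.is_young(), t.is_survivor());  // prints: 1 1
  return 0;
}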
--- a/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -25,8 +25,49 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
 
+#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "memory/space.hpp"
+#include "runtime/atomic.inline.hpp"
+
+// This version requires locking.
+inline HeapWord* G1OffsetTableContigSpace::allocate_impl(size_t size,
+                                                HeapWord* const end_value) {
+  HeapWord* obj = top();
+  if (pointer_delta(end_value, obj) >= size) {
+    HeapWord* new_top = obj + size;
+    set_top(new_top);
+    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
+    return obj;
+  } else {
+    return NULL;
+  }
+}
+
+// This version is lock-free.
+inline HeapWord* G1OffsetTableContigSpace::par_allocate_impl(size_t size,
+                                                    HeapWord* const end_value) {
+  do {
+    HeapWord* obj = top();
+    if (pointer_delta(end_value, obj) >= size) {
+      HeapWord* new_top = obj + size;
+      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
+      // result can be one of two:
+      //  the old top value: the exchange succeeded
+      //  otherwise: the new value of the top is returned.
+      if (result == obj) {
+        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
+        return obj;
+      }
+    } else {
+      return NULL;
+    }
+  } while (true);
+}
+
 inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
-  HeapWord* res = ContiguousSpace::allocate(size);
+  HeapWord* res = allocate_impl(size, end());
   if (res != NULL) {
     _offsets.alloc_block(res, size);
   }
@@ -38,12 +79,7 @@
 // this is used for larger LAB allocations only.
 inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
   MutexLocker x(&_par_alloc_lock);
-  // Given that we take the lock no need to use par_allocate() here.
-  HeapWord* res = ContiguousSpace::allocate(size);
-  if (res != NULL) {
-    _offsets.alloc_block(res, size);
-  }
-  return res;
+  return allocate(size);
 }
 
 inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
@@ -55,6 +91,52 @@
   return _offsets.block_start_const(p);
 }
 
+inline bool
+HeapRegion::block_is_obj(const HeapWord* p) const {
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  if (ClassUnloadingWithConcurrentMark) {
+    return !g1h->is_obj_dead(oop(p), this);
+  }
+  return p < top();
+}
+
+inline size_t
+HeapRegion::block_size(const HeapWord *addr) const {
+  if (addr == top()) {
+    return pointer_delta(end(), addr);
+  }
+
+  if (block_is_obj(addr)) {
+    return oop(addr)->size();
+  }
+
+  assert(ClassUnloadingWithConcurrentMark,
+      err_msg("All blocks should be objects if G1 Class Unloading isn't used. "
+              "HR: ["PTR_FORMAT", "PTR_FORMAT", "PTR_FORMAT") "
+              "addr: " PTR_FORMAT,
+              p2i(bottom()), p2i(top()), p2i(end()), p2i(addr)));
+
+  // Old regions' dead objects may have dead classes
+  // We need to find the next live object in some other
+  // manner than getting the oop size
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
+      getNextMarkedWordAddress(addr, prev_top_at_mark_start());
+
+  assert(next > addr, "must get the next live object");
+  return pointer_delta(next, addr);
+}
+
+inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
+  assert(is_young(), "we can only skip BOT updates on young regions");
+  return par_allocate_impl(word_size, end());
+}
+
+inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
+  assert(is_young(), "we can only skip BOT updates on young regions");
+  return allocate_impl(word_size, end());
+}
+
 inline void HeapRegion::note_start_of_marking() {
   _next_marked_bytes = 0;
   _next_top_at_mark_start = top();
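par_allocate_impl() above implements lock-free bump-pointer allocation with Atomic::cmpxchg_ptr: read top, check the remaining space, and CAS top forward, retrying if another thread moved it first. A stand-alone sketch of the same scheme using std::atomic; names and buffer sizes are illustrative:

#include <atomic>
#include <cstddef>
#include <cstdio>

struct SketchSpace {
  std::atomic<char*> _top;
  char* _end;

  SketchSpace(char* bottom, char* end) : _top(bottom), _end(end) {}

  char* par_allocate(size_t bytes) {
    for (;;) {
      char* obj = _top.load();
      if (static_cast<size_t>(_end - obj) < bytes) {
        return NULL;                              // space exhausted
      }
      char* new_top = obj + bytes;
      // Succeeds only if _top is still obj; on failure obj is refreshed with
      // the value another thread installed and the loop retries.
      if (_top.compare_exchange_weak(obj, new_top)) {
        return obj;
      }
    }
  }
};

int main() {
  static char buffer[1024];
  SketchSpace space(buffer, buffer + sizeof(buffer));
  char* a = space.par_allocate(100);
  char* b = space.par_allocate(100);
  printf("%td\n", b - a);  // prints 100: the two allocations are adjacent
  return 0;
}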
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/heapRegionBounds.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONBOUNDS_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONBOUNDS_HPP
+
+class HeapRegionBounds : public AllStatic {
+private:
+  // Minimum region size; we won't go lower than that.
+  // We might want to decrease this in the future, to deal with small
+  // heaps a bit more efficiently.
+  static const size_t MIN_REGION_SIZE = 1024 * 1024;
+
+  // Maximum region size; we don't go higher than that. There's a good
+  // reason for having an upper bound. We don't want regions to get too
+  // large, otherwise cleanup's effectiveness would decrease as there
+  // will be fewer opportunities to find totally empty regions after
+  // marking.
+  static const size_t MAX_REGION_SIZE = 32 * 1024 * 1024;
+
+  // The automatic region size calculation will try to have around this
+  // many regions in the heap (based on the min heap size).
+  static const size_t TARGET_REGION_NUMBER = 2048;
+
+public:
+  static inline size_t min_size();
+  static inline size_t max_size();
+  static inline size_t target_number();
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONBOUNDS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/heapRegionBounds.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "gc_implementation/g1/heapRegionBounds.hpp"
+
+size_t HeapRegionBounds::min_size() {
+  return MIN_REGION_SIZE;
+}
+
+size_t HeapRegionBounds::max_size() {
+  return MAX_REGION_SIZE;
+}
+
+size_t HeapRegionBounds::target_number() {
+  return TARGET_REGION_NUMBER;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/heapRegionManager.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,447 @@
+/*
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/concurrentG1Refine.hpp"
+#include "memory/allocation.hpp"
+
+void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
+                               G1RegionToSpaceMapper* prev_bitmap,
+                               G1RegionToSpaceMapper* next_bitmap,
+                               G1RegionToSpaceMapper* bot,
+                               G1RegionToSpaceMapper* cardtable,
+                               G1RegionToSpaceMapper* card_counts) {
+  _allocated_heapregions_length = 0;
+
+  _heap_mapper = heap_storage;
+
+  _prev_bitmap_mapper = prev_bitmap;
+  _next_bitmap_mapper = next_bitmap;
+
+  _bot_mapper = bot;
+  _cardtable_mapper = cardtable;
+
+  _card_counts_mapper = card_counts;
+
+  MemRegion reserved = heap_storage->reserved();
+  _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);
+
+  _available_map.resize(_regions.length(), false);
+  _available_map.clear();
+}
+
+bool HeapRegionManager::is_available(uint region) const {
+  return _available_map.at(region);
+}
+
+#ifdef ASSERT
+bool HeapRegionManager::is_free(HeapRegion* hr) const {
+  return _free_list.contains(hr);
+}
+#endif
+
+HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
+  MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
+  assert(reserved().contains(mr), "invariant");
+  return g1h->allocator()->new_heap_region(hrm_index, g1h->bot_shared(), mr);
+}
+
+void HeapRegionManager::commit_regions(uint index, size_t num_regions) {
+  guarantee(num_regions > 0, "Must commit more than zero regions");
+  guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions");
+
+  _num_committed += (uint)num_regions;
+
+  _heap_mapper->commit_regions(index, num_regions);
+
+  // Also commit auxiliary data
+  _prev_bitmap_mapper->commit_regions(index, num_regions);
+  _next_bitmap_mapper->commit_regions(index, num_regions);
+
+  _bot_mapper->commit_regions(index, num_regions);
+  _cardtable_mapper->commit_regions(index, num_regions);
+
+  _card_counts_mapper->commit_regions(index, num_regions);
+}
+
+void HeapRegionManager::uncommit_regions(uint start, size_t num_regions) {
+  guarantee(num_regions >= 1, err_msg("Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start));
+  guarantee(_num_committed >= num_regions, "pre-condition");
+
+  // Print before uncommitting.
+  if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
+    for (uint i = start; i < start + num_regions; i++) {
+      HeapRegion* hr = at(i);
+      G1CollectedHeap::heap()->hr_printer()->uncommit(hr->bottom(), hr->end());
+    }
+  }
+
+  _num_committed -= (uint)num_regions;
+
+  _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range);
+  _heap_mapper->uncommit_regions(start, num_regions);
+
+  // Also uncommit auxiliary data
+  _prev_bitmap_mapper->uncommit_regions(start, num_regions);
+  _next_bitmap_mapper->uncommit_regions(start, num_regions);
+
+  _bot_mapper->uncommit_regions(start, num_regions);
+  _cardtable_mapper->uncommit_regions(start, num_regions);
+
+  _card_counts_mapper->uncommit_regions(start, num_regions);
+}
+
+void HeapRegionManager::make_regions_available(uint start, uint num_regions) {
+  guarantee(num_regions > 0, "No point in calling this for zero regions");
+  commit_regions(start, num_regions);
+  for (uint i = start; i < start + num_regions; i++) {
+    if (_regions.get_by_index(i) == NULL) {
+      HeapRegion* new_hr = new_heap_region(i);
+      _regions.set_by_index(i, new_hr);
+      _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
+    }
+  }
+
+  _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range);
+
+  for (uint i = start; i < start + num_regions; i++) {
+    assert(is_available(i), err_msg("Just made region %u available but is apparently not.", i));
+    HeapRegion* hr = at(i);
+    if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
+      G1CollectedHeap::heap()->hr_printer()->commit(hr->bottom(), hr->end());
+    }
+    HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i);
+    MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
+
+    hr->initialize(mr);
+    insert_into_free_list(at(i));
+  }
+}
+
+uint HeapRegionManager::expand_by(uint num_regions) {
+  return expand_at(0, num_regions);
+}
+
+uint HeapRegionManager::expand_at(uint start, uint num_regions) {
+  if (num_regions == 0) {
+    return 0;
+  }
+
+  uint cur = start;
+  uint idx_last_found = 0;
+  uint num_last_found = 0;
+
+  uint expanded = 0;
+
+  while (expanded < num_regions &&
+         (num_last_found = find_unavailable_from_idx(cur, &idx_last_found)) > 0) {
+    uint to_expand = MIN2(num_regions - expanded, num_last_found);
+    make_regions_available(idx_last_found, to_expand);
+    expanded += to_expand;
+    cur = idx_last_found + num_last_found + 1;
+  }
+
+  verify_optional();
+  return expanded;
+}
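
A minimal, self-contained sketch of the expansion loop above: it repeatedly finds the next run of uncommitted regions and commits only as much of each run as is still needed. A std::vector<bool> stands in for the availability bitmap, and all names here are illustrative rather than HotSpot API.

    // Sketch only: "find the next uncommitted run, commit part of it" pattern,
    // mirroring the structure of expand_at() and find_unavailable_from_idx().
    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Finds the next maximal run of 'false' bits at or after 'start'.
    // Returns the run length; '*res_idx' receives the run's first index.
    static uint32_t find_unavailable_run(const std::vector<bool>& available,
                                         uint32_t start, uint32_t* res_idx) {
      uint32_t cur = start;
      while (cur < available.size() && available[cur]) cur++;   // skip committed slots
      if (cur == available.size()) return 0;                    // nothing left to commit
      *res_idx = cur;
      while (cur < available.size() && !available[cur]) cur++;  // measure the run
      return cur - *res_idx;
    }

    // Commits up to 'num_regions' regions, starting the search at 'start'.
    // Returns the number of regions actually committed.
    static uint32_t expand_at_sketch(std::vector<bool>& available,
                                     uint32_t start, uint32_t num_regions) {
      uint32_t expanded = 0;
      uint32_t cur = start;
      uint32_t run_idx = 0;
      uint32_t run_len = 0;
      while (expanded < num_regions &&
             (run_len = find_unavailable_run(available, cur, &run_idx)) > 0) {
        uint32_t to_expand = std::min(num_regions - expanded, run_len);
        for (uint32_t i = run_idx; i < run_idx + to_expand; i++) {
          available[i] = true;                                   // "commit" region i
        }
        expanded += to_expand;
        cur = run_idx + run_len;                                 // continue past this run
      }
      return expanded;
    }
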
+
+uint HeapRegionManager::find_contiguous(size_t num, bool empty_only) {
+  uint found = 0;
+  size_t length_found = 0;
+  uint cur = 0;
+
+  while (length_found < num && cur < max_length()) {
+    HeapRegion* hr = _regions.get_by_index(cur);
+    if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
+      // This region is a potential candidate for allocation into.
+      length_found++;
+    } else {
+      // This region is not a candidate. The next region is the next possible one.
+      found = cur + 1;
+      length_found = 0;
+    }
+    cur++;
+  }
+
+  if (length_found == num) {
+    for (uint i = found; i < (found + num); i++) {
+      HeapRegion* hr = _regions.get_by_index(i);
+      // sanity check
+      guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
+                err_msg("Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
+                        " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr)));
+    }
+    return found;
+  } else {
+    return G1_NO_HRM_INDEX;
+  }
+}
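
The first-fit scan above can be read in isolation as the following small sketch; a std::vector<bool> models the "is this region a candidate" test and the names are illustrative only.

    // Sketch only: first-fit search for 'num' contiguous candidate slots,
    // mirroring the structure of find_contiguous() above.
    #include <cstddef>
    #include <vector>

    static const size_t NO_INDEX = (size_t)-1;   // stand-in for G1_NO_HRM_INDEX

    static size_t find_contiguous_sketch(const std::vector<bool>& candidate, size_t num) {
      size_t found = 0;         // start of the current candidate run
      size_t length_found = 0;  // length of the current candidate run
      size_t cur = 0;
      while (length_found < num && cur < candidate.size()) {
        if (candidate[cur]) {
          length_found++;       // extend the current run
        } else {
          found = cur + 1;      // restart the search just after this slot
          length_found = 0;
        }
        cur++;
      }
      return (length_found == num) ? found : NO_INDEX;
    }
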
+
+HeapRegion* HeapRegionManager::next_region_in_heap(const HeapRegion* r) const {
+  guarantee(r != NULL, "Start region must be a valid region");
+  guarantee(is_available(r->hrm_index()), err_msg("Trying to iterate starting from region %u which is not in the heap", r->hrm_index()));
+  for (uint i = r->hrm_index() + 1; i < _allocated_heapregions_length; i++) {
+    HeapRegion* hr = _regions.get_by_index(i);
+    if (is_available(i)) {
+      return hr;
+    }
+  }
+  return NULL;
+}
+
+void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
+  uint len = max_length();
+
+  for (uint i = 0; i < len; i++) {
+    if (!is_available(i)) {
+      continue;
+    }
+    guarantee(at(i) != NULL, err_msg("Tried to access region %u that has a NULL HeapRegion*", i));
+    bool res = blk->doHeapRegion(at(i));
+    if (res) {
+      blk->incomplete();
+      return;
+    }
+  }
+}
+
+uint HeapRegionManager::find_unavailable_from_idx(uint start_idx, uint* res_idx) const {
+  guarantee(res_idx != NULL, "checking");
+  guarantee(start_idx <= (max_length() + 1), "checking");
+
+  uint num_regions = 0;
+
+  uint cur = start_idx;
+  while (cur < max_length() && is_available(cur)) {
+    cur++;
+  }
+  if (cur == max_length()) {
+    return num_regions;
+  }
+  *res_idx = cur;
+  while (cur < max_length() && !is_available(cur)) {
+    cur++;
+  }
+  num_regions = cur - *res_idx;
+#ifdef ASSERT
+  for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
+    assert(!is_available(i), "just checking");
+  }
+  assert(cur == max_length() || num_regions == 0 || is_available(cur),
+         err_msg("The region at the current position %u must be available or at the end of the heap.", cur));
+#endif
+  return num_regions;
+}
+
+uint HeapRegionManager::start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const {
+  return num_regions * worker_i / num_workers;
+}
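
The start index above simply spreads the workers evenly across the region index space so that they do not all begin iterating at the same regions. A standalone sketch with illustrative names (the 64-bit cast is only there to keep this toy version overflow-safe):

    // Sketch only: even distribution of starting indices across workers,
    // as computed by start_region_for_worker() above.
    #include <cstdint>
    #include <iostream>

    static uint32_t start_region_for_worker_sketch(uint32_t worker_i,
                                                   uint32_t num_workers,
                                                   uint32_t num_regions) {
      return (uint32_t)(((uint64_t)num_regions * worker_i) / num_workers);
    }

    int main() {
      // With 4 workers over 10 regions the starting indices are 0, 2, 5 and 7.
      for (uint32_t w = 0; w < 4; w++) {
        std::cout << "worker " << w << " starts at "
                  << start_region_for_worker_sketch(w, 4, 10) << "\n";
      }
      return 0;
    }
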
+
+void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint num_workers, jint claim_value) const {
+  const uint start_index = start_region_for_worker(worker_id, num_workers, _allocated_heapregions_length);
+
+  // Every worker will actually look at all regions, skipping over regions that
+  // are currently not committed.
+  // This also (potentially) iterates over regions newly allocated during GC. This
+  // is no problem except for some extra work.
+  for (uint count = 0; count < _allocated_heapregions_length; count++) {
+    const uint index = (start_index + count) % _allocated_heapregions_length;
+    assert(0 <= index && index < _allocated_heapregions_length, "sanity");
+    // Skip over unavailable regions
+    if (!is_available(index)) {
+      continue;
+    }
+    HeapRegion* r = _regions.get_by_index(index);
+    // We'll ignore "continues humongous" regions (we'll process them
+    // when we come across their corresponding "start humongous"
+    // region) and regions already claimed.
+    if (r->claim_value() == claim_value || r->continuesHumongous()) {
+      continue;
+    }
+    // OK, try to claim it
+    if (!r->claimHeapRegion(claim_value)) {
+      continue;
+    }
+    // Success!
+    if (r->startsHumongous()) {
+      // If the region is "starts humongous" we'll also iterate over its
+      // "continues humongous" regions; in fact, we'll do them first.
+      // The order is important: calling the closure on the "starts
+      // humongous" region might de-allocate and clear all its "continues
+      // humongous" regions and, as a result, we might end up processing
+      // them twice. So, we'll do them first (note: most closures will
+      // ignore them anyway) and then we'll do the "starts humongous"
+      // region.
+      for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) {
+        HeapRegion* chr = _regions.get_by_index(ch_index);
+
+        assert(chr->continuesHumongous(), "Must be humongous region");
+        assert(chr->humongous_start_region() == r,
+               err_msg("Must work on humongous continuation of the original start region "
+                       PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr)));
+        assert(chr->claim_value() != claim_value,
+               "Must not have been claimed yet because claiming of humongous continuation first claims the start region");
+
+        bool claim_result = chr->claimHeapRegion(claim_value);
+        // We should always be able to claim it; no one else should
+        // be trying to claim this region.
+        guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");
+
+        bool res2 = blk->doHeapRegion(chr);
+        if (res2) {
+          return;
+        }
+
+        // Right now, this holds (i.e., no closure that actually
+        // does something with "continues humongous" regions
+        // clears them). We might have to weaken it in the future,
+        // but let's leave these two asserts here for extra safety.
+        assert(chr->continuesHumongous(), "should still be the case");
+        assert(chr->humongous_start_region() == r, "sanity");
+      }
+    }
+
+    bool res = blk->doHeapRegion(r);
+    if (res) {
+      return;
+    }
+  }
+}
+
+uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
+  assert(length() > 0, "the region sequence should not be empty");
+  assert(length() <= _allocated_heapregions_length, "invariant");
+  assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
+  assert(num_regions_to_remove < length(), "We should never remove all regions");
+
+  if (num_regions_to_remove == 0) {
+    return 0;
+  }
+
+  uint removed = 0;
+  uint cur = _allocated_heapregions_length - 1;
+  uint idx_last_found = 0;
+  uint num_last_found = 0;
+
+  while ((removed < num_regions_to_remove) &&
+      (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
+    uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);
+
+    uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove);
+
+    cur -= num_last_found;
+    removed += to_remove;
+  }
+
+  verify_optional();
+
+  return removed;
+}
+
+uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
+  guarantee(start_idx < _allocated_heapregions_length, "checking");
+  guarantee(res_idx != NULL, "checking");
+
+  uint num_regions_found = 0;
+
+  jlong cur = start_idx;
+  while (cur != -1 && !(is_available(cur) && at(cur)->is_empty())) {
+    cur--;
+  }
+  if (cur == -1) {
+    return num_regions_found;
+  }
+  jlong old_cur = cur;
+  // cur indexes the first empty region
+  while (cur != -1 && is_available(cur) && at(cur)->is_empty()) {
+    cur--;
+  }
+  *res_idx = cur + 1;
+  num_regions_found = old_cur - cur;
+
+#ifdef ASSERT
+  for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
+    assert(at(i)->is_empty(), "just checking");
+  }
+#endif
+  return num_regions_found;
+}
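
shrink_by() above walks backwards from the top of the allocated range, using find_empty_from_idx_reverse() to locate runs of empty committed regions and uncommitting from the top of each run. A compact sketch of the reverse run search, over a plain std::vector<bool> and with illustrative names:

    // Sketch only: backwards search for the run of "empty" slots ending at or
    // below 'start_idx', mirroring find_empty_from_idx_reverse() above.
    #include <cassert>
    #include <cstdint>
    #include <vector>

    static uint32_t find_empty_run_reverse(const std::vector<bool>& empty,
                                           uint32_t start_idx, uint32_t* res_idx) {
      assert(start_idx < empty.size());                // precondition, as in the original
      int64_t cur = start_idx;
      while (cur >= 0 && !empty[(size_t)cur]) cur--;   // skip non-empty slots
      if (cur < 0) return 0;                           // no empty slot found at all
      int64_t last = cur;                              // highest empty slot of the run
      while (cur >= 0 && empty[(size_t)cur]) cur--;    // extend the run downwards
      *res_idx = (uint32_t)(cur + 1);                  // lowest index of the run
      return (uint32_t)(last - cur);                   // run length
    }
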
+
+void HeapRegionManager::verify() {
+  guarantee(length() <= _allocated_heapregions_length,
+            err_msg("invariant: _length: %u _allocated_length: %u",
+                    length(), _allocated_heapregions_length));
+  guarantee(_allocated_heapregions_length <= max_length(),
+            err_msg("invariant: _allocated_length: %u _max_length: %u",
+                    _allocated_heapregions_length, max_length()));
+
+  bool prev_committed = true;
+  uint num_committed = 0;
+  HeapWord* prev_end = heap_bottom();
+  for (uint i = 0; i < _allocated_heapregions_length; i++) {
+    if (!is_available(i)) {
+      prev_committed = false;
+      continue;
+    }
+    num_committed++;
+    HeapRegion* hr = _regions.get_by_index(i);
+    guarantee(hr != NULL, err_msg("invariant: i: %u", i));
+    guarantee(!prev_committed || hr->bottom() == prev_end,
+              err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
+                      i, HR_FORMAT_PARAMS(hr), p2i(prev_end)));
+    guarantee(hr->hrm_index() == i,
+              err_msg("invariant: i: %u hrm_index(): %u", i, hr->hrm_index()));
+    // Asserts will fire if i is >= _length
+    HeapWord* addr = hr->bottom();
+    guarantee(addr_to_region(addr) == hr, "sanity");
+    // We cannot check whether the region is part of a particular set: at the time
+    // this method may be called, we have only completed allocation of the regions,
+    // but they have not yet been put into a region set.
+    prev_committed = true;
+    if (hr->startsHumongous()) {
+      prev_end = hr->orig_end();
+    } else {
+      prev_end = hr->end();
+    }
+  }
+  for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
+    guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
+  }
+
+  guarantee(num_committed == _num_committed, err_msg("Found %u committed regions, but should be %u", num_committed, _num_committed));
+  _free_list.verify();
+}
+
+#ifndef PRODUCT
+void HeapRegionManager::verify_optional() {
+  verify();
+}
+#endif // PRODUCT
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/heapRegionManager.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_HPP
+
+#include "gc_implementation/g1/g1BiasedArray.hpp"
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
+#include "gc_implementation/g1/heapRegionSet.hpp"
+
+class HeapRegion;
+class HeapRegionClosure;
+class FreeRegionList;
+
+class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
+ protected:
+  virtual HeapRegion* default_value() const { return NULL; }
+};
+
+// This class keeps track of the actual heap memory, auxiliary data
+// and its metadata (i.e., HeapRegion instances) and the list of free regions.
+//
+// This allows maximum flexibility for deciding what to commit or uncommit given
+// a request from outside.
+//
+// HeapRegions are kept in the _regions array in address order. A region's
+// index in the array corresponds to its index in the heap (i.e., 0 is the
+// region at the bottom of the heap, 1 is the one after it, etc.). Two
+// regions that are consecutive in the array should also be adjacent in the
+// address space (i.e., region(i).end() == region(i+1).bottom()).
+//
+// We create a HeapRegion when we commit the region's address space
+// for the first time. When we uncommit the address space of a
+// region we retain the HeapRegion to be able to re-use it in the
+// future (in case we recommit it).
+//
+// We keep track of three lengths:
+//
+// * _num_committed (returned by length()) is the number of currently
+//   committed regions. These may not be contiguous.
+// * _allocated_heapregions_length (not exposed outside this class) is the
+//   highest region index, plus one, for which a HeapRegion has been allocated.
+// * max_length() returns the maximum number of regions the heap can have.
+//
+
+class HeapRegionManager: public CHeapObj<mtGC> {
+  friend class VMStructs;
+
+  G1HeapRegionTable _regions;
+
+  G1RegionToSpaceMapper* _heap_mapper;
+  G1RegionToSpaceMapper* _prev_bitmap_mapper;
+  G1RegionToSpaceMapper* _next_bitmap_mapper;
+  G1RegionToSpaceMapper* _bot_mapper;
+  G1RegionToSpaceMapper* _cardtable_mapper;
+  G1RegionToSpaceMapper* _card_counts_mapper;
+
+  FreeRegionList _free_list;
+
+  // Each bit in this bitmap indicates that the corresponding region is available
+  // for allocation.
+  BitMap _available_map;
+
+  // The number of regions committed in the heap.
+  uint _num_committed;
+
+  // Internal only. One past the highest region index we allocated a HeapRegion instance for.
+  uint _allocated_heapregions_length;
+
+  HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
+  HeapWord* heap_end() const { return _regions.end_address_mapped(); }
+
+  void make_regions_available(uint index, uint num_regions = 1);
+
+  // Pass down commit calls to the VirtualSpace.
+  void commit_regions(uint index, size_t num_regions = 1);
+  void uncommit_regions(uint index, size_t num_regions = 1);
+
+  // Notify other data structures about change in the heap layout.
+  void update_committed_space(HeapWord* old_end, HeapWord* new_end);
+  // Calculate the starting region for each worker during parallel iteration so
+  // that they do not all start from the same region.
+  uint start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const;
+
+  // Find a contiguous set of empty or uncommitted regions of length num and return
+  // the index of the first region or G1_NO_HRM_INDEX if the search was unsuccessful.
+  // If only_empty is true, only empty regions are considered.
+  // Searches from bottom to top of the heap, doing a first-fit.
+  uint find_contiguous(size_t num, bool only_empty);
+  // Finds the next sequence of unavailable regions starting from start_idx. Returns the
+  // length of the sequence found. If this result is zero, no such sequence could be found,
+  // otherwise res_idx indicates the start index of these regions.
+  uint find_unavailable_from_idx(uint start_idx, uint* res_idx) const;
+  // Finds the next sequence of empty regions starting from start_idx, going backwards in
+  // the heap. Returns the length of the sequence found. If this value is zero, no
+  // sequence could be found, otherwise res_idx contains the start index of this range.
+  uint find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const;
+  // Allocate a new HeapRegion for the given index.
+  HeapRegion* new_heap_region(uint hrm_index);
+#ifdef ASSERT
+public:
+  bool is_free(HeapRegion* hr) const;
+#endif
+  // Returns whether the given region is available for allocation.
+  bool is_available(uint region) const;
+
+ public:
+  // Empty constructor; we initialize it with the initialize() method.
+  HeapRegionManager() : _regions(), _heap_mapper(NULL), _num_committed(0),
+                    _next_bitmap_mapper(NULL), _prev_bitmap_mapper(NULL), _bot_mapper(NULL),
+                    _allocated_heapregions_length(0), _available_map(),
+                    _free_list("Free list", new MasterFreeRegionListMtSafeChecker())
+  { }
+
+  void initialize(G1RegionToSpaceMapper* heap_storage,
+                  G1RegionToSpaceMapper* prev_bitmap,
+                  G1RegionToSpaceMapper* next_bitmap,
+                  G1RegionToSpaceMapper* bot,
+                  G1RegionToSpaceMapper* cardtable,
+                  G1RegionToSpaceMapper* card_counts);
+
+  // Return the "dummy" region used for G1AllocRegion. This is currently a hardwired
+  // new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit
+  // the heap from the lowest address, this region (and its associated data
+  // structures) are available and we do not need to check further.
+  HeapRegion* get_dummy_region() { return new_heap_region(0); }
+
+  // Return the HeapRegion at the given index. Assume that the index
+  // is valid.
+  inline HeapRegion* at(uint index) const;
+
+  // If addr is within the committed space return its corresponding
+  // HeapRegion, otherwise return NULL.
+  inline HeapRegion* addr_to_region(HeapWord* addr) const;
+
+  // Insert the given region into the free region list.
+  inline void insert_into_free_list(HeapRegion* hr);
+
+  // Insert the given region list into the global free region list.
+  void insert_list_into_free_list(FreeRegionList* list) {
+    _free_list.add_ordered(list);
+  }
+
+  HeapRegion* allocate_free_region(bool is_old) {
+    HeapRegion* hr = _free_list.remove_region(is_old);
+
+    if (hr != NULL) {
+      assert(hr->next() == NULL, "Single region should not have next");
+      assert(is_available(hr->hrm_index()), "Must be committed");
+    }
+    return hr;
+  }
+
+  inline void allocate_free_regions_starting_at(uint first, uint num_regions);
+
+  // Remove all regions from the free list.
+  void remove_all_free_regions() {
+    _free_list.remove_all();
+  }
+
+  // Return the number of committed free regions in the heap.
+  uint num_free_regions() const {
+    return _free_list.length();
+  }
+
+  size_t total_capacity_bytes() const {
+    return num_free_regions() * HeapRegion::GrainBytes;
+  }
+
+  // Return the number of available (uncommitted) regions.
+  uint available() const { return max_length() - length(); }
+
+  // Return the number of regions that have been committed in the heap.
+  uint length() const { return _num_committed; }
+
+  // Return the maximum number of regions in the heap.
+  uint max_length() const { return (uint)_regions.length(); }
+
+  MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); }
+
+  // Expand the sequence to reflect that the heap has grown. Either create new
+  // HeapRegions, or re-use existing ones. Returns the number of regions the
+  // sequence was expanded by. If a HeapRegion allocation fails, the resulting
+  // number of regions might be smaller than what's desired.
+  uint expand_by(uint num_regions);
+
+  // Makes sure that the regions from start to start+num_regions-1 are available
+  // for allocation. Returns the number of regions that were committed to achieve
+  // this.
+  uint expand_at(uint start, uint num_regions);
+
+  // Find a contiguous set of empty regions of length num. Returns the start index of
+  // that set, or G1_NO_HRM_INDEX.
+  uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
+  // Find a contiguous set of empty or unavailable regions of length num. Returns the
+  // start index of that set, or G1_NO_HRM_INDEX.
+  uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); }
+
+  HeapRegion* next_region_in_heap(const HeapRegion* r) const;
+
+  // Apply blk->doHeapRegion() on all committed regions in address order,
+  // terminating the iteration early if doHeapRegion() returns true.
+  void iterate(HeapRegionClosure* blk) const;
+
+  void par_iterate(HeapRegionClosure* blk, uint worker_id, uint no_of_par_workers, jint claim_value) const;
+
+  // Uncommit up to num_regions_to_remove regions that are completely free.
+  // Return the actual number of uncommitted regions.
+  uint shrink_by(uint num_regions_to_remove);
+
+  void verify();
+
+  // Do some sanity checking.
+  void verify_optional() PRODUCT_RETURN;
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_HPP
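
The class comment above distinguishes three lengths (committed regions, allocated HeapRegion instances, and the maximum region count) and tracks commitment in a bitmap, retaining HeapRegion objects across uncommit/recommit. A toy model of that bookkeeping, assuming a fixed maximum and lazily created region objects; all names are illustrative and none of this is the HotSpot API.

    // Sketch only: toy bookkeeping mirroring the three lengths described in
    // the HeapRegionManager class comment (committed, allocated, maximum).
    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct ToyRegion { uint32_t index; };

    class ToyRegionManager {
      std::vector<ToyRegion*> _regions;    // one slot per possible region (max_length)
      std::vector<bool>       _available;  // bit set => region is committed
      uint32_t                _num_committed;
      uint32_t                _allocated_length;  // one past highest allocated slot

     public:
      explicit ToyRegionManager(uint32_t max_regions)
        : _regions(max_regions, nullptr), _available(max_regions, false),
          _num_committed(0), _allocated_length(0) {}

      uint32_t length() const     { return _num_committed; }
      uint32_t max_length() const { return (uint32_t)_regions.size(); }
      bool is_available(uint32_t i) const { return _available[i]; }

      // Commit one region, creating its ToyRegion lazily and retaining it
      // afterwards, as HeapRegionManager retains HeapRegions across uncommit.
      void commit(uint32_t i) {
        assert(i < max_length() && !_available[i]);
        if (_regions[i] == nullptr) {
          _regions[i] = new ToyRegion{i};
          if (i + 1 > _allocated_length) _allocated_length = i + 1;
        }
        _available[i] = true;
        _num_committed++;
      }

      void uncommit(uint32_t i) {
        assert(i < max_length() && _available[i]);
        _available[i] = false;  // the ToyRegion object is kept for later re-use
        _num_committed--;
      }
      // Destructor and ToyRegion cleanup omitted for brevity.
    };
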
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/heapRegionManager.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_INLINE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_INLINE_HPP
+
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "gc_implementation/g1/heapRegionManager.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
+
+inline HeapRegion* HeapRegionManager::addr_to_region(HeapWord* addr) const {
+  assert(addr < heap_end(),
+        err_msg("addr: "PTR_FORMAT" end: "PTR_FORMAT, p2i(addr), p2i(heap_end())));
+  assert(addr >= heap_bottom(),
+        err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, p2i(addr), p2i(heap_bottom())));
+
+  HeapRegion* hr = _regions.get_by_address(addr);
+  return hr;
+}
+
+inline HeapRegion* HeapRegionManager::at(uint index) const {
+  assert(is_available(index), "pre-condition");
+  HeapRegion* hr = _regions.get_by_index(index);
+  assert(hr != NULL, "sanity");
+  assert(hr->hrm_index() == index, "sanity");
+  return hr;
+}
+
+inline void HeapRegionManager::insert_into_free_list(HeapRegion* hr) {
+  _free_list.add_ordered(hr);
+}
+
+inline void HeapRegionManager::allocate_free_regions_starting_at(uint first, uint num_regions) {
+  _free_list.remove_starting_at(at(first), num_regions);
+}
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_INLINE_HPP
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -27,7 +27,7 @@
 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
 #include "memory/allocation.hpp"
 #include "memory/padded.inline.hpp"
 #include "memory/space.inline.hpp"
@@ -169,7 +169,7 @@
 
   // Mem size in bytes.
   size_t mem_size() const {
-    return sizeof(this) + _bm.size_in_words() * HeapWordSize;
+    return sizeof(PerRegionTable) + _bm.size_in_words() * HeapWordSize;
   }
 
   // Requires "from" to be in "hr()".
@@ -288,7 +288,7 @@
   }
 
   _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
-                        mtGC, 0, AllocFailStrategy::RETURN_NULL);
+                        mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
 
   if (_fine_grain_regions == NULL) {
     vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
@@ -372,17 +372,17 @@
                                                        _max_regions,
                                                        &_static_mem_size);
 
-  for (uint i = 0; i < n_par_rs; i++) {
-    for (uint j = 0; j < _max_regions; j++) {
-      set(i, j, InvalidCard);
-    }
-  }
+  invalidate(0, _max_regions);
 }
 
-void FromCardCache::shrink(uint new_num_regions) {
+void FromCardCache::invalidate(uint start_idx, size_t new_num_regions) {
+  guarantee((size_t)start_idx + new_num_regions <= max_uintx,
+            err_msg("Trying to invalidate beyond maximum region, from %u size "SIZE_FORMAT,
+                    start_idx, new_num_regions));
   for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
-    assert(new_num_regions <= _max_regions, "Must be within max.");
-    for (uint j = new_num_regions; j < _max_regions; j++) {
+    uint end_idx = (start_idx + (uint)new_num_regions);
+    assert(end_idx <= _max_regions, "Must be within max.");
+    for (uint j = start_idx; j < end_idx; j++) {
       set(i, j, InvalidCard);
     }
   }
@@ -406,12 +406,12 @@
   }
 }
 
-void OtherRegionsTable::init_from_card_cache(uint max_regions) {
+void OtherRegionsTable::initialize(uint max_regions) {
   FromCardCache::initialize(HeapRegionRemSet::num_par_rem_sets(), max_regions);
 }
 
-void OtherRegionsTable::shrink_from_card_cache(uint new_num_regions) {
-  FromCardCache::shrink(new_num_regions);
+void OtherRegionsTable::invalidate(uint start_idx, size_t num_regions) {
+  FromCardCache::invalidate(start_idx, num_regions);
 }
 
 void OtherRegionsTable::print_from_card_cache() {
@@ -419,7 +419,7 @@
 }
 
 void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
-  uint cur_hrs_ind = hr()->hrs_index();
+  uint cur_hrm_ind = hr()->hrm_index();
 
   if (G1TraceHeapRegionRememberedSet) {
     gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
@@ -434,10 +434,10 @@
   if (G1TraceHeapRegionRememberedSet) {
     gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = "INT32_FORMAT")",
                   hr()->bottom(), from_card,
-                  FromCardCache::at((uint)tid, cur_hrs_ind));
+                  FromCardCache::at((uint)tid, cur_hrm_ind));
   }
 
-  if (FromCardCache::contains_or_replace((uint)tid, cur_hrs_ind, from_card)) {
+  if (FromCardCache::contains_or_replace((uint)tid, cur_hrm_ind, from_card)) {
     if (G1TraceHeapRegionRememberedSet) {
       gclog_or_tty->print_cr("  from-card cache hit.");
     }
@@ -447,10 +447,10 @@
 
   // Note that this may be a continued H region.
   HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
-  RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrs_index();
+  RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index();
 
   // If the region is already coarsened, return.
-  if (_coarse_map.at(from_hrs_ind)) {
+  if (_coarse_map.at(from_hrm_ind)) {
     if (G1TraceHeapRegionRememberedSet) {
       gclog_or_tty->print_cr("  coarse map hit.");
     }
@@ -459,7 +459,7 @@
   }
 
   // Otherwise find a per-region table to add it to.
-  size_t ind = from_hrs_ind & _mod_max_fine_entries_mask;
+  size_t ind = from_hrm_ind & _mod_max_fine_entries_mask;
   PerRegionTable* prt = find_region_table(ind, from_hr);
   if (prt == NULL) {
     MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
@@ -474,7 +474,7 @@
       assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
              "Must be in range.");
       if (G1HRRSUseSparseTable &&
-          _sparse_table.add_card(from_hrs_ind, card_index)) {
+          _sparse_table.add_card(from_hrm_ind, card_index)) {
         if (G1RecordHRRSOops) {
           HeapRegionRemSet::record(hr(), from);
           if (G1TraceHeapRegionRememberedSet) {
@@ -493,8 +493,8 @@
       } else {
         if (G1TraceHeapRegionRememberedSet) {
           gclog_or_tty->print_cr("   [tid %d] sparse table entry "
-                        "overflow(f: %d, t: %d)",
-                        tid, from_hrs_ind, cur_hrs_ind);
+                        "overflow(f: %d, t: %u)",
+                        tid, from_hrm_ind, cur_hrm_ind);
         }
       }
 
@@ -515,7 +515,7 @@
 
       if (G1HRRSUseSparseTable) {
         // Transfer from sparse to fine-grain.
-        SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrs_ind);
+        SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrm_ind);
         assert(sprt_entry != NULL, "There should have been an entry");
         for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
           CardIdx_t c = sprt_entry->card(i);
@@ -524,7 +524,7 @@
           }
         }
         // Now we can delete the sparse entry.
-        bool res = _sparse_table.delete_entry(from_hrs_ind);
+        bool res = _sparse_table.delete_entry(from_hrm_ind);
         assert(res, "It should have been there.");
       }
     }
@@ -606,9 +606,9 @@
   guarantee(max != NULL, "Since _n_fine_entries > 0");
 
   // Set the corresponding coarse bit.
-  size_t max_hrs_index = (size_t) max->hr()->hrs_index();
-  if (!_coarse_map.at(max_hrs_index)) {
-    _coarse_map.at_put(max_hrs_index, true);
+  size_t max_hrm_index = (size_t) max->hr()->hrm_index();
+  if (!_coarse_map.at(max_hrm_index)) {
+    _coarse_map.at_put(max_hrm_index, true);
     _n_coarse_entries++;
     if (G1TraceHeapRegionRememberedSet) {
       gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] "
@@ -632,7 +632,7 @@
                               BitMap* region_bm, BitMap* card_bm) {
   // First eliminate garbage regions from the coarse map.
   if (G1RSScrubVerbose) {
-    gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrs_index());
+    gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrm_index());
   }
 
   assert(_coarse_map.size() == region_bm->size(), "Precondition");
@@ -655,9 +655,9 @@
       // If the entire region is dead, eliminate.
       if (G1RSScrubVerbose) {
         gclog_or_tty->print_cr("     For other region %u:",
-                               cur->hr()->hrs_index());
+                               cur->hr()->hrm_index());
       }
-      if (!region_bm->at((size_t) cur->hr()->hrs_index())) {
+      if (!region_bm->at((size_t) cur->hr()->hrm_index())) {
         *prev = nxt;
         cur->set_collision_list_next(NULL);
         _n_fine_entries--;
@@ -694,6 +694,9 @@
   clear_fcc();
 }
 
+bool OtherRegionsTable::is_empty() const {
+  return occ_sparse() == 0 && occ_coarse() == 0 && _first_all_fine_prts == NULL;
+}
 
 size_t OtherRegionsTable::occupied() const {
   size_t sum = occ_fine();
@@ -735,7 +738,7 @@
   sum += (sizeof(PerRegionTable*) * _max_fine_entries);
   sum += (_coarse_map.size_in_words() * HeapWordSize);
   sum += (_sparse_table.mem_size());
-  sum += sizeof(*this) - sizeof(_sparse_table); // Avoid double counting above.
+  sum += sizeof(OtherRegionsTable) - sizeof(_sparse_table); // Avoid double counting above.
   return sum;
 }
 
@@ -748,7 +751,7 @@
 }
 
 void OtherRegionsTable::clear_fcc() {
-  FromCardCache::clear(hr()->hrs_index());
+  FromCardCache::clear(hr()->hrm_index());
 }
 
 void OtherRegionsTable::clear() {
@@ -770,30 +773,6 @@
   clear_fcc();
 }
 
-void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
-  MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
-  size_t hrs_ind = (size_t) from_hr->hrs_index();
-  size_t ind = hrs_ind & _mod_max_fine_entries_mask;
-  if (del_single_region_table(ind, from_hr)) {
-    assert(!_coarse_map.at(hrs_ind), "Inv");
-  } else {
-    _coarse_map.par_at_put(hrs_ind, 0);
-  }
-  // Check to see if any of the fcc entries come from here.
-  uint hr_ind = hr()->hrs_index();
-  for (uint tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
-    int fcc_ent = FromCardCache::at(tid, hr_ind);
-    if (fcc_ent != FromCardCache::InvalidCard) {
-      HeapWord* card_addr = (HeapWord*)
-        (uintptr_t(fcc_ent) << CardTableModRefBS::card_shift);
-      if (hr()->is_in_reserved(card_addr)) {
-        // Clear the from card cache.
-        FromCardCache::set(tid, hr_ind, FromCardCache::InvalidCard);
-      }
-    }
-  }
-}
-
 bool OtherRegionsTable::del_single_region_table(size_t ind,
                                                 HeapRegion* hr) {
   assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
@@ -823,8 +802,7 @@
 
 bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
   HeapRegion* hr = _g1h->heap_region_containing_raw(from);
-  if (hr == NULL) return false;
-  RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
+  RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index();
   // Is this region in the coarse map?
   if (_coarse_map.at(hr_ind)) return true;
 
@@ -861,8 +839,8 @@
 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                    HeapRegion* hr)
   : _bosa(bosa),
-    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #"UINT32_FORMAT, hr->hrs_index()), true),
-    _code_roots(), _other_regions(hr, &_m) {
+    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true),
+    _code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
   reset_for_par_iteration();
 }
 
@@ -945,123 +923,61 @@
 }
 
 // Code roots support
+//
+// The code root set is protected by two separate locking schemes.
+// When at a safepoint, the per-hrrs lock must be held during modifications,
+// except when doing a full gc.
+// When not at a safepoint, the CodeCache_lock must be held during modifications.
+// When concurrent readers access the contains() function
+// (during the evacuation phase), no removals are allowed.
 
 void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
   assert(nm != NULL, "sanity");
+  // Optimistic unlocked contains-check
+  if (!_code_roots.contains(nm)) {
+    MutexLockerEx ml(&_m, Mutex::_no_safepoint_check_flag);
+    add_strong_code_root_locked(nm);
+  }
+}
+
+void HeapRegionRemSet::add_strong_code_root_locked(nmethod* nm) {
+  assert(nm != NULL, "sanity");
   _code_roots.add(nm);
 }
 
 void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
   assert(nm != NULL, "sanity");
+  assert_locked_or_safepoint(CodeCache_lock);
+
+  MutexLockerEx ml(CodeCache_lock->owned_by_self() ? NULL : &_m, Mutex::_no_safepoint_check_flag);
   _code_roots.remove(nm);
+
   // Check that there were no duplicates
   guarantee(!_code_roots.contains(nm), "duplicate entry found");
 }
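
The locking comment above pairs a cheap membership check with a locked add, and takes the appropriate lock for removal. The real code performs the first check with no lock at all, relying on the G1-specific constraints spelled out in that comment; the portable stand-in below uses a reader/writer lock over a std::set instead, and is only a sketch of the shape, not of G1CodeRootSet itself.

    // Sketch only: cheap membership check before taking the exclusive lock,
    // in the spirit of add_strong_code_root()/add_strong_code_root_locked().
    #include <mutex>
    #include <set>
    #include <shared_mutex>

    class ToyRootSet {
      std::set<const void*>     _roots;
      mutable std::shared_mutex _m;

     public:
      bool contains(const void* nm) const {
        std::shared_lock<std::shared_mutex> guard(_m);  // read path: shared lock
        return _roots.count(nm) != 0;
      }

      void add(const void* nm) {
        if (contains(nm)) {
          return;                                       // already present, skip the writer lock
        }
        std::unique_lock<std::shared_mutex> guard(_m);  // write path: exclusive lock
        _roots.insert(nm);                              // a duplicate racing past the check is ignored
      }

      void remove(const void* nm) {
        std::unique_lock<std::shared_mutex> guard(_m);
        _roots.erase(nm);
      }
    };
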
 
-class NMethodMigrationOopClosure : public OopClosure {
-  G1CollectedHeap* _g1h;
-  HeapRegion* _from;
-  nmethod* _nm;
-
-  uint _num_self_forwarded;
-
-  template <class T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-      if (_from->is_in(obj)) {
-        // Reference still points into the source region.
-        // Since roots are immediately evacuated this means that
-        // we must have self forwarded the object
-        assert(obj->is_forwarded(),
-               err_msg("code roots should be immediately evacuated. "
-                       "Ref: "PTR_FORMAT", "
-                       "Obj: "PTR_FORMAT", "
-                       "Region: "HR_FORMAT,
-                       p, (void*) obj, HR_FORMAT_PARAMS(_from)));
-        assert(obj->forwardee() == obj,
-               err_msg("not self forwarded? obj = "PTR_FORMAT, (void*)obj));
-
-        // The object has been self forwarded.
-        // Note, if we're during an initial mark pause, there is
-        // no need to explicitly mark object. It will be marked
-        // during the regular evacuation failure handling code.
-        _num_self_forwarded++;
-      } else {
-        // The reference points into a promotion or to-space region
-        HeapRegion* to = _g1h->heap_region_containing(obj);
-        to->rem_set()->add_strong_code_root(_nm);
-      }
-    }
-  }
-
-public:
-  NMethodMigrationOopClosure(G1CollectedHeap* g1h, HeapRegion* from, nmethod* nm):
-    _g1h(g1h), _from(from), _nm(nm), _num_self_forwarded(0) {}
-
-  void do_oop(narrowOop* p) { do_oop_work(p); }
-  void do_oop(oop* p)       { do_oop_work(p); }
-
-  uint retain() { return _num_self_forwarded > 0; }
-};
-
-void HeapRegionRemSet::migrate_strong_code_roots() {
-  assert(hr()->in_collection_set(), "only collection set regions");
-  assert(!hr()->isHumongous(),
-         err_msg("humongous region "HR_FORMAT" should not have been added to the collection set",
-                 HR_FORMAT_PARAMS(hr())));
-
-  ResourceMark rm;
-
-  // List of code blobs to retain for this region
-  GrowableArray<nmethod*> to_be_retained(10);
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-  while (!_code_roots.is_empty()) {
-    nmethod *nm = _code_roots.pop();
-    if (nm != NULL) {
-      NMethodMigrationOopClosure oop_cl(g1h, hr(), nm);
-      nm->oops_do(&oop_cl);
-      if (oop_cl.retain()) {
-        to_be_retained.push(nm);
-      }
-    }
-  }
-
-  // Now push any code roots we need to retain
-  assert(to_be_retained.is_empty() || hr()->evacuation_failed(),
-         "Retained nmethod list must be empty or "
-         "evacuation of this region failed");
-
-  while (to_be_retained.is_nonempty()) {
-    nmethod* nm = to_be_retained.pop();
-    assert(nm != NULL, "sanity");
-    add_strong_code_root(nm);
-  }
-}
-
 void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
   _code_roots.nmethods_do(blk);
 }
 
+void HeapRegionRemSet::clean_strong_code_roots(HeapRegion* hr) {
+  _code_roots.clean(hr);
+}
+
 size_t HeapRegionRemSet::strong_code_roots_mem_size() {
   return _code_roots.mem_size();
 }
 
-//-------------------- Iteration --------------------
-
 HeapRegionRemSetIterator::HeapRegionRemSetIterator(HeapRegionRemSet* hrrs) :
   _hrrs(hrrs),
   _g1h(G1CollectedHeap::heap()),
   _coarse_map(&hrrs->_other_regions._coarse_map),
-  _fine_grain_regions(hrrs->_other_regions._fine_grain_regions),
   _bosa(hrrs->bosa()),
   _is(Sparse),
   // Set these values so that we increment to the first region.
   _coarse_cur_region_index(-1),
   _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
-  _cur_region_cur_card(0),
-  _fine_array_index(-1),
+  _cur_card_in_prt(HeapRegion::CardsPerRegion),
   _fine_cur_prt(NULL),
   _n_yielded_coarse(0),
   _n_yielded_fine(0),
@@ -1093,58 +1009,59 @@
   return true;
 }
 
-void HeapRegionRemSetIterator::fine_find_next_non_null_prt() {
-  // Otherwise, find the next bucket list in the array.
-  _fine_array_index++;
-  while (_fine_array_index < (int) OtherRegionsTable::_max_fine_entries) {
-    _fine_cur_prt = _fine_grain_regions[_fine_array_index];
-    if (_fine_cur_prt != NULL) return;
-    else _fine_array_index++;
-  }
-  assert(_fine_cur_prt == NULL, "Loop post");
-}
-
 bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
   if (fine_has_next()) {
-    _cur_region_cur_card =
-      _fine_cur_prt->_bm.get_next_one_offset(_cur_region_cur_card + 1);
+    _cur_card_in_prt =
+      _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
   }
-  while (!fine_has_next()) {
-    if (_cur_region_cur_card == (size_t) HeapRegion::CardsPerRegion) {
-      _cur_region_cur_card = 0;
-      _fine_cur_prt = _fine_cur_prt->collision_list_next();
+  if (_cur_card_in_prt == HeapRegion::CardsPerRegion) {
+    // _fine_cur_prt may still be NULL in case there are no PRTs at all for
+    // the remembered set.
+    if (_fine_cur_prt == NULL || _fine_cur_prt->next() == NULL) {
+      return false;
     }
-    if (_fine_cur_prt == NULL) {
-      fine_find_next_non_null_prt();
-      if (_fine_cur_prt == NULL) return false;
-    }
-    assert(_fine_cur_prt != NULL && _cur_region_cur_card == 0,
-           "inv.");
-    HeapWord* r_bot =
-      _fine_cur_prt->hr()->bottom();
-    _cur_region_card_offset = _bosa->index_for(r_bot);
-    _cur_region_cur_card = _fine_cur_prt->_bm.get_next_one_offset(0);
+    PerRegionTable* next_prt = _fine_cur_prt->next();
+    switch_to_prt(next_prt);
+    _cur_card_in_prt = _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
   }
-  assert(fine_has_next(), "Or else we exited the loop via the return.");
-  card_index = _cur_region_card_offset + _cur_region_cur_card;
+
+  card_index = _cur_region_card_offset + _cur_card_in_prt;
+  guarantee(_cur_card_in_prt < HeapRegion::CardsPerRegion,
+            err_msg("Card index "SIZE_FORMAT" must be within the region", _cur_card_in_prt));
   return true;
 }
 
 bool HeapRegionRemSetIterator::fine_has_next() {
-  return
-    _fine_cur_prt != NULL &&
-    _cur_region_cur_card < HeapRegion::CardsPerRegion;
+  return _cur_card_in_prt != HeapRegion::CardsPerRegion;
+}
+
+void HeapRegionRemSetIterator::switch_to_prt(PerRegionTable* prt) {
+  assert(prt != NULL, "Cannot switch to NULL prt");
+  _fine_cur_prt = prt;
+
+  HeapWord* r_bot = _fine_cur_prt->hr()->bottom();
+  _cur_region_card_offset = _bosa->index_for(r_bot);
+
+  // The bitmap scan for the PRT always scans from _cur_card_in_prt + 1.
+  // To avoid special-casing this start case, and not miss the first bitmap
+  // entry, initialize _cur_card_in_prt with -1 instead of 0.
+  _cur_card_in_prt = (size_t)-1;
 }
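
The (size_t)-1 initialization above works because the next bitmap scan always starts at the previous position plus one, and unsigned wrap-around makes (size_t)-1 + 1 equal to 0, so the very first search begins at bit 0 without a special case. A self-contained sketch of that convention over a plain bit vector (illustrative names only):

    // Sketch only: "next set bit strictly after the previous hit" iteration,
    // where the cursor starts at (size_t)-1 so the first search begins at 0.
    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Returns the index of the first set bit at or after 'from', or bits.size().
    static size_t next_one(const std::vector<bool>& bits, size_t from) {
      for (size_t i = from; i < bits.size(); i++) {
        if (bits[i]) return i;
      }
      return bits.size();
    }

    int main() {
      std::vector<bool> bits = {true, false, true, false};
      size_t cur = (size_t)-1;                        // so cur + 1 wraps to 0 below
      while ((cur = next_one(bits, cur + 1)) != bits.size()) {
        std::cout << "card " << cur << " is set\n";   // prints 0 and 2
      }
      return 0;
    }
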
 
 bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
   switch (_is) {
-  case Sparse:
+  case Sparse: {
     if (_sparse_iter.has_next(card_index)) {
       _n_yielded_sparse++;
       return true;
     }
     // Otherwise, deliberate fall-through
     _is = Fine;
+    PerRegionTable* initial_fine_prt = _hrrs->_other_regions._first_all_fine_prts;
+    if (initial_fine_prt != NULL) {
+      switch_to_prt(initial_fine_prt);
+    }
+  }
   case Fine:
     if (fine_has_next(card_index)) {
       _n_yielded_fine++;
@@ -1276,6 +1193,11 @@
 #ifndef PRODUCT
 void PerRegionTable::test_fl_mem_size() {
   PerRegionTable* dummy = alloc(NULL);
+
+  size_t min_prt_size = sizeof(void*) + dummy->bm()->size_in_words() * HeapWordSize;
+  assert(dummy->mem_size() > min_prt_size,
+         err_msg("PerRegionTable memory usage is suspiciously small, only has "SIZE_FORMAT" bytes. "
+                 "Should be at least "SIZE_FORMAT" bytes.", dummy->mem_size(), min_prt_size));
   free(dummy);
   guarantee(dummy->mem_size() == fl_mem_size(), "fl_mem_size() does not return the correct element size");
   // try to reset the state
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -84,7 +84,7 @@
 
   static void initialize(uint n_par_rs, uint max_num_regions);
 
-  static void shrink(uint new_num_regions);
+  static void invalidate(uint start_idx, size_t num_regions);
 
   static void print(outputStream* out = gclog_or_tty) PRODUCT_RETURN;
 
@@ -185,6 +185,9 @@
   // objects.
   void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
 
+  // Returns whether this remembered set (and all sub-sets) contain no entries.
+  bool is_empty() const;
+
   size_t occupied() const;
   size_t occ_fine() const;
   size_t occ_coarse() const;
@@ -206,18 +209,15 @@
   // Specifically clear the from_card_cache.
   void clear_fcc();
 
-  // "from_hr" is being cleared; remove any entries from it.
-  void clear_incoming_entry(HeapRegion* from_hr);
-
   void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task);
 
   // Declare the heap size (in # of regions) to the OtherRegionsTable.
   // (Uses it to initialize from_card_cache).
-  static void init_from_card_cache(uint max_regions);
+  static void initialize(uint max_regions);
 
-  // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
-  // Make sure any entries for higher regions are invalid.
-  static void shrink_from_card_cache(uint new_num_regions);
+  // Declares that regions i, where start_idx <= i < start_idx + num_regions, are
+  // not in use. Make sure that any entries for these regions are invalid.
+  static void invalidate(uint start_idx, size_t num_regions);
 
   static void print_from_card_cache();
 };
@@ -272,6 +272,10 @@
     return _other_regions.hr();
   }
 
+  bool is_empty() const {
+    return (strong_code_roots_list_length() == 0) && _other_regions.is_empty();
+  }
+
   size_t occupied() {
     MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
     return occupied_locked();
@@ -342,7 +346,7 @@
     return _other_regions.mem_size()
       // This correction is necessary because the above includes the second
       // part.
-      + (sizeof(this) - sizeof(OtherRegionsTable))
+      + (sizeof(HeapRegionRemSet) - sizeof(OtherRegionsTable))
       + strong_code_roots_mem_size();
   }
 
@@ -355,7 +359,7 @@
   // Returns the memory occupancy of all free_list data structures associated
   // with remembered sets.
   static size_t fl_mem_size() {
-    return OtherRegionsTable::fl_mem_size() + G1CodeRootSet::fl_mem_size();
+    return OtherRegionsTable::fl_mem_size();
   }
 
   bool contains_reference(OopOrNarrowOopStar from) const {
@@ -365,20 +369,17 @@
   // Routines for managing the list of code roots that point into
   // the heap region that owns this RSet.
   void add_strong_code_root(nmethod* nm);
+  void add_strong_code_root_locked(nmethod* nm);
   void remove_strong_code_root(nmethod* nm);
 
-  // During a collection, migrate the successfully evacuated strong
-  // code roots that referenced into the region that owns this RSet
-  // to the RSets of the new regions that they now point into.
-  // Unsuccessfully evacuated code roots are not migrated.
-  void migrate_strong_code_roots();
-
   // Applies blk->do_code_blob() to each of the entries in
   // the strong code roots list
   void strong_code_roots_do(CodeBlobClosure* blk) const;
 
+  void clean_strong_code_roots(HeapRegion* hr);
+
   // Returns the number of elements in the strong code roots list
-  size_t strong_code_roots_list_length() {
+  size_t strong_code_roots_list_length() const {
     return _code_roots.length();
   }
 
@@ -400,13 +401,11 @@
   // Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
   // (Uses it to initialize from_card_cache).
   static void init_heap(uint max_regions) {
-    G1CodeRootSet::initialize();
-    OtherRegionsTable::init_from_card_cache(max_regions);
+    OtherRegionsTable::initialize(max_regions);
   }
 
-  // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
-  static void shrink_heap(uint new_n_regs) {
-    OtherRegionsTable::shrink_from_card_cache(new_n_regs);
+  static void invalidate(uint start_idx, uint num_regions) {
+    OtherRegionsTable::invalidate(start_idx, num_regions);
   }
 
 #ifndef PRODUCT
@@ -433,26 +432,24 @@
 };
 
 class HeapRegionRemSetIterator : public StackObj {
-
-  // The region RSet over which we're iterating.
+ private:
+  // The region RSet over which we are iterating.
   HeapRegionRemSet* _hrrs;
 
   // Local caching of HRRS fields.
   const BitMap*             _coarse_map;
-  PerRegionTable**          _fine_grain_regions;
 
   G1BlockOffsetSharedArray* _bosa;
   G1CollectedHeap*          _g1h;
 
-  // The number yielded since initialization.
+  // The number of cards yielded since initialization.
   size_t _n_yielded_fine;
   size_t _n_yielded_coarse;
   size_t _n_yielded_sparse;
 
-  // Indicates what granularity of table that we're currently iterating over.
+  // Indicates which granularity of table we are currently iterating over.
   // We start iterating over the sparse table, progress to the fine grain
   // table, and then finish with the coarse table.
-  // See HeapRegionRemSetIterator::has_next().
   enum IterState {
     Sparse,
     Fine,
@@ -460,38 +457,30 @@
   };
   IterState _is;
 
-  // In both kinds of iteration, heap offset of first card of current
-  // region.
+  // For both Coarse and Fine remembered set iteration this contains the
+  // first card number of the heap region we currently iterate over.
   size_t _cur_region_card_offset;
-  // Card offset within cur region.
-  size_t _cur_region_cur_card;
 
-  // Coarse table iteration fields:
-
-  // Current region index;
+  // Current region index for the Coarse remembered set iteration.
   int    _coarse_cur_region_index;
   size_t _coarse_cur_region_cur_card;
 
   bool coarse_has_next(size_t& card_index);
 
-  // Fine table iteration fields:
-
-  // Index of bucket-list we're working on.
-  int _fine_array_index;
+  // The PRT we are currently iterating over.
+  PerRegionTable* _fine_cur_prt;
+  // Card offset within the current PRT.
+  size_t _cur_card_in_prt;
 
-  // Per Region Table we're doing within current bucket list.
-  PerRegionTable* _fine_cur_prt;
-
-  /* SparsePRT::*/ SparsePRTIter _sparse_iter;
-
-  void fine_find_next_non_null_prt();
-
+  // Update internal variables when switching to the given PRT.
+  void switch_to_prt(PerRegionTable* prt);
   bool fine_has_next();
   bool fine_has_next(size_t& card_index);
 
-public:
-  // We require an iterator to be initialized before use, so the
-  // constructor does little.
+  // The Sparse remembered set iterator.
+  SparsePRTIter _sparse_iter;
+
+ public:
   HeapRegionRemSetIterator(HeapRegionRemSet* hrrs);
 
   // If there remains one or more cards to be yielded, returns true and
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,261 +0,0 @@
-/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc_implementation/g1/heapRegion.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
-#include "gc_implementation/g1/heapRegionSet.hpp"
-#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
-#include "memory/allocation.hpp"
-
-// Private
-
-uint HeapRegionSeq::find_contiguous_from(uint from, uint num) {
-  uint len = length();
-  assert(num > 1, "use this only for sequences of length 2 or greater");
-  assert(from <= len,
-         err_msg("from: %u should be valid and <= than %u", from, len));
-
-  uint curr = from;
-  uint first = G1_NULL_HRS_INDEX;
-  uint num_so_far = 0;
-  while (curr < len && num_so_far < num) {
-    if (at(curr)->is_empty()) {
-      if (first == G1_NULL_HRS_INDEX) {
-        first = curr;
-        num_so_far = 1;
-      } else {
-        num_so_far += 1;
-      }
-    } else {
-      first = G1_NULL_HRS_INDEX;
-      num_so_far = 0;
-    }
-    curr += 1;
-  }
-  assert(num_so_far <= num, "post-condition");
-  if (num_so_far == num) {
-    // we found enough space for the humongous object
-    assert(from <= first && first < len, "post-condition");
-    assert(first < curr && (curr - first) == num, "post-condition");
-    for (uint i = first; i < first + num; ++i) {
-      assert(at(i)->is_empty(), "post-condition");
-    }
-    return first;
-  } else {
-    // we failed to find enough space for the humongous object
-    return G1_NULL_HRS_INDEX;
-  }
-}
-
-// Public
-
-void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end) {
-  assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0,
-         "bottom should be heap region aligned");
-  assert((uintptr_t) end % HeapRegion::GrainBytes == 0,
-         "end should be heap region aligned");
-
-  _next_search_index = 0;
-  _allocated_length = 0;
-
-  _regions.initialize(bottom, end, HeapRegion::GrainBytes);
-}
-
-MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
-                                   HeapWord* new_end,
-                                   FreeRegionList* list) {
-  assert(old_end < new_end, "don't call it otherwise");
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-  HeapWord* next_bottom = old_end;
-  assert(heap_bottom() <= next_bottom, "invariant");
-  while (next_bottom < new_end) {
-    assert(next_bottom < heap_end(), "invariant");
-    uint index = length();
-
-    assert(index < max_length(), "otherwise we cannot expand further");
-    if (index == 0) {
-      // We have not allocated any regions so far
-      assert(next_bottom == heap_bottom(), "invariant");
-    } else {
-      // next_bottom should match the end of the last/previous region
-      assert(next_bottom == at(index - 1)->end(), "invariant");
-    }
-
-    if (index == _allocated_length) {
-      // We have to allocate a new HeapRegion.
-      HeapRegion* new_hr = g1h->new_heap_region(index, next_bottom);
-      if (new_hr == NULL) {
-        // allocation failed, we bail out and return what we have done so far
-        return MemRegion(old_end, next_bottom);
-      }
-      assert(_regions.get_by_index(index) == NULL, "invariant");
-      _regions.set_by_index(index, new_hr);
-      increment_allocated_length();
-    }
-    // Have to increment the length first, otherwise we will get an
-    // assert failure at(index) below.
-    increment_length();
-    HeapRegion* hr = at(index);
-    list->add_as_tail(hr);
-
-    next_bottom = hr->end();
-  }
-  assert(next_bottom == new_end, "post-condition");
-  return MemRegion(old_end, next_bottom);
-}
-
-uint HeapRegionSeq::free_suffix() {
-  uint res = 0;
-  uint index = length();
-  while (index > 0) {
-    index -= 1;
-    if (!at(index)->is_empty()) {
-      break;
-    }
-    res += 1;
-  }
-  return res;
-}
-
-uint HeapRegionSeq::find_contiguous(uint num) {
-  assert(num > 1, "use this only for sequences of length 2 or greater");
-  assert(_next_search_index <= length(),
-         err_msg("_next_search_index: %u should be valid and <= than %u",
-                 _next_search_index, length()));
-
-  uint start = _next_search_index;
-  uint res = find_contiguous_from(start, num);
-  if (res == G1_NULL_HRS_INDEX && start > 0) {
-    // Try starting from the beginning. If _next_search_index was 0,
-    // no point in doing this again.
-    res = find_contiguous_from(0, num);
-  }
-  if (res != G1_NULL_HRS_INDEX) {
-    assert(res < length(), err_msg("res: %u should be valid", res));
-    _next_search_index = res + num;
-    assert(_next_search_index <= length(),
-           err_msg("_next_search_index: %u should be valid and <= than %u",
-                   _next_search_index, length()));
-  }
-  return res;
-}
-
-void HeapRegionSeq::iterate(HeapRegionClosure* blk) const {
-  iterate_from((HeapRegion*) NULL, blk);
-}
-
-void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
-  uint hr_index = 0;
-  if (hr != NULL) {
-    hr_index = hr->hrs_index();
-  }
-
-  uint len = length();
-  for (uint i = hr_index; i < len; i += 1) {
-    bool res = blk->doHeapRegion(at(i));
-    if (res) {
-      blk->incomplete();
-      return;
-    }
-  }
-  for (uint i = 0; i < hr_index; i += 1) {
-    bool res = blk->doHeapRegion(at(i));
-    if (res) {
-      blk->incomplete();
-      return;
-    }
-  }
-}
-
-uint HeapRegionSeq::shrink_by(uint num_regions_to_remove) {
-  // Reset this in case it's currently pointing into the regions that
-  // we just removed.
-  _next_search_index = 0;
-
-  assert(length() > 0, "the region sequence should not be empty");
-  assert(length() <= _allocated_length, "invariant");
-  assert(_allocated_length > 0, "we should have at least one region committed");
-  assert(num_regions_to_remove < length(), "We should never remove all regions");
-
-  uint i = 0;
-  for (; i < num_regions_to_remove; i++) {
-    HeapRegion* cur = at(length() - 1);
-
-    if (!cur->is_empty()) {
-      // We have to give up if the region can not be moved
-      break;
-  }
-    assert(!cur->isHumongous(), "Humongous regions should not be empty");
-
-    decrement_length();
-  }
-  return i;
-}
-
-#ifndef PRODUCT
-void HeapRegionSeq::verify_optional() {
-  guarantee(length() <= _allocated_length,
-            err_msg("invariant: _length: %u _allocated_length: %u",
-                    length(), _allocated_length));
-  guarantee(_allocated_length <= max_length(),
-            err_msg("invariant: _allocated_length: %u _max_length: %u",
-                    _allocated_length, max_length()));
-  guarantee(_next_search_index <= length(),
-            err_msg("invariant: _next_search_index: %u _length: %u",
-                    _next_search_index, length()));
-
-  HeapWord* prev_end = heap_bottom();
-  for (uint i = 0; i < _allocated_length; i += 1) {
-    HeapRegion* hr = _regions.get_by_index(i);
-    guarantee(hr != NULL, err_msg("invariant: i: %u", i));
-    guarantee(hr->bottom() == prev_end,
-              err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
-                      i, HR_FORMAT_PARAMS(hr), p2i(prev_end)));
-    guarantee(hr->hrs_index() == i,
-              err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
-    if (i < length()) {
-      // Asserts will fire if i is >= _length
-      HeapWord* addr = hr->bottom();
-      guarantee(addr_to_region(addr) == hr, "sanity");
-      guarantee(addr_to_region_unsafe(addr) == hr, "sanity");
-    } else {
-      guarantee(hr->is_empty(), "sanity");
-      guarantee(!hr->isHumongous(), "sanity");
-      // using assert instead of guarantee here since containing_set()
-      // is only available in non-product builds.
-      assert(hr->containing_set() == NULL, "sanity");
-    }
-    if (hr->startsHumongous()) {
-      prev_end = hr->orig_end();
-    } else {
-      prev_end = hr->end();
-    }
-  }
-  for (uint i = _allocated_length; i < max_length(); i += 1) {
-    guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
-  }
-}
-#endif // PRODUCT
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,158 +0,0 @@
-/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
-
-#include "gc_implementation/g1/g1BiasedArray.hpp"
-
-class HeapRegion;
-class HeapRegionClosure;
-class FreeRegionList;
-
-class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
- protected:
-   virtual HeapRegion* default_value() const { return NULL; }
-};
-
-// This class keeps track of the region metadata (i.e., HeapRegion
-// instances). They are kept in the _regions array in address
-// order. A region's index in the array corresponds to its index in
-// the heap (i.e., 0 is the region at the bottom of the heap, 1 is
-// the one after it, etc.). Two regions that are consecutive in the
-// array should also be adjacent in the address space (i.e.,
-// region(i).end() == region(i+1).bottom().
-//
-// We create a HeapRegion when we commit the region's address space
-// for the first time. When we uncommit the address space of a
-// region we retain the HeapRegion to be able to re-use it in the
-// future (in case we recommit it).
-//
-// We keep track of three lengths:
-//
-// * _committed_length (returned by length()) is the number of currently
-//   committed regions.
-// * _allocated_length (not exposed outside this class) is the
-//   number of regions for which we have HeapRegions.
-// * max_length() returns the maximum number of regions the heap can have.
-//
-// and maintain that: _committed_length <= _allocated_length <= max_length()
-
-class HeapRegionSeq: public CHeapObj<mtGC> {
-  friend class VMStructs;
-
-  G1HeapRegionTable _regions;
-
-  // The number of regions committed in the heap.
-  uint _committed_length;
-
-  // A hint for which index to start searching from for humongous
-  // allocations.
-  uint _next_search_index;
-
-  // The number of regions for which we have allocated HeapRegions for.
-  uint _allocated_length;
-
-  // Find a contiguous set of empty regions of length num, starting
-  // from the given index.
-  uint find_contiguous_from(uint from, uint num);
-
-  void increment_allocated_length() {
-    assert(_allocated_length < max_length(), "pre-condition");
-    _allocated_length++;
-  }
-
-  void increment_length() {
-    assert(length() < max_length(), "pre-condition");
-    _committed_length++;
-  }
-
-  void decrement_length() {
-    assert(length() > 0, "pre-condition");
-    _committed_length--;
-  }
-
-  HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
-  HeapWord* heap_end() const {return _regions.end_address_mapped(); }
-
- public:
-  // Empty contructor, we'll initialize it with the initialize() method.
-  HeapRegionSeq() : _regions(), _committed_length(0), _next_search_index(0), _allocated_length(0) { }
-
-  void initialize(HeapWord* bottom, HeapWord* end);
-
-  // Return the HeapRegion at the given index. Assume that the index
-  // is valid.
-  inline HeapRegion* at(uint index) const;
-
-  // If addr is within the committed space return its corresponding
-  // HeapRegion, otherwise return NULL.
-  inline HeapRegion* addr_to_region(HeapWord* addr) const;
-
-  // Return the HeapRegion that corresponds to the given
-  // address. Assume the address is valid.
-  inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
-
-  // Return the number of regions that have been committed in the heap.
-  uint length() const { return _committed_length; }
-
-  // Return the maximum number of regions in the heap.
-  uint max_length() const { return (uint)_regions.length(); }
-
-  // Expand the sequence to reflect that the heap has grown from
-  // old_end to new_end. Either create new HeapRegions, or re-use
-  // existing ones, and return them in the given list. Returns the
-  // memory region that covers the newly-created regions. If a
-  // HeapRegion allocation fails, the result memory region might be
-  // smaller than the desired one.
-  MemRegion expand_by(HeapWord* old_end, HeapWord* new_end,
-                      FreeRegionList* list);
-
-  // Return the number of contiguous regions at the end of the sequence
-  // that are available for allocation.
-  uint free_suffix();
-
-  // Find a contiguous set of empty regions of length num and return
-  // the index of the first region or G1_NULL_HRS_INDEX if the
-  // search was unsuccessful.
-  uint find_contiguous(uint num);
-
-  // Apply blk->doHeapRegion() on all committed regions in address order,
-  // terminating the iteration early if doHeapRegion() returns true.
-  void iterate(HeapRegionClosure* blk) const;
-
-  // As above, but start the iteration from hr and loop around. If hr
-  // is NULL, we start from the first region in the heap.
-  void iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const;
-
-  // Tag as uncommitted as many regions that are completely free as
-  // possible, up to num_regions_to_remove, from the suffix of the committed
-  // sequence. Return the actual number of removed regions.
-  uint shrink_by(uint num_regions_to_remove);
-
-  // Do some sanity checking.
-  void verify_optional() PRODUCT_RETURN;
-};
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
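
The deleted header above documents the invariant that the sequence maintains: _committed_length <= _allocated_length <= max_length(). A minimal stand-alone sketch of that bookkeeping follows; the Lengths struct and its capacity are invented for illustration and are not the HotSpot fields.

    #include <cassert>
    #include <cstdio>

    // Hypothetical stand-in for the three lengths tracked by the old HeapRegionSeq:
    // committed <= allocated <= max must hold at all times.
    struct Lengths {
      unsigned committed = 0;  // regions currently committed (length())
      unsigned allocated = 0;  // regions that have a HeapRegion object
      unsigned max = 4;        // assumed maximum number of regions

      void increment_allocated() { assert(allocated < max); allocated++; }
      void increment_committed() { assert(committed < allocated); committed++; }
      void decrement_committed() { assert(committed > 0); committed--; }
      void check() const { assert(committed <= allocated && allocated <= max); }
    };

    int main() {
      Lengths l;
      l.increment_allocated();   // allocate the HeapRegion object first...
      l.increment_committed();   // ...then count the region as committed
      l.check();
      printf("committed=%u allocated=%u max=%u\n", l.committed, l.allocated, l.max);
      return 0;
    }
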
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
-
-#include "gc_implementation/g1/heapRegion.hpp"
-#include "gc_implementation/g1/heapRegionSeq.hpp"
-
-inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
-  HeapRegion* hr = _regions.get_by_address(addr);
-  assert(hr != NULL, "invariant");
-  return hr;
-}
-
-inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
-  if (addr != NULL && addr < heap_end()) {
-    assert(addr >= heap_bottom(),
-          err_msg("addr: " PTR_FORMAT " bottom: " PTR_FORMAT, p2i(addr), p2i(heap_bottom())));
-    return addr_to_region_unsafe(addr);
-  }
-  return NULL;
-}
-
-inline HeapRegion* HeapRegionSeq::at(uint index) const {
-  assert(index < length(), "pre-condition");
-  HeapRegion* hr = _regions.get_by_index(index);
-  assert(hr != NULL, "sanity");
-  assert(hr->hrs_index() == index, "sanity");
-  return hr;
-}
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
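
The deleted inline functions above resolve a heap address to its HeapRegion through the biased mapped array. Conceptually the lookup is a constant-time subtract-and-divide by the region size; the sketch below restates that arithmetic under assumed values (the base address, region size and region count are invented, and this is not the G1BiasedMappedArray API).

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Assumed layout: a contiguous reserved range carved into equally sized regions.
    static const uint64_t kHeapBottom  = 0x100000000ULL;   // hypothetical base
    static const uint64_t kRegionBytes = 1024 * 1024;      // hypothetical 1 MB regions
    static const unsigned kNumRegions  = 16;

    // Map an address to its region index in constant time.
    static unsigned addr_to_region_index(uint64_t addr) {
      assert(addr >= kHeapBottom);
      assert(addr <  kHeapBottom + (uint64_t)kNumRegions * kRegionBytes);
      return (unsigned)((addr - kHeapBottom) / kRegionBytes);
    }

    int main() {
      printf("%u\n", addr_to_region_index(kHeapBottom + 3 * kRegionBytes + 128)); // prints 3
      return 0;
    }
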
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
 
@@ -38,11 +39,13 @@
 
 #ifndef PRODUCT
 void HeapRegionSetBase::verify_region(HeapRegion* hr) {
-  assert(hr->containing_set() == this, err_msg("Inconsistent containing set for %u", hr->hrs_index()));
-  assert(!hr->is_young(), err_msg("Adding young region %u", hr->hrs_index())); // currently we don't use these sets for young regions
-  assert(hr->isHumongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrs_index(), name()));
-  assert(hr->is_empty() == regions_empty(), err_msg("Wrong empty state for region %u and set %s", hr->hrs_index(), name()));
-  assert(hr->rem_set()->verify_ready_for_par_iteration(), err_msg("Wrong iteration state %u", hr->hrs_index()));
+  assert(hr->containing_set() == this, err_msg("Inconsistent containing set for %u", hr->hrm_index()));
+  assert(!hr->is_young(), err_msg("Adding young region %u", hr->hrm_index())); // currently we don't use these sets for young regions
+  assert(hr->isHumongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrm_index(), name()));
+  assert(hr->is_free() == regions_free(), err_msg("Wrong free state for region %u and set %s", hr->hrm_index(), name()));
+  assert(!hr->is_free() || hr->is_empty(), err_msg("Free region %u is not empty for set %s", hr->hrm_index(), name()));
+  assert(!hr->is_empty() || hr->is_free(), err_msg("Empty region %u is not free for set %s", hr->hrm_index(), name()));
+  assert(hr->rem_set()->verify_ready_for_par_iteration(), err_msg("Wrong iteration state %u", hr->hrm_index()));
 }
 #endif
 
@@ -67,7 +70,7 @@
   // Do the basic verification first before we do the checks over the regions.
   HeapRegionSetBase::verify();
 
-  _verify_in_progress        = true;
+  _verify_in_progress = true;
 }
 
 void HeapRegionSetBase::verify_end() {
@@ -84,16 +87,16 @@
   out->print_cr("Set: %s ("PTR_FORMAT")", name(), this);
   out->print_cr("  Region Assumptions");
   out->print_cr("    humongous         : %s", BOOL_TO_STR(regions_humongous()));
-  out->print_cr("    empty             : %s", BOOL_TO_STR(regions_empty()));
+  out->print_cr("    free              : %s", BOOL_TO_STR(regions_free()));
   out->print_cr("  Attributes");
   out->print_cr("    length            : %14u", length());
   out->print_cr("    total capacity    : "SIZE_FORMAT_W(14)" bytes",
                 total_capacity_bytes());
 }
 
-HeapRegionSetBase::HeapRegionSetBase(const char* name, bool humongous, bool empty, HRSMtSafeChecker* mt_safety_checker)
+HeapRegionSetBase::HeapRegionSetBase(const char* name, bool humongous, bool free, HRSMtSafeChecker* mt_safety_checker)
   : _name(name), _verify_in_progress(false),
-    _is_humongous(humongous), _is_empty(empty), _mt_safety_checker(mt_safety_checker),
+    _is_humongous(humongous), _is_free(free), _mt_safety_checker(mt_safety_checker),
     _count()
 { }
 
@@ -103,62 +106,7 @@
 }
 
 void FreeRegionList::fill_in_ext_msg_extra(hrs_ext_msg* msg) {
-  msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, head(), tail());
-}
-
-void FreeRegionList::add_as_head_or_tail(FreeRegionList* from_list, bool as_head) {
-  check_mt_safety();
-  from_list->check_mt_safety();
-
-  verify_optional();
-  from_list->verify_optional();
-
-  if (from_list->is_empty()) {
-    return;
-  }
-
-#ifdef ASSERT
-  FreeRegionListIterator iter(from_list);
-  while (iter.more_available()) {
-    HeapRegion* hr = iter.get_next();
-    // In set_containing_set() we check that we either set the value
-    // from NULL to non-NULL or vice versa to catch bugs. So, we have
-    // to NULL it first before setting it to the value.
-    hr->set_containing_set(NULL);
-    hr->set_containing_set(this);
-  }
-#endif // ASSERT
-
-  if (_head == NULL) {
-    assert(length() == 0 && _tail == NULL, hrs_ext_msg(this, "invariant"));
-    _head = from_list->_head;
-    _tail = from_list->_tail;
-  } else {
-    assert(length() > 0 && _tail != NULL, hrs_ext_msg(this, "invariant"));
-    if (as_head) {
-      from_list->_tail->set_next(_head);
-      _head->set_prev(from_list->_tail);
-      _head = from_list->_head;
-    } else {
-      _tail->set_next(from_list->_head);
-      from_list->_head->set_prev(_tail);
-      _tail = from_list->_tail;
-    }
-  }
-
-  _count.increment(from_list->length(), from_list->total_capacity_bytes());
-  from_list->clear();
-
-  verify_optional();
-  from_list->verify_optional();
-}
-
-void FreeRegionList::add_as_head(FreeRegionList* from_list) {
-  add_as_head_or_tail(from_list, true /* as_head */);
-}
-
-void FreeRegionList::add_as_tail(FreeRegionList* from_list) {
-  add_as_head_or_tail(from_list, false /* as_head */);
+  msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, _head, _tail);
 }
 
 void FreeRegionList::remove_all() {
@@ -191,11 +139,6 @@
     return;
   }
 
-  if (is_empty()) {
-    add_as_head(from_list);
-    return;
-  }
-
   #ifdef ASSERT
   FreeRegionListIterator iter(from_list);
   while (iter.more_available()) {
@@ -208,39 +151,45 @@
   }
   #endif // ASSERT
 
-  HeapRegion* curr_to = _head;
-  HeapRegion* curr_from = from_list->_head;
+  if (is_empty()) {
+    assert(length() == 0 && _tail == NULL, hrs_ext_msg(this, "invariant"));
+    _head = from_list->_head;
+    _tail = from_list->_tail;
+  } else {
+    HeapRegion* curr_to = _head;
+    HeapRegion* curr_from = from_list->_head;
+
+    while (curr_from != NULL) {
+      while (curr_to != NULL && curr_to->hrm_index() < curr_from->hrm_index()) {
+        curr_to = curr_to->next();
+      }
 
-  while (curr_from != NULL) {
-    while (curr_to != NULL && curr_to->hrs_index() < curr_from->hrs_index()) {
-      curr_to = curr_to->next();
+      if (curr_to == NULL) {
+        // The rest of the from list should be added as tail
+        _tail->set_next(curr_from);
+        curr_from->set_prev(_tail);
+        curr_from = NULL;
+      } else {
+        HeapRegion* next_from = curr_from->next();
+
+        curr_from->set_next(curr_to);
+        curr_from->set_prev(curr_to->prev());
+        if (curr_to->prev() == NULL) {
+          _head = curr_from;
+        } else {
+          curr_to->prev()->set_next(curr_from);
+        }
+        curr_to->set_prev(curr_from);
+
+        curr_from = next_from;
+      }
     }
 
-    if (curr_to == NULL) {
-      // The rest of the from list should be added as tail
-      _tail->set_next(curr_from);
-      curr_from->set_prev(_tail);
-      curr_from = NULL;
-    } else {
-      HeapRegion* next_from = curr_from->next();
-
-      curr_from->set_next(curr_to);
-      curr_from->set_prev(curr_to->prev());
-      if (curr_to->prev() == NULL) {
-        _head = curr_from;
-      } else {
-        curr_to->prev()->set_next(curr_from);
-      }
-      curr_to->set_prev(curr_from);
-
-      curr_from = next_from;
+    if (_tail->hrm_index() < from_list->_tail->hrm_index()) {
+      _tail = from_list->_tail;
     }
   }
 
-  if (_tail->hrs_index() < from_list->_tail->hrs_index()) {
-    _tail = from_list->_tail;
-  }
-
   _count.increment(from_list->length(), from_list->total_capacity_bytes());
   from_list->clear();
 
@@ -248,68 +197,59 @@
   from_list->verify_optional();
 }
 
-void FreeRegionList::remove_all_pending(uint target_count) {
+void FreeRegionList::remove_starting_at(HeapRegion* first, uint num_regions) {
   check_mt_safety();
-  assert(target_count > 1, hrs_ext_msg(this, "pre-condition"));
+  assert(num_regions >= 1, hrs_ext_msg(this, "pre-condition"));
   assert(!is_empty(), hrs_ext_msg(this, "pre-condition"));
 
   verify_optional();
   DEBUG_ONLY(uint old_length = length();)
 
-  HeapRegion* curr = _head;
+  HeapRegion* curr = first;
   uint count = 0;
-  while (curr != NULL) {
+  while (count < num_regions) {
     verify_region(curr);
     HeapRegion* next = curr->next();
     HeapRegion* prev = curr->prev();
 
-    if (curr->pending_removal()) {
-      assert(count < target_count,
-             hrs_err_msg("[%s] should not come across more regions "
-                         "pending for removal than target_count: %u",
-                         name(), target_count));
+    assert(count < num_regions,
+           hrs_err_msg("[%s] should not come across more regions "
+                       "pending for removal than num_regions: %u",
+                       name(), num_regions));
 
-      if (prev == NULL) {
-        assert(_head == curr, hrs_ext_msg(this, "invariant"));
-        _head = next;
-      } else {
-        assert(_head != curr, hrs_ext_msg(this, "invariant"));
-        prev->set_next(next);
-      }
-      if (next == NULL) {
-        assert(_tail == curr, hrs_ext_msg(this, "invariant"));
-        _tail = prev;
-      } else {
-        assert(_tail != curr, hrs_ext_msg(this, "invariant"));
-        next->set_prev(prev);
-      }
-      if (_last == curr) {
-        _last = NULL;
-      }
+    if (prev == NULL) {
+      assert(_head == curr, hrs_ext_msg(this, "invariant"));
+      _head = next;
+    } else {
+      assert(_head != curr, hrs_ext_msg(this, "invariant"));
+      prev->set_next(next);
+    }
+    if (next == NULL) {
+      assert(_tail == curr, hrs_ext_msg(this, "invariant"));
+      _tail = prev;
+    } else {
+      assert(_tail != curr, hrs_ext_msg(this, "invariant"));
+      next->set_prev(prev);
+    }
+    if (_last == curr) {
+      _last = NULL;
+    }
 
-      curr->set_next(NULL);
-      curr->set_prev(NULL);
-      remove(curr);
-      curr->set_pending_removal(false);
-
-      count += 1;
+    curr->set_next(NULL);
+    curr->set_prev(NULL);
+    remove(curr);
 
-      // If we have come across the target number of regions we can
-      // just bail out. However, for debugging purposes, we can just
-      // carry on iterating to make sure there are not more regions
-      // tagged with pending removal.
-      DEBUG_ONLY(if (count == target_count) break;)
-    }
+    count++;
     curr = next;
   }
 
-  assert(count == target_count,
-         hrs_err_msg("[%s] count: %u should be == target_count: %u",
-                     name(), count, target_count));
-  assert(length() + target_count == old_length,
+  assert(count == num_regions,
+         hrs_err_msg("[%s] count: %u should be == num_regions: %u",
+                     name(), count, num_regions));
+  assert(length() + num_regions == old_length,
          hrs_err_msg("[%s] new length should be consistent "
-                     "new length: %u old length: %u target_count: %u",
-                     name(), length(), old_length, target_count));
+                     "new length: %u old length: %u num_regions: %u",
+                     name(), length(), old_length, num_regions));
 
   verify_optional();
 }
@@ -348,10 +288,12 @@
       hr->print_on(out);
     }
   }
+
+  out->cr();
 }
 
 void FreeRegionList::verify_list() {
-  HeapRegion* curr = head();
+  HeapRegion* curr = _head;
   HeapRegion* prev1 = NULL;
   HeapRegion* prev0 = NULL;
   uint count = 0;
@@ -369,8 +311,8 @@
     if (curr->next() != NULL) {
       guarantee(curr->next()->prev() == curr, "Next or prev pointers messed up");
     }
-    guarantee(curr->hrs_index() == 0 || curr->hrs_index() > last_index, "List should be sorted");
-    last_index = curr->hrs_index();
+    guarantee(curr->hrm_index() == 0 || curr->hrm_index() > last_index, "List should be sorted");
+    last_index = curr->hrm_index();
 
     capacity += curr->capacity();
 
@@ -379,7 +321,7 @@
     curr = curr->next();
   }
 
-  guarantee(tail() == prev0, err_msg("Expected %s to end with %u but it ended with %u.", name(), tail()->hrs_index(), prev0->hrs_index()));
+  guarantee(_tail == prev0, err_msg("Expected %s to end with %u but it ended with %u.", name(), _tail->hrm_index(), prev0->hrm_index()));
   guarantee(_tail == NULL || _tail->next() == NULL, "_tail should not have a next");
   guarantee(length() == count, err_msg("%s count mismatch. Expected %u, actual %u.", name(), length(), count));
   guarantee(total_capacity_bytes() == capacity, err_msg("%s capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
@@ -463,3 +405,41 @@
               "master humongous set MT safety protocol outside a safepoint");
   }
 }
+
+void FreeRegionList_test() {
+  FreeRegionList l("test");
+
+  const uint num_regions_in_test = 5;
+  // Create a fake heap. It does not need to be valid, as the HeapRegion constructor
+  // does not access it.
+  MemRegion heap(NULL, num_regions_in_test * HeapRegion::GrainWords);
+  // Allocate a fake BOT because the HeapRegion constructor initializes
+  // the BOT.
+  size_t bot_size = G1BlockOffsetSharedArray::compute_size(heap.word_size());
+  HeapWord* bot_data = NEW_C_HEAP_ARRAY(HeapWord, bot_size, mtGC);
+  ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(heap.word_size()));
+  G1RegionToSpaceMapper* bot_storage =
+    G1RegionToSpaceMapper::create_mapper(bot_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         G1BlockOffsetSharedArray::N_bytes,
+                                         mtGC);
+  G1BlockOffsetSharedArray oa(heap, bot_storage);
+  bot_storage->commit_regions(0, num_regions_in_test);
+  HeapRegion hr0(0, &oa, heap);
+  HeapRegion hr1(1, &oa, heap);
+  HeapRegion hr2(2, &oa, heap);
+  HeapRegion hr3(3, &oa, heap);
+  HeapRegion hr4(4, &oa, heap);
+  l.add_ordered(&hr1);
+  l.add_ordered(&hr0);
+  l.add_ordered(&hr3);
+  l.add_ordered(&hr4);
+  l.add_ordered(&hr2);
+  assert(l.length() == num_regions_in_test, "wrong length");
+  l.verify_list();
+
+  bot_storage->uncommit_regions(0, num_regions_in_test);
+  delete bot_storage;
+  FREE_C_HEAP_ARRAY(HeapWord, bot_data, mtGC);
+}
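
FreeRegionList::add_ordered() keeps the free list sorted by region index, which is what the new FreeRegionList_test above exercises. Below is a minimal sketch of that insertion discipline on a plain doubly linked list; the Node and OrderedList types are invented stand-ins, not the HeapRegion and FreeRegionList classes.

    #include <cstdio>

    struct Node {
      unsigned index;
      Node* prev;
      Node* next;
      explicit Node(unsigned i) : index(i), prev(nullptr), next(nullptr) {}
    };

    // Keep a doubly linked list sorted by ascending index, the same invariant
    // FreeRegionList::add_ordered() maintains via hrm_index().
    struct OrderedList {
      Node* head = nullptr;
      Node* tail = nullptr;

      void add_ordered(Node* n) {
        Node* curr = head;
        while (curr != nullptr && curr->index < n->index) {
          curr = curr->next;
        }
        if (curr == nullptr) {            // append at tail
          n->prev = tail;
          if (tail != nullptr) tail->next = n; else head = n;
          tail = n;
        } else {                          // insert before curr
          n->next = curr;
          n->prev = curr->prev;
          if (curr->prev != nullptr) curr->prev->next = n; else head = n;
          curr->prev = n;
        }
      }
    };

    int main() {
      Node r0(0), r1(1), r2(2), r3(3), r4(4);
      OrderedList l;
      // Same insertion order as the FreeRegionList_test above.
      l.add_ordered(&r1); l.add_ordered(&r0); l.add_ordered(&r3);
      l.add_ordered(&r4); l.add_ordered(&r2);
      for (Node* c = l.head; c != nullptr; c = c->next) printf("%u ", c->index); // 0 1 2 3 4
      printf("\n");
      return 0;
    }
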
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -81,7 +81,7 @@
   friend class VMStructs;
 private:
   bool _is_humongous;
-  bool _is_empty;
+  bool _is_free;
   HRSMtSafeChecker* _mt_safety_checker;
 
 protected:
@@ -102,9 +102,9 @@
   // not. Only used during verification.
   bool regions_humongous() { return _is_humongous; }
 
-  // Indicates whether all regions in the set should be empty or
+  // Indicates whether all regions in the set should be free or
   // not. Only used during verification.
-  bool regions_empty() { return _is_empty; }
+  bool regions_free() { return _is_free; }
 
   void check_mt_safety() {
     if (_mt_safety_checker != NULL) {
@@ -114,12 +114,12 @@
 
   virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg) { }
 
-  HeapRegionSetBase(const char* name, bool humongous, bool empty, HRSMtSafeChecker* mt_safety_checker);
+  HeapRegionSetBase(const char* name, bool humongous, bool free, HRSMtSafeChecker* mt_safety_checker);
 
 public:
   const char* name() { return _name; }
 
-  uint length() { return _count.length(); }
+  uint length() const { return _count.length(); }
 
   bool is_empty() { return _count.length() == 0; }
 
@@ -162,7 +162,7 @@
 // diagnosing failures.
 class hrs_ext_msg : public hrs_err_msg {
 public:
-  hrs_ext_msg(HeapRegionSetBase* set, const char* message) : hrs_err_msg("%s","") {
+  hrs_ext_msg(HeapRegionSetBase* set, const char* message) : hrs_err_msg("%s", "") {
     set->fill_in_ext_msg(this, message);
   }
 };
@@ -171,7 +171,7 @@
   do {                                                                        \
     assert(((_set1_)->regions_humongous() ==                                  \
                                             (_set2_)->regions_humongous()) && \
-           ((_set1_)->regions_empty() == (_set2_)->regions_empty()),          \
+           ((_set1_)->regions_free() == (_set2_)->regions_free()),            \
            hrs_err_msg("the contents of set %s and set %s should match",      \
                        (_set1_)->name(), (_set2_)->name()));                  \
   } while (0)
@@ -184,7 +184,7 @@
 class HeapRegionSet : public HeapRegionSetBase {
 public:
   HeapRegionSet(const char* name, bool humongous, HRSMtSafeChecker* mt_safety_checker):
-    HeapRegionSetBase(name, humongous, false /* empty */, mt_safety_checker) { }
+    HeapRegionSetBase(name, humongous, false /* free */, mt_safety_checker) { }
 
   void bulk_remove(const HeapRegionSetCount& removed) {
     _count.decrement(removed.length(), removed.capacity());
@@ -192,13 +192,9 @@
 };
 
 // A set that links all the regions added to it in a doubly-linked
-// list. We should try to avoid doing operations that iterate over
+// sorted list. We should try to avoid doing operations that iterate over
 // such lists in performance critical paths. Typically we should
-// add / remove one region at a time or concatenate two lists. There are
-// two ways to treat your lists, ordered and un-ordered. All un-ordered
-// operations are done in constant time. To keep a list ordered only use
-// add_ordered() to add elements to the list. If a list is not ordered
-// from start, there is no way to sort it later.
+// add / remove one region at a time or concatenate two lists.
 
 class FreeRegionListIterator;
 
@@ -210,13 +206,13 @@
   HeapRegion* _tail;
 
   // _last is used to keep track of where we added an element the last
-  // time in ordered lists. It helps to improve performance when adding
-  // several ordered items in a row.
+  // time. It helps to improve performance when adding several ordered items in a row.
   HeapRegion* _last;
 
   static uint _unrealistically_long_length;
 
-  void add_as_head_or_tail(FreeRegionList* from_list, bool as_head);
+  inline HeapRegion* remove_from_head_impl();
+  inline HeapRegion* remove_from_tail_impl();
 
 protected:
   virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg);
@@ -232,65 +228,33 @@
 
   void verify_list();
 
-  HeapRegion* head() { return _head; }
-  HeapRegion* tail() { return _tail; }
+#ifdef ASSERT
+  bool contains(HeapRegion* hr) const {
+    return hr->containing_set() == this;
+  }
+#endif
 
   static void set_unrealistically_long_length(uint len);
 
   // Add hr to the list. The region should not be a member of another set.
   // Assumes that the list is ordered and will preserve that order. The order
-  // is determined by hrs_index.
+  // is determined by hrm_index.
   inline void add_ordered(HeapRegion* hr);
 
-  // It adds hr to the list as the new head. The region should not be
-  // a member of another set.
-  inline void add_as_head(HeapRegion* hr);
-
-  // It adds hr to the list as the new tail. The region should not be
-  // a member of another set.
-  inline void add_as_tail(HeapRegion* hr);
-
-  // It removes and returns the head of the list. It assumes that the
-  // list is not empty so it will return a non-NULL value.
-  inline HeapRegion* remove_head();
-
-  // Convenience method.
-  inline HeapRegion* remove_head_or_null();
-
-  // Removes and returns the last element (_tail) of the list. It assumes
-  // that the list isn't empty so that it can return a non-NULL value.
-  inline HeapRegion* remove_tail();
-
-  // Convenience method
-  inline HeapRegion* remove_tail_or_null();
-
   // Removes from head or tail based on the given argument.
-  inline HeapRegion* remove_region(bool from_head);
+  HeapRegion* remove_region(bool from_head);
 
   // Merge two ordered lists. The result is also ordered. The order is
-  // determined by hrs_index.
+  // determined by hrm_index.
   void add_ordered(FreeRegionList* from_list);
 
-  // It moves the regions from from_list to this list and empties
-  // from_list. The new regions will appear in the same order as they
-  // were in from_list and be linked in the beginning of this list.
-  void add_as_head(FreeRegionList* from_list);
-
-  // It moves the regions from from_list to this list and empties
-  // from_list. The new regions will appear in the same order as they
-  // were in from_list and be linked in the end of this list.
-  void add_as_tail(FreeRegionList* from_list);
-
   // It empties the list by removing all regions from it.
   void remove_all();
 
-  // It removes all regions in the list that are pending for removal
-  // (i.e., they have been tagged with "pending_removal"). The list
-  // must not be empty, target_count should reflect the exact number
-  // of regions that are pending for removal in the list, and
-  // target_count should be > 1 (currently, we never need to remove a
-  // single region using this).
-  void remove_all_pending(uint target_count);
+  // Remove all (contiguous) regions from first to first + num_regions -1 from
+  // this list.
+  // Num_regions must be >= 1.
+  void remove_starting_at(HeapRegion* first, uint num_regions);
 
   virtual void verify();
 
@@ -298,7 +262,7 @@
 };
 
 // Iterator class that provides a convenient way to iterate over the
-// regions of a HeapRegionLinkedList instance.
+// regions of a FreeRegionList.
 
 class FreeRegionListIterator : public StackObj {
 private:
@@ -324,7 +288,7 @@
   }
 
   FreeRegionListIterator(FreeRegionList* list) : _curr(NULL), _list(list) {
-    _curr = list->head();
+    _curr = list->_head;
   }
 };
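
The ASSERT-only contains() added above answers membership from the region's containing_set back-pointer rather than by walking the list. A small sketch of that O(1) check follows, using hypothetical Region and RegionSet types rather than the HotSpot classes.

    #include <cassert>

    struct RegionSet;

    struct Region {
      RegionSet* containing_set = nullptr;
    };

    struct RegionSet {
      // The set records itself in the region; membership is a pointer compare.
      void add(Region* r)    { assert(r->containing_set == nullptr); r->containing_set = this; }
      void remove(Region* r) { assert(r->containing_set == this); r->containing_set = nullptr; }
      bool contains(const Region* r) const { return r->containing_set == this; }
    };

    int main() {
      RegionSet a, b;
      Region r;
      a.add(&r);
      assert(a.contains(&r) && !b.contains(&r));
      a.remove(&r);
      assert(!a.contains(&r));
      return 0;
    }
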
 
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -30,7 +30,8 @@
 inline void HeapRegionSetBase::add(HeapRegion* hr) {
   check_mt_safety();
   assert(hr->containing_set() == NULL, hrs_ext_msg(this, "should not already have a containing set %u"));
-  assert(hr->next() == NULL && hr->prev() == NULL, hrs_ext_msg(this, "should not already be linked"));
+  assert(hr->next() == NULL, hrs_ext_msg(this, "should not already be linked"));
+  assert(hr->prev() == NULL, hrs_ext_msg(this, "should not already be linked"));
 
   _count.increment(1u, hr->capacity());
   hr->set_containing_set(this);
@@ -40,7 +41,8 @@
 inline void HeapRegionSetBase::remove(HeapRegion* hr) {
   check_mt_safety();
   verify_region(hr);
-  assert(hr->next() == NULL && hr->prev() == NULL, hrs_ext_msg(this, "should already be unlinked"));
+  assert(hr->next() == NULL, hrs_ext_msg(this, "should already be unlinked"));
+  assert(hr->prev() == NULL, hrs_ext_msg(this, "should already be unlinked"));
 
   hr->set_containing_set(NULL);
   assert(_count.length() > 0, hrs_ext_msg(this, "pre-condition"));
@@ -48,8 +50,7 @@
 }
 
 inline void FreeRegionList::add_ordered(HeapRegion* hr) {
-  check_mt_safety();
-  assert((length() == 0 && _head == NULL && _tail == NULL) ||
+  assert((length() == 0 && _head == NULL && _tail == NULL && _last == NULL) ||
          (length() >  0 && _head != NULL && _tail != NULL),
          hrs_ext_msg(this, "invariant"));
   // add() will verify the region and check mt safety.
@@ -59,14 +60,14 @@
   if (_head != NULL) {
     HeapRegion* curr;
 
-    if (_last != NULL && _last->hrs_index() < hr->hrs_index()) {
+    if (_last != NULL && _last->hrm_index() < hr->hrm_index()) {
       curr = _last;
     } else {
       curr = _head;
     }
 
     // Find first entry with a Region Index larger than entry to insert.
-    while (curr != NULL && curr->hrs_index() < hr->hrs_index()) {
+    while (curr != NULL && curr->hrm_index() < hr->hrm_index()) {
       curr = curr->next();
     }
 
@@ -95,55 +96,48 @@
   _last = hr;
 }
 
-inline void FreeRegionList::add_as_head(HeapRegion* hr) {
-  assert((length() == 0 && _head == NULL && _tail == NULL) ||
-         (length() >  0 && _head != NULL && _tail != NULL),
-         hrs_ext_msg(this, "invariant"));
-  // add() will verify the region and check mt safety.
-  add(hr);
-
-  // Now link the region.
-  if (_head != NULL) {
-    hr->set_next(_head);
-    _head->set_prev(hr);
-  } else {
-    _tail = hr;
-  }
-  _head = hr;
-}
-
-inline void FreeRegionList::add_as_tail(HeapRegion* hr) {
-  check_mt_safety();
-  assert((length() == 0 && _head == NULL && _tail == NULL) ||
-         (length() >  0 && _head != NULL && _tail != NULL),
-         hrs_ext_msg(this, "invariant"));
-  // add() will verify the region and check mt safety.
-  add(hr);
-
-  // Now link the region.
-  if (_tail != NULL) {
-    _tail->set_next(hr);
-    hr->set_prev(_tail);
-  } else {
-    _head = hr;
-  }
-  _tail = hr;
-}
-
-inline HeapRegion* FreeRegionList::remove_head() {
-  assert(!is_empty(), hrs_ext_msg(this, "the list should not be empty"));
-  assert(length() > 0 && _head != NULL && _tail != NULL,
-         hrs_ext_msg(this, "invariant"));
-
-  // We need to unlink it first.
-  HeapRegion* hr = _head;
-  _head = hr->next();
+inline HeapRegion* FreeRegionList::remove_from_head_impl() {
+  HeapRegion* result = _head;
+  _head = result->next();
   if (_head == NULL) {
     _tail = NULL;
   } else {
     _head->set_prev(NULL);
   }
-  hr->set_next(NULL);
+  result->set_next(NULL);
+  return result;
+}
+
+inline HeapRegion* FreeRegionList::remove_from_tail_impl() {
+  HeapRegion* result = _tail;
+
+  _tail = result->prev();
+  if (_tail == NULL) {
+    _head = NULL;
+  } else {
+    _tail->set_next(NULL);
+  }
+  result->set_prev(NULL);
+  return result;
+}
+
+inline HeapRegion* FreeRegionList::remove_region(bool from_head) {
+  check_mt_safety();
+  verify_optional();
+
+  if (is_empty()) {
+    return NULL;
+  }
+  assert(length() > 0 && _head != NULL && _tail != NULL,
+         hrs_ext_msg(this, "invariant"));
+
+  HeapRegion* hr;
+
+  if (from_head) {
+    hr = remove_from_head_impl();
+  } else {
+    hr = remove_from_tail_impl();
+  }
 
   if (_last == hr) {
     _last = NULL;
@@ -154,56 +148,5 @@
   return hr;
 }
 
-inline HeapRegion* FreeRegionList::remove_head_or_null() {
-  check_mt_safety();
-  if (!is_empty()) {
-    return remove_head();
-  } else {
-    return NULL;
-  }
-}
-
-inline HeapRegion* FreeRegionList::remove_tail() {
-  assert(!is_empty(), hrs_ext_msg(this, "The list should not be empty"));
-  assert(length() > 0 && _head != NULL && _tail != NULL,
-         hrs_ext_msg(this, "invariant"));
-
-  // We need to unlink it first
-  HeapRegion* hr = _tail;
-
-  _tail = hr->prev();
-  if (_tail == NULL) {
-    _head = NULL;
-  } else {
-    _tail->set_next(NULL);
-  }
-  hr->set_prev(NULL);
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP
 
-  if (_last == hr) {
-    _last = NULL;
-  }
-
-  // remove() will verify the region and check mt safety.
-  remove(hr);
-  return hr;
-}
-
-inline HeapRegion* FreeRegionList::remove_tail_or_null() {
-  check_mt_safety();
-
-  if (!is_empty()) {
-    return remove_tail();
-  } else {
-    return NULL;
-  }
-}
-
-inline HeapRegion* FreeRegionList::remove_region(bool from_head) {
-  if (from_head) {
-    return remove_head_or_null();
-  } else {
-    return remove_tail_or_null();
-  }
-}
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP
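
In the inline file above, remove_head(_or_null) and remove_tail(_or_null) collapse into a single remove_region(bool from_head) that returns NULL on an empty list. A sketch of that consolidated behavior on a hypothetical doubly linked list (Node and List are invented stand-ins):

    #include <cstdio>

    struct Node {
      int id;
      Node* prev;
      Node* next;
      explicit Node(int i) : id(i), prev(nullptr), next(nullptr) {}
    };

    struct List {
      Node* head = nullptr;
      Node* tail = nullptr;

      void push_back(Node* n) {
        n->prev = tail; n->next = nullptr;
        if (tail != nullptr) tail->next = n; else head = n;
        tail = n;
      }

      // One entry point removes from either end; an empty list yields nullptr.
      Node* remove_region(bool from_head) {
        if (head == nullptr) return nullptr;
        Node* n = from_head ? head : tail;
        if (n->prev != nullptr) n->prev->next = n->next; else head = n->next;
        if (n->next != nullptr) n->next->prev = n->prev; else tail = n->prev;
        n->prev = n->next = nullptr;
        return n;
      }
    };

    int main() {
      Node a(1), b(2), c(3);
      List l;
      l.push_back(&a); l.push_back(&b); l.push_back(&c);
      Node* first = l.remove_region(true  /* from_head */);            // a
      Node* last  = l.remove_region(false /* from_head */);            // c
      printf("%d %d\n", first->id, last->id);                          // 1 3
      printf("%s\n", l.remove_region(true) == &b ? "b" : "?");         // b
      printf("%s\n", l.remove_region(true) == nullptr ? "empty" : "?"); // empty
      return 0;
    }
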
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/heapRegionType.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/heapRegionType.hpp"
+
+bool HeapRegionType::is_valid(Tag tag) {
+  switch (tag) {
+    case FreeTag:
+    case EdenTag:
+    case SurvTag:
+    case HumStartsTag:
+    case HumContTag:
+    case OldTag:
+      return true;
+  }
+  return false;
+}
+
+const char* HeapRegionType::get_str() const {
+  hrt_assert_is_valid(_tag);
+  switch (_tag) {
+    case FreeTag:      return "FREE";
+    case EdenTag:      return "EDEN";
+    case SurvTag:      return "SURV";
+    case HumStartsTag: return "HUMS";
+    case HumContTag:   return "HUMC";
+    case OldTag:       return "OLD";
+  }
+  ShouldNotReachHere();
+  // keep some compilers happy
+  return NULL;
+}
+
+const char* HeapRegionType::get_short_str() const {
+  hrt_assert_is_valid(_tag);
+  switch (_tag) {
+    case FreeTag:      return "F";
+    case EdenTag:      return "E";
+    case SurvTag:      return "S";
+    case HumStartsTag: return "HS";
+    case HumContTag:   return "HC";
+    case OldTag:       return "O";
+  }
+  ShouldNotReachHere();
+  // keep some compilers happy
+  return NULL;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/heapRegionType.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONTYPE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONTYPE_HPP
+
+#include "memory/allocation.hpp"
+
+#define hrt_assert_is_valid(tag) \
+  assert(is_valid((tag)), err_msg("invalid HR type: %u", (uint) (tag)))
+
+class HeapRegionType VALUE_OBJ_CLASS_SPEC {
+private:
+  // We encode the value of the heap region type so the generation can be
+  // determined quickly. The tag is split into two parts:
+  //
+  //   major type (young, humongous)                         : top N-1 bits
+  //   minor type (eden / survivor, starts / cont hum, etc.) : bottom 1 bit
+  //
+  // If there's need to increase the number of minor types in the
+  // future, we'll have to increase the size of the latter and hence
+  // decrease the size of the former.
+  //
+  // 0000 0 [ 0] Free
+  //
+  // 0001 0      Young Mask
+  // 0001 0 [ 2] Eden
+  // 0001 1 [ 3] Survivor
+  //
+  // 0010 0      Humongous Mask
+  // 0010 0 [ 4] Humongous Starts
+  // 0010 1 [ 5] Humongous Continues
+  //
+  // 0100 0 [ 8] Old
+  typedef enum {
+    FreeTag       = 0,
+
+    YoungMask     = 2,
+    EdenTag       = YoungMask,
+    SurvTag       = YoungMask + 1,
+
+    HumMask       = 4,
+    HumStartsTag  = HumMask,
+    HumContTag    = HumMask + 1,
+
+    OldTag        = 8
+  } Tag;
+
+  volatile Tag _tag;
+
+  static bool is_valid(Tag tag);
+
+  Tag get() const {
+    hrt_assert_is_valid(_tag);
+    return _tag;
+  }
+
+  // Sets the type to 'tag'.
+  void set(Tag tag) {
+    hrt_assert_is_valid(tag);
+    hrt_assert_is_valid(_tag);
+    _tag = tag;
+  }
+
+  // Sets the type to 'tag', expecting the type to be 'before'. This
+  // is available for when we want to add sanity checking to the type
+  // transition.
+  void set_from(Tag tag, Tag before) {
+    hrt_assert_is_valid(tag);
+    hrt_assert_is_valid(before);
+    hrt_assert_is_valid(_tag);
+    assert(_tag == before,
+           err_msg("HR tag: %u, expected: %u, new tag: %u", _tag, before, tag));
+    _tag = tag;
+  }
+
+public:
+  // Queries
+
+  bool is_free() const { return get() == FreeTag; }
+
+  bool is_young()    const { return (get() & YoungMask) != 0; }
+  bool is_eden()     const { return get() == EdenTag;  }
+  bool is_survivor() const { return get() == SurvTag;  }
+
+  bool is_humongous()           const { return (get() & HumMask) != 0; }
+  bool is_starts_humongous()    const { return get() == HumStartsTag;  }
+  bool is_continues_humongous() const { return get() == HumContTag;    }
+
+  bool is_old() const { return get() == OldTag; }
+
+  // Setters
+
+  void set_free() { set(FreeTag); }
+
+  void set_eden()        { set_from(EdenTag, FreeTag); }
+  void set_eden_pre_gc() { set_from(EdenTag, SurvTag); }
+  void set_survivor()    { set_from(SurvTag, FreeTag); }
+
+  void set_starts_humongous()    { set_from(HumStartsTag, FreeTag); }
+  void set_continues_humongous() { set_from(HumContTag,   FreeTag); }
+
+  void set_old() { set(OldTag); }
+
+  // Misc
+
+  const char* get_str() const;
+  const char* get_short_str() const;
+
+  HeapRegionType() : _tag(FreeTag) { hrt_assert_is_valid(_tag); }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONTYPE_HPP
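
The tag encoding documented above reserves the low bit for the minor type so that generation checks reduce to a single mask test. The self-contained restatement below uses a plain enum and free functions, not the HeapRegionType class, to show those mask checks.

    #include <cassert>
    #include <cstdio>

    // Mirrors the documented tag values: low bit = minor type, upper bits = major type.
    enum Tag {
      FreeTag      = 0,
      YoungMask    = 2,
      EdenTag      = YoungMask,      // 2
      SurvTag      = YoungMask + 1,  // 3
      HumMask      = 4,
      HumStartsTag = HumMask,        // 4
      HumContTag   = HumMask + 1,    // 5
      OldTag       = 8
    };

    static bool is_young(Tag t)     { return (t & YoungMask) != 0; }
    static bool is_humongous(Tag t) { return (t & HumMask) != 0; }

    int main() {
      assert(is_young(EdenTag) && is_young(SurvTag));
      assert(!is_young(FreeTag) && !is_young(OldTag));
      assert(is_humongous(HumStartsTag) && is_humongous(HumContTag));
      assert(!is_humongous(EdenTag));
      printf("tag mask checks hold\n");
      return 0;
    }
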
--- a/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,11 +31,15 @@
 #include "runtime/thread.inline.hpp"
 
 PtrQueue::PtrQueue(PtrQueueSet* qset, bool perm, bool active) :
-  _qset(qset), _buf(NULL), _index(0), _active(active),
+  _qset(qset), _buf(NULL), _index(0), _sz(0), _active(active),
   _perm(perm), _lock(NULL)
 {}
 
-void PtrQueue::flush() {
+PtrQueue::~PtrQueue() {
+  assert(_perm || (_buf == NULL), "queue must be flushed before delete");
+}
+
+void PtrQueue::flush_impl() {
   if (!_perm && _buf != NULL) {
     if (_index == _sz) {
       // No work to do.
--- a/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,15 +65,18 @@
   Mutex* _lock;
 
   PtrQueueSet* qset() { return _qset; }
+  bool is_permanent() const { return _perm; }
+
+  // Process queue entries and release resources, if not permanent.
+  void flush_impl();
 
 public:
   // Initialize this queue to contain a null buffer, and be part of the
   // given PtrQueueSet.
   PtrQueue(PtrQueueSet* qset, bool perm = false, bool active = false);
-  // Release any contained resources.
-  virtual void flush();
-  // Calls flush() when destroyed.
-  ~PtrQueue() { flush(); }
+
+  // Requires queue flushed or permanent.
+  ~PtrQueue();
 
   // Associate a lock with a ptr queue.
   void set_lock(Mutex* lock) { _lock = lock; }
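
With the change above, ~PtrQueue() only asserts that a non-permanent queue was flushed instead of flushing from the destructor. A minimal sketch of that ownership contract with a hypothetical Queue class (not the HotSpot PtrQueue):

    #include <cassert>
    #include <cstdio>
    #include <vector>

    // The owner must call flush() (or mark the queue permanent) before destruction;
    // the destructor only checks, mirroring PtrQueue::~PtrQueue().
    class Queue {
      std::vector<int> _buf;
      bool _permanent;
    public:
      explicit Queue(bool permanent) : _permanent(permanent) {}
      void enqueue(int v) { _buf.push_back(v); }
      void flush() { for (int v : _buf) printf("%d\n", v); _buf.clear(); }
      ~Queue() { assert(_permanent || _buf.empty()); }
    };

    int main() {
      Queue q(false /* permanent */);
      q.enqueue(42);
      q.flush();   // required before q goes out of scope
      return 0;
    }
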
--- a/src/share/vm/gc_implementation/g1/satbQueue.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/satbQueue.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -39,7 +39,7 @@
   // first before we flush it, otherwise we might end up with an
   // enqueued buffer with refs into the CSet which breaks our invariants.
   filter();
-  PtrQueue::flush();
+  flush_impl();
 }
 
 // This method removes entries from an SATB buffer that will not be
@@ -285,37 +285,6 @@
   _par_closures[i] = par_closure;
 }
 
-void SATBMarkQueueSet::iterate_closure_all_threads() {
-  for(JavaThread* t = Threads::first(); t; t = t->next()) {
-    t->satb_mark_queue().apply_closure_and_empty(_closure);
-  }
-  shared_satb_queue()->apply_closure_and_empty(_closure);
-}
-
-void SATBMarkQueueSet::par_iterate_closure_all_threads(uint worker) {
-  SharedHeap* sh = SharedHeap::heap();
-  int parity = sh->strong_roots_parity();
-
-  for(JavaThread* t = Threads::first(); t; t = t->next()) {
-    if (t->claim_oops_do(true, parity)) {
-      t->satb_mark_queue().apply_closure_and_empty(_par_closures[worker]);
-    }
-  }
-
-  // We also need to claim the VMThread so that its parity is updated
-  // otherwise the next call to Thread::possibly_parallel_oops_do inside
-  // a StrongRootsScope might skip the VMThread because it has a stale
-  // parity that matches the parity set by the StrongRootsScope
-  //
-  // Whichever worker succeeds in claiming the VMThread gets to do
-  // the shared queue.
-
-  VMThread* vmt = VMThread::vm_thread();
-  if (vmt->claim_oops_do(true, parity)) {
-    shared_satb_queue()->apply_closure_and_empty(_par_closures[worker]);
-  }
-}
-
 bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
                                                               uint worker) {
   BufferNode* nd = NULL;
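
In the hunk above, ObjPtrQueue::flush() filters its buffer first and then calls the non-virtual flush_impl() inherited from PtrQueue. The sketch below shows that filter-then-delegate shape with hypothetical BaseQueue and FilteringQueue classes; negative values stand in for the entries an SATB filter would drop.

    #include <cstdio>
    #include <vector>

    class BaseQueue {
    protected:
      std::vector<int> _buf;
      // Non-virtual: subclasses prepare the buffer, then delegate here.
      void flush_impl() {
        for (int v : _buf) printf("enqueue %d\n", v);
        _buf.clear();
      }
    public:
      void push(int v) { _buf.push_back(v); }
    };

    class FilteringQueue : public BaseQueue {
    public:
      void flush() {
        // Drop unwanted entries first, then hand the rest to the base implementation.
        std::vector<int> kept;
        for (int v : _buf) if (v >= 0) kept.push_back(v);
        _buf.swap(kept);
        flush_impl();
      }
    };

    int main() {
      FilteringQueue q;
      q.push(1); q.push(-7); q.push(3);
      q.flush();   // prints "enqueue 1" and "enqueue 3"
      return 0;
    }
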
--- a/src/share/vm/gc_implementation/g1/satbQueue.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/satbQueue.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,9 @@
 
 // A ptrQueue whose elements are "oops", pointers to object heads.
 class ObjPtrQueue: public PtrQueue {
+  friend class Threads;
   friend class SATBMarkQueueSet;
+  friend class G1RemarkThreadsClosure;
 
 private:
   // Filter out unwanted entries from the buffer.
@@ -58,9 +60,8 @@
     // field to true. This is done in JavaThread::initialize_queues().
     PtrQueue(qset, perm, false /* active */) { }
 
-  // Overrides PtrQueue::flush() so that it can filter the buffer
-  // before it is flushed.
-  virtual void flush();
+  // Process queue entries and free resources.
+  void flush();
 
   // Overrides PtrQueue::should_enqueue_buffer(). See the method's
   // definition for more information.
@@ -119,13 +120,6 @@
   // closures, one for each parallel GC thread.
   void set_par_closure(int i, ObjectClosure* closure);
 
-  // Apply the registered closure to all entries on each
-  // currently-active buffer and then empty the buffer. It should only
-  // be called serially and at a safepoint.
-  void iterate_closure_all_threads();
-  // Parallel version of the above.
-  void par_iterate_closure_all_threads(uint worker);
-
   // If there exists some completed buffer, pop it, then apply the
   // registered closure to all its elements, and return true.  If no
   // completed buffers exist, return false.
--- a/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -370,7 +370,7 @@
 }
 
 size_t RSHashTable::mem_size() const {
-  return sizeof(this) +
+  return sizeof(RSHashTable) +
     capacity() * (SparsePRTEntry::size() + sizeof(int));
 }
 
@@ -472,13 +472,13 @@
 size_t SparsePRT::mem_size() const {
   // We ignore "_cur" here, because it either = _next, or else it is
   // on the deleted list.
-  return sizeof(this) + _next->mem_size();
+  return sizeof(SparsePRT) + _next->mem_size();
 }
 
 bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) {
 #if SPARSE_PRT_VERBOSE
   gclog_or_tty->print_cr("  Adding card %d from region %d to region %u sparse.",
-                         card_index, region_id, _hr->hrs_index());
+                         card_index, region_id, _hr->hrm_index());
 #endif
   if (_next->occupied_entries() * 2 > _next->capacity()) {
     expand();
@@ -530,7 +530,7 @@
 
 #if SPARSE_PRT_VERBOSE
   gclog_or_tty->print_cr("  Expanded sparse table for %u to %d.",
-                         _hr->hrs_index(), _next->capacity());
+                         _hr->hrm_index(), _next->capacity());
 #endif
   for (size_t i = 0; i < last->capacity(); i++) {
     SparsePRTEntry* e = last->entry((int)i);
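
The mem_size() changes above replace sizeof(this), which yields the size of a pointer, with sizeof(RSHashTable) and sizeof(SparsePRT), the size of the object. A tiny demonstration of the difference with an invented struct:

    #include <cstddef>
    #include <cstdio>

    struct RSHashTableLike {
      long fields[8];
      // sizeof(this) is the size of the pointer (typically 8 on LP64), not the object.
      size_t mem_size_wrong() const { return sizeof(this); }
      // sizeof(RSHashTableLike) is the size of the object (64 bytes here).
      size_t mem_size_right() const { return sizeof(RSHashTableLike); }
    };

    int main() {
      RSHashTableLike t;
      printf("wrong: %zu  right: %zu\n", t.mem_size_wrong(), t.mem_size_right());
      return 0;
    }
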
--- a/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,7 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
 
 #include "gc_implementation/g1/heapRegion.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 
 #define VM_STRUCTS_G1(nonstatic_field, static_field)                          \
@@ -34,21 +34,24 @@
   static_field(HeapRegion, GrainBytes,        size_t)                         \
   static_field(HeapRegion, LogOfHRGrainBytes, int)                            \
                                                                               \
+  nonstatic_field(G1OffsetTableContigSpace, _top,       HeapWord*)            \
+                                                                              \
   nonstatic_field(G1HeapRegionTable, _base,             address)              \
   nonstatic_field(G1HeapRegionTable, _length,           size_t)               \
   nonstatic_field(G1HeapRegionTable, _biased_base,      address)              \
   nonstatic_field(G1HeapRegionTable, _bias,             size_t)               \
   nonstatic_field(G1HeapRegionTable, _shift_by,         uint)                 \
                                                                               \
-  nonstatic_field(HeapRegionSeq,   _regions,            G1HeapRegionTable)    \
-  nonstatic_field(HeapRegionSeq,   _committed_length,   uint)                 \
+  nonstatic_field(HeapRegionManager, _regions,          G1HeapRegionTable)    \
+  nonstatic_field(HeapRegionManager, _num_committed,    uint)                 \
                                                                               \
-  nonstatic_field(G1CollectedHeap, _hrs,                HeapRegionSeq)        \
-  nonstatic_field(G1CollectedHeap, _g1_committed,       MemRegion)            \
-  nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t)               \
+  nonstatic_field(G1Allocator,     _summary_bytes_used, size_t)               \
+                                                                              \
+  nonstatic_field(G1CollectedHeap, _hrm,                HeapRegionManager)    \
   nonstatic_field(G1CollectedHeap, _g1mm,               G1MonitoringSupport*) \
   nonstatic_field(G1CollectedHeap, _old_set,            HeapRegionSetBase)    \
   nonstatic_field(G1CollectedHeap, _humongous_set,      HeapRegionSetBase)    \
+  nonstatic_field(G1CollectedHeap, _allocator,          G1Allocator*)         \
                                                                               \
   nonstatic_field(G1MonitoringSupport, _eden_committed,     size_t)           \
   nonstatic_field(G1MonitoringSupport, _eden_used,          size_t)           \
@@ -69,16 +72,18 @@
                                                                               \
   declare_type(G1CollectedHeap, SharedHeap)                                   \
                                                                               \
-  declare_type(G1OffsetTableContigSpace, ContiguousSpace)                     \
-  declare_type(HeapRegion, ContiguousSpace)                                   \
-  declare_toplevel_type(HeapRegionSeq)                                        \
+  declare_type(G1OffsetTableContigSpace, CompactibleSpace)                    \
+  declare_type(HeapRegion, G1OffsetTableContigSpace)                          \
+  declare_toplevel_type(HeapRegionManager)                                    \
   declare_toplevel_type(HeapRegionSetBase)                                    \
   declare_toplevel_type(HeapRegionSetCount)                                   \
   declare_toplevel_type(G1MonitoringSupport)                                  \
+  declare_toplevel_type(G1Allocator)                                          \
                                                                               \
   declare_toplevel_type(G1CollectedHeap*)                                     \
   declare_toplevel_type(HeapRegion*)                                          \
   declare_toplevel_type(G1MonitoringSupport*)                                 \
+  declare_toplevel_type(G1Allocator*)                                         \
 
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -45,7 +45,8 @@
 void VM_G1CollectForAllocation::doit() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   GCCauseSetter x(g1h, _gc_cause);
-  _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
+
+  _result = g1h->satisfy_failed_allocation(_word_size, allocation_context(), &_pause_succeeded);
   assert(_result == NULL || _pause_succeeded,
          "if we get back a result, the pause should have succeeded");
 }
@@ -94,12 +95,13 @@
   assert(!_should_initiate_conc_mark ||
   ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
    (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
-    _gc_cause == GCCause::_g1_humongous_allocation),
-         "only a GC locker, a System.gc() or a hum allocation induced GC should start a cycle");
+    _gc_cause == GCCause::_g1_humongous_allocation ||
+    _gc_cause == GCCause::_update_allocation_context_stats_inc),
+      "only a GC locker, a System.gc(), stats update or a hum allocation induced GC should start a cycle");
 
   if (_word_size > 0) {
     // An allocation has been requested. So, try to do that first.
-    _result = g1h->attempt_allocation_at_safepoint(_word_size,
+    _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
                                      false /* expect_null_cur_alloc_region */);
     if (_result != NULL) {
       // If we can successfully allocate before we actually do the
@@ -152,7 +154,7 @@
     g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
   if (_pause_succeeded && _word_size > 0) {
     // An allocation had been requested.
-    _result = g1h->attempt_allocation_at_safepoint(_word_size,
+    _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
                                       true /* expect_null_cur_alloc_region */);
   } else {
     assert(_result == NULL, "invariant");
@@ -211,8 +213,12 @@
   assert(_needs_pll, "don't call this otherwise");
   // The caller may block while communicating
   // with the SLT thread in order to acquire/release the PLL.
-  ConcurrentMarkThread::slt()->
-    manipulatePLL(SurrogateLockerThread::acquirePLL);
+  SurrogateLockerThread* slt = ConcurrentMarkThread::slt();
+  if (slt != NULL) {
+    slt->manipulatePLL(SurrogateLockerThread::acquirePLL);
+  } else {
+    SurrogateLockerThread::report_missing_slt();
+  }
 }
 
 void VM_CGC_Operation::release_and_notify_pending_list_lock() {
@@ -226,7 +232,7 @@
 void VM_CGC_Operation::doit() {
   gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
   TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
-  GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm());
+  GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm(), G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id());
   SharedHeap* sh = SharedHeap::heap();
   // This could go away if CollectedHeap gave access to _gc_is_active...
   if (sh != NULL) {
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_VM_OPERATIONS_G1_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_VM_OPERATIONS_G1_HPP
 
+#include "gc_implementation/g1/g1AllocationContext.hpp"
 #include "gc_implementation/shared/vmGCOperations.hpp"
 
 // VM_operations for the G1 collector.
@@ -40,6 +41,7 @@
   size_t    _word_size;
   HeapWord* _result;
   bool      _pause_succeeded;
+  AllocationContext_t _allocation_context;
 
 public:
   VM_G1OperationWithAllocRequest(unsigned int gc_count_before,
@@ -49,6 +51,8 @@
       _word_size(word_size), _result(NULL), _pause_succeeded(false) { }
   HeapWord* result() { return _result; }
   bool pause_succeeded() { return _pause_succeeded; }
+  void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
+  AllocationContext_t  allocation_context() { return _allocation_context; }
 };
 
 class VM_G1CollectFull: public VM_GC_Operation {
@@ -56,7 +60,7 @@
   VM_G1CollectFull(unsigned int gc_count_before,
                    unsigned int full_gc_count_before,
                    GCCause::Cause cause)
-    : VM_GC_Operation(gc_count_before, cause, full_gc_count_before) { }
+    : VM_GC_Operation(gc_count_before, cause, full_gc_count_before, true) { }
   virtual VMOp_Type type() const { return VMOp_G1CollectFull; }
   virtual void doit();
   virtual const char* name() const {
--- a/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -32,6 +32,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/virtualspace.hpp"
 #include "runtime/vmThread.hpp"
 
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -28,12 +28,12 @@
 #include "gc_implementation/parNew/parOopClosures.inline.hpp"
 #include "gc_implementation/shared/adaptiveSizePolicy.hpp"
 #include "gc_implementation/shared/ageTable.hpp"
-#include "gc_implementation/shared/parGCAllocBuffer.hpp"
+#include "gc_implementation/shared/copyFailedInfo.hpp"
 #include "gc_implementation/shared/gcHeapSummary.hpp"
 #include "gc_implementation/shared/gcTimer.hpp"
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/gcTraceTime.hpp"
-#include "gc_implementation/shared/copyFailedInfo.hpp"
+#include "gc_implementation/shared/parGCAllocBuffer.inline.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
 #include "memory/defNewGeneration.inline.hpp"
 #include "memory/genCollectedHeap.hpp"
@@ -50,7 +50,7 @@
 #include "runtime/handles.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
-#include "runtime/thread.hpp"
+#include "runtime/thread.inline.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/workgroup.hpp"
@@ -251,7 +251,7 @@
         plab->set_word_size(buf_size);
         plab->set_buf(buf_space);
         record_survivor_plab(buf_space, buf_size);
-        obj = plab->allocate(word_sz);
+        obj = plab->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
         // Note that we cannot compare buf_size < word_sz below
         // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
         assert(obj != NULL || plab->words_remaining() < word_sz,
@@ -613,20 +613,21 @@
 
   KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
                                       gch->rem_set()->klass_rem_set());
-
-  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
+  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
+                                           &par_scan_state.to_space_root_closure(),
+                                           false);
 
   par_scan_state.start_strong_roots();
-  gch->gen_process_strong_roots(_gen->level(),
-                                true,  // Process younger gens, if any,
-                                       // as strong roots.
-                                false, // no scope; this is parallel code
-                                true,  // is scavenging
-                                SharedHeap::ScanningOption(so),
-                                &par_scan_state.to_space_root_closure(),
-                                true,   // walk *all* scavengable nmethods
-                                &par_scan_state.older_gen_closure(),
-                                &klass_scan_closure);
+  gch->gen_process_roots(_gen->level(),
+                         true,  // Process younger gens, if any,
+                                // as strong roots.
+                         false, // no scope; this is parallel code
+                         SharedHeap::SO_ScavengeCodeCache,
+                         GenCollectedHeap::StrongAndWeakRoots,
+                         &par_scan_state.to_space_root_closure(),
+                         &par_scan_state.older_gen_closure(),
+                         &cld_scan_closure);
+
   par_scan_state.end_strong_roots();
 
   // "evacuate followers".
@@ -957,7 +958,7 @@
     size_policy->minor_collection_begin();
   }
 
-  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
+  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
   // Capture heap used before collection (for printing).
   size_t gch_prev_used = gch->used();
 
@@ -1015,14 +1016,14 @@
     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
     stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                               &evacuate_followers, &task_executor,
-                                              _gc_timer);
+                                              _gc_timer, gc_tracer.gc_id());
   } else {
     thread_state_set.flush();
     gch->set_par_threads(0);  // 0 ==> non-parallel.
     gch->save_marks();
     stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                               &evacuate_followers, NULL,
-                                              _gc_timer);
+                                              _gc_timer, gc_tracer.gc_id());
   }
   gc_tracer.report_gc_reference_stats(stats);
   if (!promotion_failed()) {
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -69,7 +69,7 @@
   ParScanWithoutBarrierClosure         _to_space_closure; // scan_without_gc_barrier
   ParScanWithBarrierClosure            _old_gen_closure; // scan_with_gc_barrier
   ParRootScanWithoutBarrierClosure     _to_space_root_closure; // scan_root_without_gc_barrier
-  // One of these two will be passed to process_strong_roots, which will
+  // One of these two will be passed to process_roots, which will
   // set its generation.  The first is for two-gen configs where the
   // old gen collects the perm gen; the second is for arbitrary configs.
   // The second isn't used right now (it used to be used for the train, an
@@ -168,7 +168,7 @@
   HeapWord* alloc_in_to_space_slow(size_t word_sz);
 
   HeapWord* alloc_in_to_space(size_t word_sz) {
-    HeapWord* obj = to_space_alloc_buffer()->allocate(word_sz);
+    HeapWord* obj = to_space_alloc_buffer()->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
     if (obj != NULL) return obj;
     else return alloc_in_to_space_slow(word_sz);
   }
--- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -30,6 +30,7 @@
 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oop.psgc.inline.hpp"
+#include "runtime/prefetch.inline.hpp"
 
 // Checks an individual oop for missing precise marks. Mark
 // may be either dirty or newgen.
--- a/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -30,6 +30,7 @@
 #include "memory/allocation.inline.hpp"
 #include "runtime/mutex.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.inline.hpp"
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
--- a/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -71,7 +71,7 @@
   if (_virtual_space != NULL && _virtual_space->expand_by(_reserved_byte_size)) {
     _region_start = covered_region.start();
     _region_size = covered_region.word_size();
-    idx_t* map = (idx_t*)_virtual_space->reserved_low_addr();
+    BitMap::bm_word_t* map = (BitMap::bm_word_t*)_virtual_space->reserved_low_addr();
     _beg_bits.set_map(map);
     _beg_bits.set_size(bits / 2);
     _end_bits.set_map(map + words / 2);
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -78,6 +78,7 @@
                         (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
   CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
+  barrier_set->initialize();
   _barrier_set = barrier_set;
   oopDesc::set_bs(_barrier_set);
   if (_barrier_set == NULL) {
@@ -484,10 +485,6 @@
   young_gen()->eden_space()->ensure_parsability();
 }
 
-size_t ParallelScavengeHeap::unsafe_max_alloc() {
-  return young_gen()->eden_space()->free_in_bytes();
-}
-
 size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
   return young_gen()->eden_space()->tlab_capacity(thr);
 }
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -184,8 +184,6 @@
   void accumulate_statistics_all_tlabs();
   void resize_all_tlabs();
 
-  size_t unsafe_max_alloc();
-
   bool supports_tlab_allocation() const { return true; }
 
   size_t tlab_capacity(Thread* thr) const;
--- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -53,13 +53,13 @@
   ResourceMark rm;
 
   NOT_PRODUCT(GCTraceTime tm("ThreadRootsMarkingTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
 
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
   CLDToOopClosure mark_and_push_from_clds(&mark_and_push_closure, true);
-  CodeBlobToOopClosure mark_and_push_in_blobs(&mark_and_push_closure, /*do_marking=*/ true);
+  MarkingCodeBlobClosure mark_and_push_in_blobs(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
 
   if (_java_thread != NULL)
     _java_thread->oops_do(
@@ -82,7 +82,7 @@
   assert(Universe::heap()->is_gc_active(), "called outside gc");
 
   NOT_PRODUCT(GCTraceTime tm("MarkFromRootsTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
@@ -100,7 +100,7 @@
     case threads:
     {
       ResourceMark rm;
-      CodeBlobToOopClosure each_active_code_blob(&mark_and_push_closure, /*do_marking=*/ true);
+      MarkingCodeBlobClosure each_active_code_blob(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
       CLDToOopClosure mark_and_push_from_cld(&mark_and_push_closure);
       Threads::oops_do(&mark_and_push_closure, &mark_and_push_from_cld, &each_active_code_blob);
     }
@@ -153,7 +153,7 @@
   assert(Universe::heap()->is_gc_active(), "called outside gc");
 
   NOT_PRODUCT(GCTraceTime tm("RefProcTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
@@ -209,7 +209,7 @@
   assert(Universe::heap()->is_gc_active(), "called outside gc");
 
   NOT_PRODUCT(GCTraceTime tm("StealMarkingTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
 
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
@@ -242,7 +242,7 @@
   assert(Universe::heap()->is_gc_active(), "called outside gc");
 
   NOT_PRODUCT(GCTraceTime tm("StealRegionCompactionTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
 
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
@@ -309,7 +309,7 @@
 void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
 
   NOT_PRODUCT(GCTraceTime tm("UpdateDensePrefixTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
 
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
@@ -324,7 +324,7 @@
   assert(Universe::heap()->is_gc_active(), "called outside gc");
 
   NOT_PRODUCT(GCTraceTime tm("DrainStacksCompactionTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
 
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -169,7 +169,7 @@
 
     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
+    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id());
     TraceCollectorStats tcs(counters());
     TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
 
@@ -513,7 +513,7 @@
 
 void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
-  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer);
+  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
   trace(" 1");
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
@@ -528,14 +528,14 @@
     Universe::oops_do(mark_and_push_closure());
     JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
     CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
-    CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
+    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
     Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
     ObjectSynchronizer::oops_do(mark_and_push_closure());
     FlatProfiler::oops_do(mark_and_push_closure());
     Management::oops_do(mark_and_push_closure());
     JvmtiExport::oops_do(mark_and_push_closure());
     SystemDictionary::always_strong_oops_do(mark_and_push_closure());
-    ClassLoaderDataGraph::always_strong_oops_do(mark_and_push_closure(), follow_klass_closure(), true);
+    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
     // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
     //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
   }
@@ -548,7 +548,7 @@
     ref_processor()->setup_policy(clear_all_softrefs);
     const ReferenceProcessorStats& stats =
       ref_processor()->process_discovered_references(
-        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer);
+        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer, _gc_tracer->gc_id());
     gc_tracer()->report_gc_reference_stats(stats);
   }
 
@@ -574,7 +574,7 @@
 
 
 void PSMarkSweep::mark_sweep_phase2() {
-  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer);
+  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
   trace("2");
 
   // Now all live objects are marked, compute the new object addresses.
@@ -604,7 +604,7 @@
 
 void PSMarkSweep::mark_sweep_phase3() {
   // Adjust the pointers to reflect the new locations
-  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer);
+  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
   trace("3");
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
@@ -625,16 +625,16 @@
   FlatProfiler::oops_do(adjust_pointer_closure());
   Management::oops_do(adjust_pointer_closure());
   JvmtiExport::oops_do(adjust_pointer_closure());
-  // SO_AllClasses
   SystemDictionary::oops_do(adjust_pointer_closure());
-  ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);
+  ClassLoaderDataGraph::cld_do(adjust_cld_closure());
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
   // Global (weak) JNI handles
   JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());
 
-  CodeCache::oops_do(adjust_pointer_closure());
+  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
+  CodeCache::blobs_do(&adjust_from_blobs);
   StringTable::oops_do(adjust_pointer_closure());
   ref_processor()->weak_oops_do(adjust_pointer_closure());
   PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
@@ -647,7 +647,7 @@
 
 void PSMarkSweep::mark_sweep_phase4() {
   EventMark m("4 compact heap");
-  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer);
+  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
   trace("4");
 
   // All pointers are now adjusted, move objects accordingly
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -40,11 +40,11 @@
   static CollectorCounters*  _counters;
 
   // Closure accessors
-  static OopClosure* mark_and_push_closure() { return &MarkSweep::mark_and_push_closure; }
-  static KlassClosure* follow_klass_closure() { return &MarkSweep::follow_klass_closure; }
-  static VoidClosure* follow_stack_closure() { return (VoidClosure*)&MarkSweep::follow_stack_closure; }
-  static OopClosure* adjust_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_pointer_closure; }
-  static KlassClosure* adjust_klass_closure() { return &MarkSweep::adjust_klass_closure; }
+  static OopClosure* mark_and_push_closure()   { return &MarkSweep::mark_and_push_closure; }
+  static VoidClosure* follow_stack_closure()   { return (VoidClosure*)&MarkSweep::follow_stack_closure; }
+  static CLDClosure* follow_cld_closure()      { return &MarkSweep::follow_cld_closure; }
+  static OopClosure* adjust_pointer_closure()  { return (OopClosure*)&MarkSweep::adjust_pointer_closure; }
+  static CLDClosure* adjust_cld_closure()      { return &MarkSweep::adjust_cld_closure; }
   static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&MarkSweep::is_alive; }
 
  debug_only(public:)  // Used for PSParallelCompact debugging
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -32,6 +32,7 @@
 #include "gc_implementation/shared/markSweep.inline.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/prefetch.inline.hpp"
 
 PSMarkSweepDecorator* PSMarkSweepDecorator::_destination_decorator = NULL;
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -978,7 +978,7 @@
   // at each young gen gc.  Do the update unconditionally (even though a
   // promotion failure does not swap spaces) because an unknown number of minor
   // collections will have swapped the spaces an unknown number of times.
-  GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer);
+  GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
   ParallelScavengeHeap* heap = gc_heap();
   _space_info[from_space_id].set_space(heap->young_gen()->from_space());
   _space_info[to_space_id].set_space(heap->young_gen()->to_space());
@@ -1021,7 +1021,7 @@
 
 void PSParallelCompact::post_compact()
 {
-  GCTraceTime tm("post compact", print_phases(), true, &_gc_timer);
+  GCTraceTime tm("post compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
 
   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
     // Clear the marking bitmap, summary data and split info.
@@ -1847,7 +1847,7 @@
 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
                                       bool maximum_compaction)
 {
-  GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer);
+  GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
   // trace("2");
 
 #ifdef  ASSERT
@@ -2056,7 +2056,7 @@
 
     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
+    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
     TraceCollectorStats tcs(counters());
     TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
 
@@ -2351,7 +2351,7 @@
                                       bool maximum_heap_compaction,
                                       ParallelOldTracer *gc_tracer) {
   // Recursively traverse all live objects and mark them
-  GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer);
+  GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
 
   ParallelScavengeHeap* heap = gc_heap();
   uint parallel_gc_threads = heap->gc_task_manager()->workers();
@@ -2366,7 +2366,7 @@
   ClassLoaderDataGraph::clear_claimed_marks();
 
   {
-    GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer);
+    GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
 
     ParallelScavengeHeap::ParStrongRootsScope psrs;
 
@@ -2395,24 +2395,24 @@
 
   // Process reference objects found during marking
   {
-    GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer);
+    GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
 
     ReferenceProcessorStats stats;
     if (ref_processor()->processing_is_mt()) {
       RefProcTaskExecutor task_executor;
       stats = ref_processor()->process_discovered_references(
         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
-        &task_executor, &_gc_timer);
+        &task_executor, &_gc_timer, _gc_tracer.gc_id());
     } else {
       stats = ref_processor()->process_discovered_references(
         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
-        &_gc_timer);
+        &_gc_timer, _gc_tracer.gc_id());
     }
 
     gc_tracer->report_gc_reference_stats(stats);
   }
 
-  GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer);
+  GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
 
   // This is the point where the entire marking should have completed.
   assert(cm->marking_stacks_empty(), "Marking should have completed");
@@ -2451,7 +2451,7 @@
 
 void PSParallelCompact::adjust_roots() {
   // Adjust the pointers to reflect the new locations
-  GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer);
+  GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
 
   // Need new claim bits when tracing through and adjusting pointers.
   ClassLoaderDataGraph::clear_claimed_marks();
@@ -2465,7 +2465,6 @@
   FlatProfiler::oops_do(adjust_pointer_closure());
   Management::oops_do(adjust_pointer_closure());
   JvmtiExport::oops_do(adjust_pointer_closure());
-  // SO_AllClasses
   SystemDictionary::oops_do(adjust_pointer_closure());
   ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);
 
@@ -2474,7 +2473,8 @@
   // Global (weak) JNI handles
   JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());
 
-  CodeCache::oops_do(adjust_pointer_closure());
+  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
+  CodeCache::blobs_do(&adjust_from_blobs);
   StringTable::oops_do(adjust_pointer_closure());
   ref_processor()->weak_oops_do(adjust_pointer_closure());
   // Roots were visited so references into the young gen in roots
@@ -2487,7 +2487,7 @@
 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
                                                       uint parallel_gc_threads)
 {
-  GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer);
+  GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
 
   // Find the threads that are active
   unsigned int which = 0;
@@ -2561,7 +2561,7 @@
 
 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
                                                     uint parallel_gc_threads) {
-  GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer);
+  GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
 
   ParallelCompactData& sd = PSParallelCompact::summary_data();
 
@@ -2643,7 +2643,7 @@
                                      GCTaskQueue* q,
                                      ParallelTaskTerminator* terminator_ptr,
                                      uint parallel_gc_threads) {
-  GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer);
+  GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
 
   // Once a thread has drained it's stack, it should try to steal regions from
   // other threads.
@@ -2691,7 +2691,7 @@
 
 void PSParallelCompact::compact() {
   // trace("5");
-  GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer);
+  GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
@@ -2708,7 +2708,7 @@
   enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
 
   {
-    GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer);
+    GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
 
     gc_task_manager()->execute_and_wait(q);
 
@@ -2722,7 +2722,7 @@
 
   {
     // Update the deferred objects, if any.  Any compaction manager can be used.
-    GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer);
+    GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
     ParCompactionManager* cm = ParCompactionManager::manager_array(0);
     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
       update_deferred_objects(cm, SpaceId(id));
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1004,6 +1004,10 @@
   static bool   _dwl_initialized;
 #endif  // #ifdef ASSERT
 
+
+ public:
+  static ParallelOldTracer* gc_tracer() { return &_gc_tracer; }
+
  private:
 
   static void initialize_space_info();
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONLAB_HPP
 
 #include "gc_implementation/parallelScavenge/objectStartArray.hpp"
+#include "gc_interface/collectedHeap.inline.hpp"
 #include "memory/allocation.hpp"
 
 //
@@ -94,23 +95,9 @@
   PSYoungPromotionLAB() { }
 
   // Not MT safe
-  HeapWord* allocate(size_t size) {
-    // Can't assert this, when young fills, we keep the LAB around, but flushed.
-    // assert(_state != flushed, "Sanity");
-    HeapWord* obj = top();
-    HeapWord* new_top = obj + size;
-    // The 'new_top>obj' check is needed to detect overflow of obj+size.
-    if (new_top > obj && new_top <= end()) {
-      set_top(new_top);
-      assert(is_object_aligned((intptr_t)obj) && is_object_aligned((intptr_t)new_top),
-             "checking alignment");
-      return obj;
-    }
+  inline HeapWord* allocate(size_t size);
 
-    return NULL;
-  }
-
-  debug_only(virtual bool lab_is_valid(MemRegion lab));
+  debug_only(virtual bool lab_is_valid(MemRegion lab);)
 };
 
 class PSOldPromotionLAB : public PSPromotionLAB {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONLAB_INLINE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONLAB_INLINE_HPP
+
+#include "gc_implementation/parallelScavenge/psPromotionLAB.hpp"
+#include "gc_interface/collectedHeap.inline.hpp"
+
+HeapWord* PSYoungPromotionLAB::allocate(size_t size) {
+  // Can't assert this, when young fills, we keep the LAB around, but flushed.
+  // assert(_state != flushed, "Sanity");
+  HeapWord* obj = CollectedHeap::align_allocation_or_fail(top(), end(), SurvivorAlignmentInBytes);
+  if (obj == NULL) {
+    return NULL;
+  }
+
+  HeapWord* new_top = obj + size;
+  // The 'new_top>obj' check is needed to detect overflow of obj+size.
+  if (new_top > obj && new_top <= end()) {
+    set_top(new_top);
+    assert(is_ptr_aligned(obj, SurvivorAlignmentInBytes) && is_object_aligned((intptr_t)new_top),
+           "checking alignment");
+    return obj;
+  } else {
+    set_top(obj);
+    return NULL;
+  }
+}
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONLAB_INLINE_HPP
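
The allocate() path above now aligns the LAB top to SurvivorAlignmentInBytes via
CollectedHeap::align_allocation_or_fail() before carving out the object. A standalone sketch
of that alignment step follows; the helper name and plain C types are editorial assumptions
used only to illustrate the rounding-and-bounds-check logic; they are not HotSpot APIs.

    // Illustrative sketch (not part of the changeset).
    #include <stddef.h>
    #include <stdint.h>

    // Round 'p' up to the next 'alignment'-byte boundary (alignment must be a power of two);
    // return NULL if the rounding wraps or the aligned pointer passes 'end', mirroring the
    // NULL-on-failure contract of PSYoungPromotionLAB::allocate() above.
    static inline char* align_up_or_fail(char* p, char* end, size_t alignment) {
      uintptr_t raw     = (uintptr_t)p;
      uintptr_t aligned = (raw + alignment - 1) & ~(uintptr_t)(alignment - 1);
      if (aligned < raw || (char*)aligned > end) {
        return NULL;
      }
      return (char*)aligned;
    }
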
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -27,6 +27,7 @@
 
 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
+#include "gc_implementation/parallelScavenge/psPromotionLAB.inline.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 #include "oops/oop.psgc.inline.hpp"
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -331,7 +331,7 @@
 
     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
+    GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
     TraceCollectorStats tcs(counters());
     TraceMemoryManagerStats tms(false /* not full GC */,gc_cause);
 
@@ -397,7 +397,7 @@
     // We'll use the promotion manager again later.
     PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
     {
-      GCTraceTime tm("Scavenge", false, false, &_gc_timer);
+      GCTraceTime tm("Scavenge", false, false, &_gc_timer, _gc_tracer.gc_id());
       ParallelScavengeHeap::ParStrongRootsScope psrs;
 
       GCTaskQueue* q = GCTaskQueue::create();
@@ -439,7 +439,7 @@
 
     // Process reference objects discovered during scavenge
     {
-      GCTraceTime tm("References", false, false, &_gc_timer);
+      GCTraceTime tm("References", false, false, &_gc_timer, _gc_tracer.gc_id());
 
       reference_processor()->setup_policy(false); // not always_clear
       reference_processor()->set_active_mt_degree(active_workers);
@@ -450,10 +450,10 @@
         PSRefProcTaskExecutor task_executor;
         stats = reference_processor()->process_discovered_references(
           &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
-          &_gc_timer);
+          &_gc_timer, _gc_tracer.gc_id());
       } else {
         stats = reference_processor()->process_discovered_references(
-          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer);
+          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer, _gc_tracer.gc_id());
       }
 
       _gc_tracer.report_gc_reference_stats(stats);
@@ -468,7 +468,7 @@
     }
 
     {
-      GCTraceTime tm("StringTable", false, false, &_gc_timer);
+      GCTraceTime tm("StringTable", false, false, &_gc_timer, _gc_tracer.gc_id());
       // Unlink any dead interned Strings and process the remaining live ones.
       PSScavengeRootsClosure root_closure(promotion_manager);
       StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
@@ -638,7 +638,7 @@
     NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
 
     {
-      GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer);
+      GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer, _gc_tracer.gc_id());
 
       CodeCache::prune_scavenge_root_nmethods();
     }
--- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -65,7 +65,7 @@
     case threads:
     {
       ResourceMark rm;
-      CLDToOopClosure* cld_closure = NULL; // Not needed. All CLDs are already visited.
+      CLDClosure* cld_closure = NULL; // Not needed. All CLDs are already visited.
       Threads::oops_do(&roots_closure, cld_closure, NULL);
     }
     break;
@@ -100,7 +100,7 @@
 
     case code_cache:
       {
-        CodeBlobToOopClosure each_scavengable_code_blob(&roots_to_old_closure, /*do_marking=*/ true);
+        MarkingCodeBlobClosure each_scavengable_code_blob(&roots_to_old_closure, CodeBlobToOopClosure::FixRelocations);
         CodeCache::scavenge_root_nmethods_do(&each_scavengable_code_blob);
       }
       break;
@@ -122,8 +122,8 @@
 
   PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
   PSScavengeRootsClosure roots_closure(pm);
-  CLDToOopClosure* roots_from_clds = NULL;  // Not needed. All CLDs are already visited.
-  CodeBlobToOopClosure roots_in_blobs(&roots_closure, /*do_marking=*/ true);
+  CLDClosure* roots_from_clds = NULL;  // Not needed. All CLDs are already visited.
+  MarkingCodeBlobClosure roots_in_blobs(&roots_closure, CodeBlobToOopClosure::FixRelocations);
 
   if (_java_thread != NULL)
     _java_thread->oops_do(&roots_closure, roots_from_clds, &roots_in_blobs);
--- a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -70,7 +70,7 @@
     "must be a ParallelScavengeHeap");
 
   GCCauseSetter gccs(heap, _gc_cause);
-  if (_gc_cause == GCCause::_gc_locker
+  if (_gc_cause == GCCause::_gc_locker || _gc_cause == GCCause::_wb_young_gc
       DEBUG_ONLY(|| _gc_cause == GCCause::_scavenge_alot)) {
     // If (and only if) the scavenge fails, this will invoke a full gc.
     heap->invoke_scavenge();
--- a/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -36,21 +36,10 @@
 
 int  ConcurrentGCThread::_CGC_flag            = CGC_nil;
 
-SuspendibleThreadSet ConcurrentGCThread::_sts;
-
 ConcurrentGCThread::ConcurrentGCThread() :
   _should_terminate(false), _has_terminated(false) {
-  _sts.initialize();
 };
 
-void ConcurrentGCThread::safepoint_synchronize() {
-  _sts.suspend_all();
-}
-
-void ConcurrentGCThread::safepoint_desynchronize() {
-  _sts.resume_all();
-}
-
 void ConcurrentGCThread::create_and_start() {
   if (os::create_thread(this, os::cgc_thread)) {
     // XXX: need to set this to low priority
@@ -91,78 +80,6 @@
   ThreadLocalStorage::set_thread(NULL);
 }
 
-
-void SuspendibleThreadSet::initialize_work() {
-  MutexLocker x(STS_init_lock);
-  if (!_initialized) {
-    _m             = new Monitor(Mutex::leaf,
-                                 "SuspendibleThreadSetLock", true);
-    _async         = 0;
-    _async_stop    = false;
-    _async_stopped = 0;
-    _initialized   = true;
-  }
-}
-
-void SuspendibleThreadSet::join() {
-  initialize();
-  MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
-  while (_async_stop) _m->wait(Mutex::_no_safepoint_check_flag);
-  _async++;
-  assert(_async > 0, "Huh.");
-}
-
-void SuspendibleThreadSet::leave() {
-  assert(_initialized, "Must be initialized.");
-  MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
-  _async--;
-  assert(_async >= 0, "Huh.");
-  if (_async_stop) _m->notify_all();
-}
-
-void SuspendibleThreadSet::yield(const char* id) {
-  assert(_initialized, "Must be initialized.");
-  if (_async_stop) {
-    MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
-    if (_async_stop) {
-      _async_stopped++;
-      assert(_async_stopped > 0, "Huh.");
-      if (_async_stopped == _async) {
-        if (ConcGCYieldTimeout > 0) {
-          double now = os::elapsedTime();
-          guarantee((now - _suspend_all_start) * 1000.0 <
-                    (double)ConcGCYieldTimeout,
-                    "Long delay; whodunit?");
-        }
-      }
-      _m->notify_all();
-      while (_async_stop) _m->wait(Mutex::_no_safepoint_check_flag);
-      _async_stopped--;
-      assert(_async >= 0, "Huh");
-      _m->notify_all();
-    }
-  }
-}
-
-void SuspendibleThreadSet::suspend_all() {
-  initialize();  // If necessary.
-  if (ConcGCYieldTimeout > 0) {
-    _suspend_all_start = os::elapsedTime();
-  }
-  MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
-  assert(!_async_stop, "Only one at a time.");
-  _async_stop = true;
-  while (_async_stopped < _async) _m->wait(Mutex::_no_safepoint_check_flag);
-}
-
-void SuspendibleThreadSet::resume_all() {
-  assert(_initialized, "Must be initialized.");
-  MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
-  assert(_async_stopped == _async, "Huh.");
-  _async_stop = false;
-  _m->notify_all();
-}
-
 static void _sltLoop(JavaThread* thread, TRAPS) {
   SurrogateLockerThread* slt = (SurrogateLockerThread*)thread;
   slt->loop();
@@ -220,6 +137,13 @@
   return res;
 }
 
+void SurrogateLockerThread::report_missing_slt() {
+  vm_exit_during_initialization(
+    "GC before GC support fully initialized: "
+    "SLT is needed but has not yet been created.");
+  ShouldNotReachHere();
+}
+
 void SurrogateLockerThread::manipulatePLL(SLT_msg_type msg) {
   MutexLockerEx x(&_monitor, Mutex::_no_safepoint_check_flag);
   assert(_buffer == empty, "Should be empty");
@@ -282,30 +206,3 @@
   }
   assert(!_monitor.owned_by_self(), "Should unlock before exit.");
 }
-
-
-// ===== STS Access From Outside CGCT =====
-
-void ConcurrentGCThread::stsYield(const char* id) {
-  assert( Thread::current()->is_ConcurrentGC_thread(),
-          "only a conc GC thread can call this" );
-  _sts.yield(id);
-}
-
-bool ConcurrentGCThread::stsShouldYield() {
-  assert( Thread::current()->is_ConcurrentGC_thread(),
-          "only a conc GC thread can call this" );
-  return _sts.should_yield();
-}
-
-void ConcurrentGCThread::stsJoin() {
-  assert( Thread::current()->is_ConcurrentGC_thread(),
-          "only a conc GC thread can call this" );
-  _sts.join();
-}
-
-void ConcurrentGCThread::stsLeave() {
-  assert( Thread::current()->is_ConcurrentGC_thread(),
-          "only a conc GC thread can call this" );
-  _sts.leave();
-}
--- a/src/share/vm/gc_implementation/shared/concurrentGCThread.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/shared/concurrentGCThread.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,55 +26,8 @@
 #define SHARE_VM_GC_IMPLEMENTATION_SHARED_CONCURRENTGCTHREAD_HPP
 
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
+#include "gc_implementation/shared/suspendibleThreadSet.hpp"
 #include "runtime/thread.hpp"
-#endif // INCLUDE_ALL_GCS
-
-class VoidClosure;
-
-// A SuspendibleThreadSet is (obviously) a set of threads that can be
-// suspended.  A thread can join and later leave the set, and periodically
-// yield.  If some thread (not in the set) requests, via suspend_all, that
-// the threads be suspended, then the requesting thread is blocked until
-// all the threads in the set have yielded or left the set.  (Threads may
-// not enter the set when an attempted suspension is in progress.)  The
-// suspending thread later calls resume_all, allowing the suspended threads
-// to continue.
-
-class SuspendibleThreadSet {
-  Monitor* _m;
-  int      _async;
-  bool     _async_stop;
-  int      _async_stopped;
-  bool     _initialized;
-  double   _suspend_all_start;
-
-  void initialize_work();
-
- public:
-  SuspendibleThreadSet() : _initialized(false) {}
-
-  // Add the current thread to the set.  May block if a suspension
-  // is in progress.
-  void join();
-  // Removes the current thread from the set.
-  void leave();
-  // Returns "true" iff an suspension is in progress.
-  bool should_yield() { return _async_stop; }
-  // Suspends the current thread if a suspension is in progress (for
-  // the duration of the suspension.)
-  void yield(const char* id);
-  // Return when all threads in the set are suspended.
-  void suspend_all();
-  // Allow suspended threads to resume.
-  void resume_all();
-  // Redundant initializations okay.
-  void initialize() {
-    // Double-check dirty read idiom.
-    if (!_initialized) initialize_work();
-  }
-};
-
 
 class ConcurrentGCThread: public NamedThread {
   friend class VMStructs;
@@ -96,9 +49,6 @@
   static int set_CGC_flag(int b)           { return _CGC_flag |= b; }
   static int reset_CGC_flag(int b)         { return _CGC_flag &= ~b; }
 
-  // All instances share this one set.
-  static SuspendibleThreadSet _sts;
-
   // Create and start the thread (setting it's priority high.)
   void create_and_start();
 
@@ -121,25 +71,6 @@
 
   // Tester
   bool is_ConcurrentGC_thread() const          { return true;       }
-
-  static void safepoint_synchronize();
-  static void safepoint_desynchronize();
-
-  // All overridings should probably do _sts::yield, but we allow
-  // overriding for distinguished debugging messages.  Default is to do
-  // nothing.
-  virtual void yield() {}
-
-  bool should_yield() { return _sts.should_yield(); }
-
-  // they are prefixed by sts since there are already yield() and
-  // should_yield() (non-static) methods in this class and it was an
-  // easy way to differentiate them.
-  static void stsYield(const char* id);
-  static bool stsShouldYield();
-  static void stsJoin();
-  static void stsLeave();
-
 };
 
 // The SurrogateLockerThread is used by concurrent GC threads for
@@ -162,6 +93,9 @@
  public:
   static SurrogateLockerThread* make(TRAPS);
 
+  // Terminate VM with error message that SLT needed but not yet created.
+  static void report_missing_slt();
+
   SurrogateLockerThread();
 
   bool is_hidden_from_external_view() const     { return true; }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/gcId.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/shared/gcId.hpp"
+#include "runtime/safepoint.hpp"
+
+uint GCId::_next_id = 0;
+
+const GCId GCId::create() {
+  return GCId(_next_id++);
+}
+const GCId GCId::peek() {
+  return GCId(_next_id);
+}
+const GCId GCId::undefined() {
+  return GCId(UNDEFINED);
+}
+bool GCId::is_undefined() const {
+  return _id == UNDEFINED;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/gcId.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCID_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCID_HPP
+
+#include "memory/allocation.hpp"
+
+class GCId VALUE_OBJ_CLASS_SPEC {
+ private:
+  uint _id;
+  GCId(uint id) : _id(id) {}
+  GCId() { } // Unused
+
+  static uint _next_id;
+  static const uint UNDEFINED = (uint)-1;
+
+ public:
+  uint id() const {
+    assert(_id != UNDEFINED, "Using undefined GC ID");
+    return _id;
+  }
+  bool is_undefined() const;
+
+  static const GCId create();
+  static const GCId peek();
+  static const GCId undefined();
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCID_HPP
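
For reference, a minimal usage sketch of the GCId value type introduced above. This is an
editorial illustration only: the demo function, the tty include, and the call site are
assumptions, not part of the changeset; only the GCId declarations themselves come from the
new gcId.hpp shown here.

    // Illustrative sketch (not part of the changeset).
    #include "gc_implementation/shared/gcId.hpp"
    #include "utilities/ostream.hpp"   // for tty (assumed include)

    static void gc_id_usage_sketch() {
      GCId unset   = GCId::undefined();  // sentinel; calling id() on it would assert
      GCId current = GCId::create();     // hands out 0, 1, 2, ... one per collection
      GCId next    = GCId::peek();       // observes the upcoming id without consuming it

      if (!current.is_undefined()) {
        // Event senders forward the raw value, e.g. event.set_gcId(current.id()),
        // as in the gcTraceSend.cpp hunks below.
        tty->print_cr("GC id %u started; next GC will be %u", current.id(), next.id());
      }
    }
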
--- a/src/share/vm/gc_implementation/shared/gcTrace.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/shared/gcTrace.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc_implementation/shared/copyFailedInfo.hpp"
 #include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcId.hpp"
 #include "gc_implementation/shared/gcTimer.hpp"
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/objectCountEventSender.hpp"
@@ -38,19 +39,14 @@
 #include "gc_implementation/g1/evacuationInfo.hpp"
 #endif
 
-#define assert_unset_gc_id() assert(_shared_gc_info.id() == SharedGCInfo::UNSET_GCID, "GC already started?")
-#define assert_set_gc_id() assert(_shared_gc_info.id() != SharedGCInfo::UNSET_GCID, "GC not started?")
-
-static GCId GCTracer_next_gc_id = 0;
-static GCId create_new_gc_id() {
-  return GCTracer_next_gc_id++;
-}
+#define assert_unset_gc_id() assert(_shared_gc_info.gc_id().is_undefined(), "GC already started?")
+#define assert_set_gc_id() assert(!_shared_gc_info.gc_id().is_undefined(), "GC not started?")
 
 void GCTracer::report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp) {
   assert_unset_gc_id();
 
-  GCId gc_id = create_new_gc_id();
-  _shared_gc_info.set_id(gc_id);
+  GCId gc_id = GCId::create();
+  _shared_gc_info.set_gc_id(gc_id);
   _shared_gc_info.set_cause(cause);
   _shared_gc_info.set_start_timestamp(timestamp);
 }
@@ -62,7 +58,7 @@
 }
 
 bool GCTracer::has_reported_gc_start() const {
-  return _shared_gc_info.id() != SharedGCInfo::UNSET_GCID;
+  return !_shared_gc_info.gc_id().is_undefined();
 }
 
 void GCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
@@ -81,7 +77,7 @@
 
   report_gc_end_impl(timestamp, time_partitions);
 
-  _shared_gc_info.set_id(SharedGCInfo::UNSET_GCID);
+  _shared_gc_info.set_gc_id(GCId::undefined());
 }
 
 void GCTracer::report_gc_reference_stats(const ReferenceProcessorStats& rps) const {
@@ -132,7 +128,7 @@
     if (!cit.allocation_failed()) {
       HeapInspection hi(false, false, false, NULL);
       hi.populate_table(&cit, is_alive_cl);
-      ObjectCountEventSenderClosure event_sender(_shared_gc_info.id(), cit.size_of_instances_in_words(), Ticks::now());
+      ObjectCountEventSenderClosure event_sender(_shared_gc_info.gc_id(), cit.size_of_instances_in_words(), Ticks::now());
       cit.iterate(&event_sender);
     }
   }
--- a/src/share/vm/gc_implementation/shared/gcTrace.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/shared/gcTrace.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -27,6 +27,7 @@
 
 #include "gc_interface/gcCause.hpp"
 #include "gc_interface/gcName.hpp"
+#include "gc_implementation/shared/gcId.hpp"
 #include "gc_implementation/shared/gcWhen.hpp"
 #include "gc_implementation/shared/copyFailedInfo.hpp"
 #include "memory/allocation.hpp"
@@ -38,7 +39,6 @@
 #include "utilities/macros.hpp"
 #include "utilities/ticks.hpp"
 
-typedef uint GCId;
 
 class EvacuationInfo;
 class GCHeapSummary;
@@ -50,11 +50,8 @@
 class BoolObjectClosure;
 
 class SharedGCInfo VALUE_OBJ_CLASS_SPEC {
- public:
-  static const GCId UNSET_GCID = (GCId)-1;
-
  private:
-  GCId _id;
+  GCId _gc_id;
   GCName _name;
   GCCause::Cause _cause;
   Ticks     _start_timestamp;
@@ -64,7 +61,7 @@
 
  public:
   SharedGCInfo(GCName name) :
-    _id(UNSET_GCID),
+    _gc_id(GCId::undefined()),
     _name(name),
     _cause(GCCause::_last_gc_cause),
     _start_timestamp(),
@@ -73,8 +70,8 @@
     _longest_pause() {
   }
 
-  void set_id(GCId id) { _id = id; }
-  GCId id() const { return _id; }
+  void set_gc_id(GCId gc_id) { _gc_id = gc_id; }
+  const GCId& gc_id() const { return _gc_id; }
 
   void set_start_timestamp(const Ticks& timestamp) { _start_timestamp = timestamp; }
   const Ticks start_timestamp() const { return _start_timestamp; }
@@ -131,10 +128,11 @@
   void report_gc_reference_stats(const ReferenceProcessorStats& rp) const;
   void report_object_count_after_gc(BoolObjectClosure* object_filter) NOT_SERVICES_RETURN;
   bool has_reported_gc_start() const;
+  const GCId& gc_id() { return _shared_gc_info.gc_id(); }
 
  protected:
   GCTracer(GCName name) : _shared_gc_info(name) {}
-  virtual void report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp);
+  void report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp);
   virtual void report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions);
 
  private:
--- a/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -43,7 +43,7 @@
 void GCTracer::send_garbage_collection_event() const {
   EventGCGarbageCollection event(UNTIMED);
   if (event.should_commit()) {
-    event.set_gcId(_shared_gc_info.id());
+    event.set_gcId(_shared_gc_info.gc_id().id());
     event.set_name(_shared_gc_info.name());
     event.set_cause((u2) _shared_gc_info.cause());
     event.set_sumOfPauses(_shared_gc_info.sum_of_pauses());
@@ -57,7 +57,7 @@
 void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const {
   EventGCReferenceStatistics e;
   if (e.should_commit()) {
-      e.set_gcId(_shared_gc_info.id());
+      e.set_gcId(_shared_gc_info.gc_id().id());
       e.set_type((u1)type);
       e.set_count(count);
       e.commit();
@@ -68,7 +68,7 @@
                                                       const MetaspaceChunkFreeListSummary& summary) const {
   EventMetaspaceChunkFreeListSummary e;
   if (e.should_commit()) {
-    e.set_gcId(_shared_gc_info.id());
+    e.set_gcId(_shared_gc_info.gc_id().id());
     e.set_when(when);
     e.set_metadataType(mdtype);
 
@@ -91,7 +91,7 @@
 void ParallelOldTracer::send_parallel_old_event() const {
   EventGCParallelOld e(UNTIMED);
   if (e.should_commit()) {
-    e.set_gcId(_shared_gc_info.id());
+    e.set_gcId(_shared_gc_info.gc_id().id());
     e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix());
     e.set_starttime(_shared_gc_info.start_timestamp());
     e.set_endtime(_shared_gc_info.end_timestamp());
@@ -102,7 +102,7 @@
 void YoungGCTracer::send_young_gc_event() const {
   EventGCYoungGarbageCollection e(UNTIMED);
   if (e.should_commit()) {
-    e.set_gcId(_shared_gc_info.id());
+    e.set_gcId(_shared_gc_info.gc_id().id());
     e.set_tenuringThreshold(_tenuring_threshold);
     e.set_starttime(_shared_gc_info.start_timestamp());
     e.set_endtime(_shared_gc_info.end_timestamp());
@@ -113,7 +113,7 @@
 void OldGCTracer::send_old_gc_event() const {
   EventGCOldGarbageCollection e(UNTIMED);
   if (e.should_commit()) {
-    e.set_gcId(_shared_gc_info.id());
+    e.set_gcId(_shared_gc_info.gc_id().id());
     e.set_starttime(_shared_gc_info.start_timestamp());
     e.set_endtime(_shared_gc_info.end_timestamp());
     e.commit();
@@ -132,7 +132,7 @@
 void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const {
   EventPromotionFailed e;
   if (e.should_commit()) {
-    e.set_gcId(_shared_gc_info.id());
+    e.set_gcId(_shared_gc_info.gc_id().id());
     e.set_data(to_trace_struct(pf_info));
     e.set_thread(pf_info.thread()->thread_id());
     e.commit();
@@ -143,7 +143,7 @@
 void OldGCTracer::send_concurrent_mode_failure_event() {
   EventConcurrentModeFailure e;
   if (e.should_commit()) {
-    e.set_gcId(_shared_gc_info.id());
+    e.set_gcId(_shared_gc_info.gc_id().id());
     e.commit();
   }
 }
@@ -152,7 +152,7 @@
 void G1NewTracer::send_g1_young_gc_event() {
   EventGCG1GarbageCollection e(UNTIMED);
   if (e.should_commit()) {
-    e.set_gcId(_shared_gc_info.id());
+    e.set_gcId(_shared_gc_info.gc_id().id());
     e.set_type(_g1_young_gc_info.type());
     e.set_starttime(_shared_gc_info.start_timestamp());
     e.set_endtime(_shared_gc_info.end_timestamp());
@@ -163,7 +163,7 @@
 void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) {
   EventEvacuationInfo e;
   if (e.should_commit()) {
-    e.set_gcId(_shared_gc_info.id());
+    e.set_gcId(_shared_gc_info.gc_id().id());
     e.set_cSetRegions(info->collectionset_regions());
     e.set_cSetUsedBefore(info->collectionset_used_before());
     e.set_cSetUsedAfter(info->collectionset_used_after());
@@ -179,7 +179,7 @@
 void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const {
   EventEvacuationFailed e;
   if (e.should_commit()) {
-    e.set_gcId(_shared_gc_info.id());
+    e.set_gcId(_shared_gc_info.gc_id().id());
     e.set_data(to_trace_struct(ef_info));
     e.commit();
   }
@@ -206,17 +206,17 @@
 }
 
 class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
-  GCId _id;
+  GCId _gc_id;
   GCWhen::Type _when;
  public:
-  GCHeapSummaryEventSender(GCId id, GCWhen::Type when) : _id(id), _when(when) {}
+  GCHeapSummaryEventSender(GCId gc_id, GCWhen::Type when) : _gc_id(gc_id), _when(when) {}
 
   void visit(const GCHeapSummary* heap_summary) const {
     const VirtualSpaceSummary& heap_space = heap_summary->heap();
 
     EventGCHeapSummary e;
     if (e.should_commit()) {
-      e.set_gcId(_id);
+      e.set_gcId(_gc_id.id());
       e.set_when((u1)_when);
       e.set_heapSpace(to_trace_struct(heap_space));
       e.set_heapUsed(heap_summary->used());
@@ -236,7 +236,7 @@
 
     EventPSHeapSummary e;
     if (e.should_commit()) {
-      e.set_gcId(_id);
+      e.set_gcId(_gc_id.id());
       e.set_when((u1)_when);
 
       e.set_oldSpace(to_trace_struct(ps_heap_summary->old()));
@@ -251,7 +251,7 @@
 };
 
 void GCTracer::send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const {
-  GCHeapSummaryEventSender visitor(_shared_gc_info.id(), when);
+  GCHeapSummaryEventSender visitor(_shared_gc_info.gc_id(), when);
   heap_summary.accept(&visitor);
 }
 
@@ -268,7 +268,7 @@
 void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const {
   EventMetaspaceSummary e;
   if (e.should_commit()) {
-    e.set_gcId(_shared_gc_info.id());
+    e.set_gcId(_shared_gc_info.gc_id().id());
     e.set_when((u1) when);
     e.set_gcThreshold(meta_space_summary.capacity_until_GC());
     e.set_metaspace(to_trace_struct(meta_space_summary.meta_space()));
@@ -287,7 +287,7 @@
   void send_phase(PausePhase* pause) {
     T event(UNTIMED);
     if (event.should_commit()) {
-      event.set_gcId(_gc_id);
+      event.set_gcId(_gc_id.id());
       event.set_name(pause->name());
       event.set_starttime(pause->start());
       event.set_endtime(pause->end());
@@ -311,7 +311,7 @@
 };
 
 void GCTracer::send_phase_events(TimePartitions* time_partitions) const {
-  PhaseSender phase_reporter(_shared_gc_info.id());
+  PhaseSender phase_reporter(_shared_gc_info.gc_id());
 
   TimePartitionPhasesIterator iter(time_partitions);
   while (iter.has_next()) {
--- a/src/share/vm/gc_implementation/shared/gcTraceTime.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/shared/gcTraceTime.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/gcTraceTime.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/os.hpp"
@@ -34,7 +35,7 @@
 #include "utilities/ticks.inline.hpp"
 
 
-GCTraceTime::GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer) :
+GCTraceTime::GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer, GCId gc_id) :
     _title(title), _doit(doit), _print_cr(print_cr), _timer(timer), _start_counter() {
   if (_doit || _timer != NULL) {
     _start_counter.stamp();
@@ -52,6 +53,9 @@
       gclog_or_tty->stamp();
       gclog_or_tty->print(": ");
     }
+    if (PrintGCID) {
+      gclog_or_tty->print("#%u: ", gc_id.id());
+    }
     gclog_or_tty->print("[%s", title);
     gclog_or_tty->flush();
   }
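A hedged call-site sketch for the widened constructor (the _gc_timer and _gc_tracer fields below are placeholders for whatever the caller already owns; only GCTraceTime, gc_id() and PrintGCID come from this change):

  GCTraceTime t("Pause Young", PrintGCDetails, true, _gc_timer, _gc_tracer->gc_id());
  // With -XX:+PrintGCID the log line produced above is prefixed with "#<id>: [Pause Young".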
--- a/src/share/vm/gc_implementation/shared/gcTraceTime.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/shared/gcTraceTime.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP
 
+#include "gc_implementation/shared/gcTrace.hpp"
 #include "prims/jni_md.h"
 #include "utilities/ticks.hpp"
 
@@ -38,7 +39,7 @@
   Ticks _start_counter;
 
  public:
-  GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer);
+  GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer, GCId gc_id);
   ~GCTraceTime();
 };
 
--- a/src/share/vm/gc_implementation/shared/markSweep.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/shared/markSweep.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -49,27 +49,19 @@
 SerialOldTracer*        MarkSweep::_gc_tracer       = NULL;
 
 MarkSweep::FollowRootClosure  MarkSweep::follow_root_closure;
-CodeBlobToOopClosure MarkSweep::follow_code_root_closure(&MarkSweep::follow_root_closure, /*do_marking=*/ true);
 
 void MarkSweep::FollowRootClosure::do_oop(oop* p)       { follow_root(p); }
 void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
 
 MarkSweep::MarkAndPushClosure MarkSweep::mark_and_push_closure;
-MarkSweep::FollowKlassClosure MarkSweep::follow_klass_closure;
-MarkSweep::AdjustKlassClosure MarkSweep::adjust_klass_closure;
+CLDToOopClosure               MarkSweep::follow_cld_closure(&mark_and_push_closure);
+CLDToOopClosure               MarkSweep::adjust_cld_closure(&adjust_pointer_closure);
 
 void MarkSweep::MarkAndPushClosure::do_oop(oop* p)       { mark_and_push(p); }
 void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(p); }
 
-void MarkSweep::FollowKlassClosure::do_klass(Klass* klass) {
-  klass->oops_do(&MarkSweep::mark_and_push_closure);
-}
-void MarkSweep::AdjustKlassClosure::do_klass(Klass* klass) {
-  klass->oops_do(&MarkSweep::adjust_pointer_closure);
-}
-
 void MarkSweep::follow_class_loader(ClassLoaderData* cld) {
-  cld->oops_do(&MarkSweep::mark_and_push_closure, &MarkSweep::follow_klass_closure, true);
+  MarkSweep::follow_cld_closure.do_cld(cld);
 }
 
 void MarkSweep::follow_stack() {
--- a/src/share/vm/gc_implementation/shared/markSweep.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/shared/markSweep.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -65,17 +65,6 @@
     virtual void do_oop(narrowOop* p);
   };
 
-  // The one and only place to start following the classes.
-  // Should only be applied to the ClassLoaderData klasses list.
-  class FollowKlassClosure : public KlassClosure {
-   public:
-    void do_klass(Klass* klass);
-  };
-  class AdjustKlassClosure : public KlassClosure {
-   public:
-    void do_klass(Klass* klass);
-  };
-
   class FollowStackClosure: public VoidClosure {
    public:
     virtual void do_void();
@@ -143,12 +132,11 @@
   // Public closures
   static IsAliveClosure       is_alive;
   static FollowRootClosure    follow_root_closure;
-  static CodeBlobToOopClosure follow_code_root_closure; // => follow_root_closure
   static MarkAndPushClosure   mark_and_push_closure;
-  static FollowKlassClosure   follow_klass_closure;
   static FollowStackClosure   follow_stack_closure;
+  static CLDToOopClosure      follow_cld_closure;
   static AdjustPointerClosure adjust_pointer_closure;
-  static AdjustKlassClosure   adjust_klass_closure;
+  static CLDToOopClosure      adjust_cld_closure;
 
   // Accessors
   static uint total_invocations() { return _total_invocations; }
--- a/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -24,12 +24,13 @@
 
 
 #include "precompiled.hpp"
+#include "gc_implementation/shared/gcId.hpp"
 #include "gc_implementation/shared/objectCountEventSender.hpp"
 #include "memory/heapInspection.hpp"
 #include "trace/tracing.hpp"
 #include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/ticks.hpp"
-
 #if INCLUDE_SERVICES
 
 void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, const Ticks& timestamp) {
@@ -38,7 +39,7 @@
          "Only call this method if the event is enabled");
 
   EventObjectCountAfterGC event(UNTIMED);
-  event.set_gcId(gc_id);
+  event.set_gcId(gc_id.id());
   event.set_class(entry->klass());
   event.set_count(entry->count());
   event.set_totalSize(entry->words() * BytesPerWord);
--- a/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -24,7 +24,7 @@
 
 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
-
+#include "gc_interface/collectedHeap.hpp"
 #include "memory/allocation.hpp"
 #include "memory/blockOffsetTable.hpp"
 #include "memory/threadLocalAllocBuffer.hpp"
@@ -60,9 +60,11 @@
   // Initializes the buffer to be empty, but with the given "word_sz".
   // Must get initialized with "set_buf" for an allocation to succeed.
   ParGCAllocBuffer(size_t word_sz);
+  virtual ~ParGCAllocBuffer() {}
 
   static const size_t min_size() {
-    return ThreadLocalAllocBuffer::min_size();
+    // Make sure that we return something that is larger than AlignmentReserve
+    return align_object_size(MAX2(MinTLABSize / HeapWordSize, (uintx)oopDesc::header_size())) + AlignmentReserve;
   }
 
   static const size_t max_size() {
@@ -83,6 +85,9 @@
     }
   }
 
+  // Allocate the object aligned to "alignment_in_bytes".
+  HeapWord* allocate_aligned(size_t word_sz, unsigned short alignment_in_bytes);
+
   // Undo the last allocation in the buffer, which is required to be of the
   // "obj" of the given "word_sz".
   void undo_allocation(HeapWord* obj, size_t word_sz) {
@@ -113,7 +118,7 @@
   }
 
   // Sets the space of the buffer to be [buf, space+word_sz()).
-  void set_buf(HeapWord* buf) {
+  virtual void set_buf(HeapWord* buf) {
     _bottom   = buf;
     _top      = _bottom;
     _hard_end = _bottom + word_sz();
@@ -158,7 +163,7 @@
   // Fills in the unallocated portion of the buffer with a garbage object.
   // If "end_of_gc" is TRUE, is after the last use in the GC.  IF "retain"
   // is true, attempt to re-use the unused portion in the next GC.
-  void retire(bool end_of_gc, bool retain);
+  virtual void retire(bool end_of_gc, bool retain);
 
   void print() PRODUCT_RETURN;
 };
@@ -238,14 +243,14 @@
 
   void undo_allocation(HeapWord* obj, size_t word_sz);
 
-  void set_buf(HeapWord* buf_start) {
+  virtual void set_buf(HeapWord* buf_start) {
     ParGCAllocBuffer::set_buf(buf_start);
     _true_end = _hard_end;
     _bt.set_region(MemRegion(buf_start, word_sz()));
     _bt.initialize_threshold();
   }
 
-  void retire(bool end_of_gc, bool retain);
+  virtual void retire(bool end_of_gc, bool retain);
 
   MemRegion range() {
     return MemRegion(_top, _true_end);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_PARGCALLOCBUFFER_INLINE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_PARGCALLOCBUFFER_INLINE_HPP
+
+#include "gc_implementation/shared/parGCAllocBuffer.hpp"
+#include "gc_interface/collectedHeap.inline.hpp"
+
+HeapWord* ParGCAllocBuffer::allocate_aligned(size_t word_sz, unsigned short alignment_in_bytes) {
+
+  HeapWord* res = CollectedHeap::align_allocation_or_fail(_top, _end, alignment_in_bytes);
+  if (res == NULL) {
+    return NULL;
+  }
+
+  // Set _top so that allocate(), which expects _top to be correctly set,
+  // can be used below.
+  _top = res;
+  return allocate(word_sz);
+}
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_PARGCALLOCBUFFER_INLINE_HPP
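A hedged usage sketch for the new allocate_aligned() (the plab pointer, word_sz and the 64-byte alignment are illustrative values supplied by a caller, not part of this file):

  HeapWord* obj = plab->allocate_aligned(word_sz, 64);
  if (obj == NULL) {
    // Either the padding filler or the object itself did not fit in the
    // remaining buffer space; the caller falls back to its slow path.
  }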
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/suspendibleThreadSet.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/shared/suspendibleThreadSet.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/thread.inline.hpp"
+
+uint   SuspendibleThreadSet::_nthreads          = 0;
+uint   SuspendibleThreadSet::_nthreads_stopped  = 0;
+bool   SuspendibleThreadSet::_suspend_all       = false;
+double SuspendibleThreadSet::_suspend_all_start = 0.0;
+
+void SuspendibleThreadSet::join() {
+  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
+  while (_suspend_all) {
+    ml.wait(Mutex::_no_safepoint_check_flag);
+  }
+  _nthreads++;
+}
+
+void SuspendibleThreadSet::leave() {
+  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
+  assert(_nthreads > 0, "Invalid");
+  _nthreads--;
+  if (_suspend_all) {
+    ml.notify_all();
+  }
+}
+
+void SuspendibleThreadSet::yield() {
+  if (_suspend_all) {
+    MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
+    if (_suspend_all) {
+      _nthreads_stopped++;
+      if (_nthreads_stopped == _nthreads) {
+        if (ConcGCYieldTimeout > 0) {
+          double now = os::elapsedTime();
+          guarantee((now - _suspend_all_start) * 1000.0 < (double)ConcGCYieldTimeout, "Long delay");
+        }
+      }
+      ml.notify_all();
+      while (_suspend_all) {
+        ml.wait(Mutex::_no_safepoint_check_flag);
+      }
+      assert(_nthreads_stopped > 0, "Invalid");
+      _nthreads_stopped--;
+      ml.notify_all();
+    }
+  }
+}
+
+void SuspendibleThreadSet::synchronize() {
+  assert(Thread::current()->is_VM_thread(), "Must be the VM thread");
+  if (ConcGCYieldTimeout > 0) {
+    _suspend_all_start = os::elapsedTime();
+  }
+  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
+  assert(!_suspend_all, "Only one at a time");
+  _suspend_all = true;
+  while (_nthreads_stopped < _nthreads) {
+    ml.wait(Mutex::_no_safepoint_check_flag);
+  }
+}
+
+void SuspendibleThreadSet::desynchronize() {
+  assert(Thread::current()->is_VM_thread(), "Must be the VM thread");
+  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
+  assert(_nthreads_stopped == _nthreads, "Invalid");
+  _suspend_all = false;
+  ml.notify_all();
+}

--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/suspendibleThreadSet.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_SUSPENDIBLETHREADSET_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_SUSPENDIBLETHREADSET_HPP
+
+#include "memory/allocation.hpp"
+
+// A SuspendibleThreadSet is a set of threads that can be suspended.
+// A thread can join and later leave the set, and periodically yield.
+// If some thread (not in the set) requests, via synchronize(), that
+// the threads be suspended, then the requesting thread is blocked
+// until all the threads in the set have yielded or left the set. Threads
+// may not enter the set when an attempted suspension is in progress. The
+// suspending thread later calls desynchronize(), allowing the suspended
+// threads to continue.
+class SuspendibleThreadSet : public AllStatic {
+private:
+  static uint   _nthreads;
+  static uint   _nthreads_stopped;
+  static bool   _suspend_all;
+  static double _suspend_all_start;
+
+public:
+  // Adds the current thread to the set. May block if a suspension is in progress.
+  static void join();
+
+  // Removes the current thread from the set.
+  static void leave();
+
+  // Returns true if a suspension is in progress.
+  static bool should_yield() { return _suspend_all; }
+
+  // Suspends the current thread if a suspension is in progress.
+  static void yield();
+
+  // Returns when all threads in the set are suspended.
+  static void synchronize();
+
+  // Resumes all suspended threads in the set.
+  static void desynchronize();
+};
+
+class SuspendibleThreadSetJoiner : public StackObj {
+public:
+  SuspendibleThreadSetJoiner() {
+    SuspendibleThreadSet::join();
+  }
+
+  ~SuspendibleThreadSetJoiner() {
+    SuspendibleThreadSet::leave();
+  }
+
+  bool should_yield() {
+    return SuspendibleThreadSet::should_yield();
+  }
+
+  void yield() {
+    SuspendibleThreadSet::yield();
+  }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_SUSPENDIBLETHREADSET_HPP
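A hedged sketch of how the two halves of this protocol interact (the worker helpers below are hypothetical stubs; only the SuspendibleThreadSet and SuspendibleThreadSetJoiner calls come from this file):

#include "gc_implementation/shared/suspendibleThreadSet.hpp"

static bool has_more_work();         // hypothetical helper, declared only
static void do_one_unit_of_work();   // hypothetical helper, declared only

// Concurrent GC worker side: join on construction, leave on scope exit,
// and yield at safe points inside the work loop.
void concurrent_work_loop() {
  SuspendibleThreadSetJoiner sts;
  while (has_more_work()) {
    do_one_unit_of_work();
    if (sts.should_yield()) {
      sts.yield();                   // blocks while the VM thread holds the set suspended
    }
  }
}

// VM thread side, bracketing work that must not race with the set:
//   SuspendibleThreadSet::synchronize();    // returns once every joined thread has yielded
//   ...                                     // exclusive work
//   SuspendibleThreadSet::desynchronize();  // releases the yielded threads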
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -209,6 +209,45 @@
   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
 }
 
+// Returns true iff a concurrent GC unloads metadata.
+bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
+#if INCLUDE_ALL_GCS
+  if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
+    MetaspaceGC::set_should_concurrent_collect(true);
+    return true;
+  }
+
+  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+    g1h->g1_policy()->set_initiate_conc_mark_if_possible();
+
+    GCCauseSetter x(g1h, _gc_cause);
+
+    // At this point we are supposed to start a concurrent cycle. We
+    // will do so if one is not already in progress.
+    bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
+
+    if (should_start) {
+      double pause_target = g1h->g1_policy()->max_pause_time_ms();
+      g1h->do_collection_pause_at_safepoint(pause_target);
+    }
+    return true;
+  }
+#endif
+
+  return false;
+}
+
+static void log_metaspace_alloc_failure_for_concurrent_GC() {
+  if (Verbose && PrintGCDetails) {
+    if (UseConcMarkSweepGC) {
+      gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
+    } else if (UseG1GC) {
+      gclog_or_tty->print_cr("\nG1 full GC for Metaspace");
+    }
+  }
+}
+
 void VM_CollectForMetadataAllocation::doit() {
   SvcGCMarker sgcm(SvcGCMarker::FULL);
 
@@ -220,54 +259,57 @@
   // a GC that freed space for the allocation.
   if (!MetadataAllocationFailALot) {
     _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
-  }
-
-  if (_result == NULL) {
-    if (UseConcMarkSweepGC) {
-      if (CMSClassUnloadingEnabled) {
-        MetaspaceGC::set_should_concurrent_collect(true);
-      }
-      // For CMS expand since the collection is going to be concurrent.
-      _result =
-        _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
-    }
-    if (_result == NULL) {
-      // Don't clear the soft refs yet.
-      if (Verbose && PrintGCDetails && UseConcMarkSweepGC) {
-        gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
-      }
-      heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
-      // After a GC try to allocate without expanding.  Could fail
-      // and expansion will be tried below.
-      _result =
-        _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
-    }
-    if (_result == NULL) {
-      // If still failing, allow the Metaspace to expand.
-      // See delta_capacity_until_GC() for explanation of the
-      // amount of the expansion.
-      // This should work unless there really is no more space
-      // or a MaxMetaspaceSize has been specified on the command line.
-      _result =
-        _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
-      if (_result == NULL) {
-        // If expansion failed, do a last-ditch collection and try allocating
-        // again.  A last-ditch collection will clear softrefs.  This
-        // behavior is similar to the last-ditch collection done for perm
-        // gen when it was full and a collection for failed allocation
-        // did not free perm gen space.
-        heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
-        _result =
-          _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
-      }
-    }
-    if (Verbose && PrintGCDetails && _result == NULL) {
-      gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
-                             SIZE_FORMAT, _size);
+    if (_result != NULL) {
+      return;
     }
   }
 
-  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
+  if (initiate_concurrent_GC()) {
+    // For CMS and G1 expand since the collection is going to be concurrent.
+    _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
+    if (_result != NULL) {
+      return;
+    }
+
+    log_metaspace_alloc_failure_for_concurrent_GC();
+  }
+
+  // Don't clear the soft refs yet.
+  heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
+  // After a GC try to allocate without expanding.  Could fail
+  // and expansion will be tried below.
+  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
+  if (_result != NULL) {
+    return;
+  }
+
+  // If still failing, allow the Metaspace to expand.
+  // See delta_capacity_until_GC() for explanation of the
+  // amount of the expansion.
+  // This should work unless there really is no more space
+  // or a MaxMetaspaceSize has been specified on the command line.
+  _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
+  if (_result != NULL) {
+    return;
+  }
+
+  // If expansion failed, do a last-ditch collection and try allocating
+  // again.  A last-ditch collection will clear softrefs.  This
+  // behavior is similar to the last-ditch collection done for perm
+  // gen when it was full and a collection for failed allocation
+  // did not free perm gen space.
+  heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
+  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
+  if (_result != NULL) {
+    return;
+  }
+
+  if (Verbose && PrintGCDetails) {
+    gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
+                           SIZE_FORMAT, _size);
+  }
+
+  if (GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
 }
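Taken together, the rewritten doit() above is a straight-line retry ladder; summarized for reference (ordering only, every name is from the hunk above):

  //   1. allocate()                                      - fast path, no GC
  //   2. if initiate_concurrent_GC(): expand_and_allocate()
  //   3. collect_as_vm_thread(_metadata_GC_threshold), then allocate()
  //   4. expand_and_allocate()
  //   5. collect_as_vm_thread(_last_ditch_collection), then allocate()
  // doit() returns as soon as any step leaves _result non-NULL.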
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -217,6 +217,8 @@
   virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; }
   virtual void doit();
   MetaWord* result() const       { return _result; }
+
+  bool initiate_concurrent_GC();
 };
 
 class SvcGCMarker : public StackObj {
--- a/src/share/vm/gc_interface/collectedHeap.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_interface/collectedHeap.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -561,13 +561,13 @@
 
 void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
   if (HeapDumpBeforeFullGC) {
-    GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer);
+    GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer, GCId::create());
     // We are doing a "major" collection and a heap dump before
     // major collection has been requested.
     HeapDumper::dump_heap();
   }
   if (PrintClassHistogramBeforeFullGC) {
-    GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer);
+    GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer, GCId::create());
     VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
     inspector.doit();
   }
@@ -575,11 +575,11 @@
 
 void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
   if (HeapDumpAfterFullGC) {
-    GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer);
+    GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer, GCId::create());
     HeapDumper::dump_heap();
   }
   if (PrintClassHistogramAfterFullGC) {
-    GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer);
+    GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer, GCId::create());
     VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
     inspector.doit();
   }
--- a/src/share/vm/gc_interface/collectedHeap.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_interface/collectedHeap.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -351,6 +351,12 @@
     fill_with_object(start, pointer_delta(end, start), zap);
   }
 
+  // Return the address "addr" aligned by "alignment_in_bytes" if such
+  // an address is below "end".  Return NULL otherwise.
+  inline static HeapWord* align_allocation_or_fail(HeapWord* addr,
+                                                   HeapWord* end,
+                                                   unsigned short alignment_in_bytes);
+
   // Some heaps may offer a contiguous region for shared non-blocking
   // allocation, via inlined code (by exporting the address of the top and
   // end fields defining the extent of the contiguous allocation region.)
@@ -389,15 +395,6 @@
   // allocation from them and necessitating allocation of new TLABs.
   virtual void ensure_parsability(bool retire_tlabs);
 
-  // Return an estimate of the maximum allocation that could be performed
-  // without triggering any collection or expansion activity.  In a
-  // generational collector, for example, this is probably the largest
-  // allocation that could be supported (without expansion) in the youngest
-  // generation.  It is "unsafe" because no locks are taken; the result
-  // should be treated as an approximation, not a guarantee, for use in
-  // heuristic resizing decisions.
-  virtual size_t unsafe_max_alloc() = 0;
-
   // Section on thread-local allocation buffers (TLABs)
   // If the heap supports thread-local allocation buffers, it should override
   // the following methods:
@@ -640,6 +637,18 @@
   // actual number may be germane.
   static bool use_parallel_gc_threads() { return ParallelGCThreads > 0; }
 
+  // Copy the current allocation context statistics for the specified contexts.
+  // For each context in contexts, set the corresponding entries in the totals
+  // and accuracy arrays to the current values held by the statistics.  Each
+  // array should be of length len.
+  // Returns true if there are more stats available.
+  virtual bool copy_allocation_context_stats(const jint* contexts,
+                                             jlong* totals,
+                                             jbyte* accuracy,
+                                             jint len) {
+    return false;
+  }
+
   /////////////// Unit tests ///////////////
 
   NOT_PRODUCT(static void test_is_in();)
--- a/src/share/vm/gc_interface/collectedHeap.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_interface/collectedHeap.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -241,6 +241,44 @@
   oop_iterate(&no_header_cl);
 }
 
+
+inline HeapWord* CollectedHeap::align_allocation_or_fail(HeapWord* addr,
+                                                         HeapWord* end,
+                                                         unsigned short alignment_in_bytes) {
+  if (alignment_in_bytes <= ObjectAlignmentInBytes) {
+    return addr;
+  }
+
+  assert(is_ptr_aligned(addr, HeapWordSize),
+    err_msg("Address " PTR_FORMAT " is not properly aligned.", p2i(addr)));
+  assert(is_size_aligned(alignment_in_bytes, HeapWordSize),
+    err_msg("Alignment size %u is incorrect.", alignment_in_bytes));
+
+  HeapWord* new_addr = (HeapWord*) align_pointer_up(addr, alignment_in_bytes);
+  size_t padding = pointer_delta(new_addr, addr);
+
+  if (padding == 0) {
+    return addr;
+  }
+
+  if (padding < CollectedHeap::min_fill_size()) {
+    padding += alignment_in_bytes / HeapWordSize;
+    assert(padding >= CollectedHeap::min_fill_size(),
+      err_msg("alignment_in_bytes %u is expect to be larger "
+      "than the minimum object size", alignment_in_bytes));
+    new_addr = addr + padding;
+  }
+
+  assert(new_addr > addr, err_msg("Unexpected arithmetic overflow "
+    PTR_FORMAT " not greater than " PTR_FORMAT, p2i(new_addr), p2i(addr)));
+  if (new_addr < end) {
+    CollectedHeap::fill_with_object(addr, padding);
+    return new_addr;
+  } else {
+    return NULL;
+  }
+}
+
 #ifndef PRODUCT
 
 inline bool
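A hedged worked example of the padding arithmetic in align_allocation_or_fail() (the constants are illustrative: 8-byte HeapWords and a 2-word min_fill_size() stand-in, neither taken from this hunk):

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t heap_word_size = 8;   // 64-bit HeapWord, assumed
  const uintptr_t min_fill_words = 2;   // stand-in for CollectedHeap::min_fill_size()
  uintptr_t addr      = 0x1038;         // current allocation top
  uintptr_t alignment = 64;             // requested alignment in bytes

  uintptr_t new_addr = (addr + alignment - 1) & ~(alignment - 1);  // 0x1040
  uintptr_t padding  = (new_addr - addr) / heap_word_size;         // 1 word of gap

  if (padding != 0 && padding < min_fill_words) {
    // The gap is too small to hold a filler object, so pad all the way to the
    // next aligned boundary (the branch above that adds alignment_in_bytes / HeapWordSize).
    padding += alignment / heap_word_size;
    new_addr = addr + padding * heap_word_size;
  }

  // Prints: padding=9 words, new_addr=0x1080 (still 64-byte aligned)
  std::printf("padding=%lu words, new_addr=0x%lx\n",
              (unsigned long)padding, (unsigned long)new_addr);
  return 0;
}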
--- a/src/share/vm/gc_interface/gcCause.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_interface/gcCause.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -51,6 +51,13 @@
     case _heap_dump:
       return "Heap Dump Initiated GC";
 
+    case _wb_young_gc:
+      return "WhiteBox Initiated Young GC";
+
+    case _update_allocation_context_stats_inc:
+    case _update_allocation_context_stats_full:
+      return "Update Allocation Context Stats";
+
     case _no_gc:
       return "No GC";
 
--- a/src/share/vm/gc_interface/gcCause.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/gc_interface/gcCause.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -46,6 +46,9 @@
     _gc_locker,
     _heap_inspection,
     _heap_dump,
+    _wb_young_gc,
+    _update_allocation_context_stats_inc,
+    _update_allocation_context_stats_full,
 
     /* implementation independent, but reserved for GC use */
     _no_gc,
--- a/src/share/vm/graal/graalCodeInstaller.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/graal/graalCodeInstaller.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -107,7 +107,7 @@
 
 public:
 
-  CodeInstaller() {}
+  CodeInstaller() : _arena(mtCompiler) {}
   GraalEnv::CodeInstallResult install(Handle& compiled_code, CodeBlob*& cb, Handle installed_code, Handle speculation_log);
 
   static address runtime_call_target_address(oop runtime_call);
--- a/src/share/vm/graal/graalCompilerToVM.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/graal/graalCompilerToVM.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -720,14 +720,8 @@
   return klass;
 C2V_END
 
-C2V_VMENTRY(jobject, readUnsafeOop, (JNIEnv*, jobject, jobject base, jlong displacement, jboolean compressed))
-  address addr = (address)JNIHandles::resolve(base);
-  oop ret;
-  if (compressed) {
-    ret = oopDesc::load_decode_heap_oop((narrowOop*)(addr + displacement));
-  } else {
-    ret = oopDesc::load_decode_heap_oop((oop*)(addr + displacement));
-  }
+C2V_VMENTRY(jobject, readUncompressedOop, (JNIEnv*, jobject, jlong addr))
+  oop ret = oopDesc::load_decode_heap_oop((oop*)(address)addr);
   return JNIHandles::make_local(ret);
 C2V_END
 
@@ -837,9 +831,7 @@
             bool reallocated = false;
             if (objects != NULL) {
               reallocated = Deoptimization::realloc_objects(thread, fst.current(), objects, THREAD);
-              if (reallocated) {
-                Deoptimization::reassign_fields(fst.current(), fst.register_map(), objects);
-              }
+              Deoptimization::reassign_fields(fst.current(), fst.register_map(), objects, reallocated);
 
               GrowableArray<ScopeValue*>* local_values = cvf->scope()->locals();
               typeArrayHandle array = oopFactory::new_boolArray(local_values->length(), thread);
@@ -984,38 +976,36 @@
   }
 
   bool reallocated = Deoptimization::realloc_objects(thread, fst.current(), objects, THREAD);
-  if (reallocated) {
-    Deoptimization::reassign_fields(fst.current(), fst.register_map(), objects);
+  Deoptimization::reassign_fields(fst.current(), fst.register_map(), objects, reallocated);
 
-    for (int frame_index = 0; frame_index < virtualFrames->length(); frame_index++) {
-      compiledVFrame* cvf = virtualFrames->at(frame_index);
+  for (int frame_index = 0; frame_index < virtualFrames->length(); frame_index++) {
+    compiledVFrame* cvf = virtualFrames->at(frame_index);
 
-      GrowableArray<ScopeValue*>* scopeLocals = cvf->scope()->locals();
-      StackValueCollection* locals = cvf->locals();
+    GrowableArray<ScopeValue*>* scopeLocals = cvf->scope()->locals();
+    StackValueCollection* locals = cvf->locals();
 
-      if (locals != NULL) {
-        for (int i2 = 0; i2 < locals->size(); i2++) {
-          StackValue* var = locals->at(i2);
-          if (var->type() == T_OBJECT && scopeLocals->at(i2)->is_object()) {
-            jvalue val;
-            val.l = (jobject) locals->at(i2)->get_obj()();
-            cvf->update_local(T_OBJECT, i2, val);
-          }
+    if (locals != NULL) {
+      for (int i2 = 0; i2 < locals->size(); i2++) {
+        StackValue* var = locals->at(i2);
+        if (var->type() == T_OBJECT && scopeLocals->at(i2)->is_object()) {
+          jvalue val;
+          val.l = (jobject) locals->at(i2)->get_obj()();
+          cvf->update_local(T_OBJECT, i2, val);
         }
       }
     }
+  }
 
-    // all locals are materialized by now
-    HotSpotStackFrameReference::set_localIsVirtual(hs_frame, NULL);
+  // all locals are materialized by now
+  HotSpotStackFrameReference::set_localIsVirtual(hs_frame, NULL);
 
-    // update the locals array
-    objArrayHandle array = HotSpotStackFrameReference::locals(hs_frame);
-    StackValueCollection* locals = virtualFrames->at(last_frame_number)->locals();
-    for (int i = 0; i < locals->size(); i++) {
-      StackValue* var = locals->at(i);
-      if (var->type() == T_OBJECT) {
-        array->obj_at_put(i, locals->at(i)->get_obj()());
-      }
+  // update the locals array
+  objArrayHandle array = HotSpotStackFrameReference::locals(hs_frame);
+  StackValueCollection* locals = virtualFrames->at(last_frame_number)->locals();
+  for (int i = 0; i < locals->size(); i++) {
+    StackValue* var = locals->at(i);
+    if (var->type() == T_OBJECT) {
+      array->obj_at_put(i, locals->at(i)->get_obj()());
     }
   }
 C2V_END
@@ -1098,7 +1088,7 @@
   {CC"invalidateInstalledCode",                      CC"("INSTALLED_CODE")V",                                                  FN_PTR(invalidateInstalledCode)},
   {CC"getJavaMirror",                                CC"("METASPACE_KLASS")"CLASS,                                             FN_PTR(getJavaMirror)},
   {CC"readUnsafeKlassPointer",                       CC"("OBJECT")J",                                                          FN_PTR(readUnsafeKlassPointer)},
-  {CC"readUnsafeOop",                                CC"("OBJECT"JZ)"OBJECT,                                                   FN_PTR(readUnsafeOop)},
+  {CC"readUncompressedOop",                          CC"(J)"OBJECT,                                                            FN_PTR(readUncompressedOop)},
   {CC"collectCounters",                              CC"()[J",                                                                 FN_PTR(collectCounters)},
   {CC"allocateCompileId",                            CC"("METASPACE_METHOD"I)I",                                               FN_PTR(allocateCompileId)},
   {CC"isMature",                                     CC"("METASPACE_METHOD_DATA")Z",                                           FN_PTR(isMature)},
--- a/src/share/vm/graal/graalRuntime.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/graal/graalRuntime.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -652,18 +652,8 @@
 
 // private static NativeFunctionInterfaceRuntime.createInterface()
 JVM_ENTRY(jobject, JVM_CreateNativeFunctionInterface(JNIEnv *env, jclass c))
-  const char* backendName = NULL;
-  #ifdef TARGET_ARCH_x86
-  #ifdef _LP64
-    backendName = "com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend";
-  #endif
-  #endif
-
-  if (backendName == NULL) {
-    return NULL;
-  }
   GraalRuntime::ensure_graal_class_loader_is_initialized();
-  TempNewSymbol name = SymbolTable::new_symbol(backendName, CHECK_NULL);
+  TempNewSymbol name = SymbolTable::new_symbol("com/oracle/graal/truffle/hotspot/HotSpotTruffleRuntime", CHECK_NULL);
   KlassHandle klass = GraalRuntime::resolve_or_fail(name, CHECK_NULL);
 
   TempNewSymbol makeInstance = SymbolTable::new_symbol("createNativeFunctionInterface", CHECK_NULL);
--- a/src/share/vm/interpreter/bytecodeInterpreter.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,43 +41,10 @@
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/threadCritical.hpp"
 #include "utilities/exceptions.hpp"
-#ifdef TARGET_OS_ARCH_linux_x86
-# include "orderAccess_linux_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_sparc
-# include "orderAccess_linux_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_zero
-# include "orderAccess_linux_zero.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_x86
-# include "orderAccess_solaris_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_sparc
-# include "orderAccess_solaris_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_windows_x86
-# include "orderAccess_windows_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_arm
-# include "orderAccess_linux_arm.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_ppc
-# include "orderAccess_linux_ppc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_aix_ppc
-# include "orderAccess_aix_ppc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_x86
-# include "orderAccess_bsd_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_zero
-# include "orderAccess_bsd_zero.inline.hpp"
-#endif
-
 
 // no precompiled headers
 #ifdef CC_INTERP
@@ -2846,11 +2813,11 @@
       if (TraceExceptions) {
         ttyLocker ttyl;
         ResourceMark rm;
-        tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), (void*)except_oop());
+        tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), p2i(except_oop()));
         tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
         tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT,
-                      istate->bcp() - (intptr_t)METHOD->code_base(),
-                      continuation_bci, THREAD);
+                      (int)(istate->bcp() - METHOD->code_base()),
+                      (int)continuation_bci, p2i(THREAD));
       }
       // for AbortVMOnException flag
       NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
@@ -2862,11 +2829,11 @@
     if (TraceExceptions) {
       ttyLocker ttyl;
       ResourceMark rm;
-      tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), (void*)except_oop());
+      tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), p2i(except_oop()));
       tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
       tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT,
-                    istate->bcp() - (intptr_t)METHOD->code_base(),
-                    THREAD);
+                    (int)(istate->bcp() - METHOD->code_base()),
+                    p2i(THREAD));
     }
     // for AbortVMOnException flag
     NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
@@ -3465,7 +3432,7 @@
   tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
   tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
   tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
-  tty->print_cr("native_mirror: " INTPTR_FORMAT, (void*) this->_oop_temp);
+  tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) this->_oop_temp);
   tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
   tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
   tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
--- a/src/share/vm/interpreter/bytecodeInterpreterProfiling.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/interpreter/bytecodeInterpreterProfiling.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -86,11 +86,11 @@
                   " \t-> " PTR_FORMAT "(%d)",                                  \
                 (int) THREAD->osthread()->thread_id(),                         \
                 BCI(),                                                         \
-                MDX(),                                                         \
+                p2i(MDX()),                                                    \
                 (MDX() == NULL                                                 \
                  ? 0                                                           \
                  : istate->method()->method_data()->dp_to_di((address)MDX())), \
-                mdx,                                                           \
+                p2i(mdx),                                                      \
                 istate->method()->method_data()->dp_to_di((address)mdx)        \
                 );                                                             \
   };                                                                           \
@@ -107,7 +107,7 @@
     MethodData *md = istate->method()->method_data();                          \
     tty->cr();                                                                 \
     tty->print("method data at mdx " PTR_FORMAT "(0) for",                     \
-               md->data_layout_at(md->bci_to_di(0)));                          \
+               p2i(md->data_layout_at(md->bci_to_di(0))));                     \
     istate->method()->print_short_name(tty);                                   \
     tty->cr();                                                                 \
     if (md != NULL) {                                                          \
@@ -115,7 +115,7 @@
       address mdx = (address) MDX();                                           \
       if (mdx != NULL) {                                                       \
         tty->print_cr("current mdx " PTR_FORMAT "(%d)",                        \
-                      mdx,                                                     \
+                      p2i(mdx),                                                \
                       istate->method()->method_data()->dp_to_di(mdx));         \
       }                                                                        \
     } else {                                                                   \
--- a/src/share/vm/interpreter/bytecodes.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/interpreter/bytecodes.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -420,8 +420,10 @@
   static bool        is_astore      (Code code)    { return (code == _astore || code == _astore_0 || code == _astore_1
                                                                              || code == _astore_2 || code == _astore_3); }
 
+  static bool        is_const       (Code code)    { return (_aconst_null <= code && code <= _ldc2_w); }
   static bool        is_zero_const  (Code code)    { return (code == _aconst_null || code == _iconst_0
                                                            || code == _fconst_0 || code == _dconst_0); }
+  static bool        is_return      (Code code)    { return (_ireturn <= code && code <= _return); }
   static bool        is_invoke      (Code code)    { return (_invokevirtual <= code && code <= _invokedynamic); }
   static bool        has_receiver   (Code code)    { assert(is_invoke(code), "");  return code == _invokevirtual ||
                                                                                           code == _invokespecial ||
--- a/src/share/vm/interpreter/interpreter.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/interpreter/interpreter.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -53,7 +53,9 @@
  public:
   // Initialization/finalization
   void    initialize(int size,
-                     CodeStrings& strings)       { _size = size; DEBUG_ONLY(_strings.assign(strings);) }
+                     CodeStrings& strings)       { _size = size;
+                                                   DEBUG_ONLY(::new(&_strings) CodeStrings();)
+                                                   DEBUG_ONLY(_strings.assign(strings);) }
   void    finalize()                             { ShouldNotCallThis(); }
 
   // General info/converters
--- a/src/share/vm/interpreter/interpreterRuntime.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -398,6 +398,18 @@
   int                handler_bci;
   int                current_bci = bci(thread);
 
+  if (thread->frames_to_pop_failed_realloc() > 0) {
+    // Allocation of scalar replaced object used in this frame
+    // failed. Unconditionally pop the frame.
+    thread->dec_frames_to_pop_failed_realloc();
+    thread->set_vm_result(h_exception());
+    // If the method is synchronized we already unlocked the monitor
+    // during deoptimization so the interpreter needs to skip it when
+    // the frame is popped.
+    thread->set_do_not_unlock_if_synchronized(true);
+    return Interpreter::remove_activation_entry();
+  }
+
   // Need to do this check first since when _do_not_unlock_if_synchronized
   // is set, we don't want to trigger any classloading which may make calls
   // into java, or surprisingly find a matching exception handler for bci 0
@@ -1280,8 +1292,10 @@
 // This is a support of the JVMTI PopFrame interface.
 // Make sure it is an invokestatic of a polymorphic intrinsic that has a member_name argument
 // and return it as a vm_result so that it can be reloaded in the list of invokestatic parameters.
-// The dmh argument is a reference to a DirectMethoHandle that has a member name field.
-IRT_ENTRY(void, InterpreterRuntime::member_name_arg_or_null(JavaThread* thread, address dmh,
+// The member_name argument is a saved reference (in local#0) to the member_name.
+// For backward compatibility with some JDK versions (7, 8) it can also be a direct method handle.
+// FIXME: remove DMH case after j.l.i.InvokerBytecodeGenerator code shape is updated.
+IRT_ENTRY(void, InterpreterRuntime::member_name_arg_or_null(JavaThread* thread, address member_name,
                                                             Method* method, address bcp))
   Bytecodes::Code code = Bytecodes::code_at(method, bcp);
   if (code != Bytecodes::_invokestatic) {
@@ -1293,8 +1307,12 @@
   Symbol* mname = cpool->name_ref_at(cp_index);
 
   if (MethodHandles::has_member_arg(cname, mname)) {
-    oop member_name = java_lang_invoke_DirectMethodHandle::member((oop)dmh);
-    thread->set_vm_result(member_name);
+    oop member_name_oop = (oop) member_name;
+    if (java_lang_invoke_DirectMethodHandle::is_instance(member_name_oop)) {
+      // FIXME: remove after j.l.i.InvokerBytecodeGenerator code shape is updated.
+      member_name_oop = java_lang_invoke_DirectMethodHandle::member(member_name_oop);
+    }
+    thread->set_vm_result(member_name_oop);
   }
 IRT_END
 #endif // INCLUDE_JVMTI
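
The new early-out in the exception path above consumes a per-thread counter set during deoptimization: when rematerializing scalar-replaced objects fails, the deoptimizer records how many interpreter frames must simply be popped, and the exception handler honors that before any handler lookup. A hedged sketch of the counter hand-off; the thread-state and result types below are invented for the example:

#include <cassert>

// Illustrative per-thread state, not the real JavaThread.
class ThreadState {
  int  _frames_to_pop_failed_realloc = 0;
  bool _do_not_unlock_if_synchronized = false;
public:
  void set_frames_to_pop_failed_realloc(int n)   { _frames_to_pop_failed_realloc = n; }
  int  frames_to_pop_failed_realloc() const      { return _frames_to_pop_failed_realloc; }
  void dec_frames_to_pop_failed_realloc()        { --_frames_to_pop_failed_realloc; }
  void set_do_not_unlock_if_synchronized(bool v) { _do_not_unlock_if_synchronized = v; }
};

enum class ExceptionAction { PopFrame, FindHandler };

// Mirrors the shape of the new check: pop unconditionally while the counter is positive.
ExceptionAction handle_exception(ThreadState* thread) {
  if (thread->frames_to_pop_failed_realloc() > 0) {
    thread->dec_frames_to_pop_failed_realloc();
    // Monitors were already released during deoptimization.
    thread->set_do_not_unlock_if_synchronized(true);
    return ExceptionAction::PopFrame;
  }
  return ExceptionAction::FindHandler;
}

int main() {
  ThreadState t;
  t.set_frames_to_pop_failed_realloc(2);   // deoptimizer failed to realloc objects in 2 frames
  assert(handle_exception(&t) == ExceptionAction::PopFrame);
  assert(handle_exception(&t) == ExceptionAction::PopFrame);
  assert(handle_exception(&t) == ExceptionAction::FindHandler);
  return 0;
}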
--- a/src/share/vm/interpreter/linkResolver.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/interpreter/linkResolver.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -320,7 +320,7 @@
   // First check in default method array
   if (!resolved_method->is_abstract() &&
     (InstanceKlass::cast(klass())->default_methods() != NULL)) {
-    int index = InstanceKlass::find_method_index(InstanceKlass::cast(klass())->default_methods(), name, signature, false);
+    int index = InstanceKlass::find_method_index(InstanceKlass::cast(klass())->default_methods(), name, signature, false, false);
     if (index >= 0 ) {
       vtable_index = InstanceKlass::cast(klass())->default_vtable_indices()->at(index);
     }
--- a/src/share/vm/interpreter/oopMapCache.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/interpreter/oopMapCache.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -180,7 +180,7 @@
   }
 }
 
-bool InterpreterOopMap::is_empty() {
+bool InterpreterOopMap::is_empty() const {
   bool result = _method == NULL;
   assert(_method != NULL || (_bci == 0 &&
     (_mask_size == 0 || _mask_size == USHRT_MAX) &&
@@ -196,7 +196,7 @@
   for (int i = 0; i < N; i++) _bit_mask[i] = 0;
 }
 
-void InterpreterOopMap::iterate_oop(OffsetClosure* oop_closure) {
+void InterpreterOopMap::iterate_oop(OffsetClosure* oop_closure) const {
   int n = number_of_entries();
   int word_index = 0;
   uintptr_t value = 0;
@@ -238,7 +238,7 @@
 #endif
 
 
-void InterpreterOopMap::print() {
+void InterpreterOopMap::print() const {
   int n = number_of_entries();
   tty->print("oop map for ");
   method()->print_value();
@@ -469,7 +469,7 @@
   }
 }
 
-inline unsigned int OopMapCache::hash_value_for(methodHandle method, int bci) {
+inline unsigned int OopMapCache::hash_value_for(methodHandle method, int bci) const {
   // We use method->code_size() rather than method->identity_hash() below since
   // the mark may not be present if a pointer to the method is already reversed.
   return   ((unsigned int) bci)
@@ -522,7 +522,7 @@
 
 void OopMapCache::lookup(methodHandle method,
                          int bci,
-                         InterpreterOopMap* entry_for) {
+                         InterpreterOopMap* entry_for) const {
   MutexLocker x(&_mut);
 
   OopMapCacheEntry* entry = NULL;
--- a/src/share/vm/interpreter/oopMapCache.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/interpreter/oopMapCache.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -101,32 +101,31 @@
 
   // access methods
   Method*        method() const                  { return _method; }
-  void           set_method(Method* v)         { _method = v; }
+  void           set_method(Method* v)           { _method = v; }
   int            bci() const                     { return _bci; }
   void           set_bci(int v)                  { _bci = v; }
   int            mask_size() const               { return _mask_size; }
   void           set_mask_size(int v)            { _mask_size = v; }
-  int            number_of_entries() const       { return mask_size() / bits_per_entry; }
   // Test bit mask size and return either the in-line bit mask or allocated
   // bit mask.
-  uintptr_t*  bit_mask()                         { return (uintptr_t*)(mask_size() <= small_mask_limit ? (intptr_t)_bit_mask : _bit_mask[0]); }
+  uintptr_t*  bit_mask() const                   { return (uintptr_t*)(mask_size() <= small_mask_limit ? (intptr_t)_bit_mask : _bit_mask[0]); }
 
   // return the word size of_bit_mask.  mask_size() <= 4 * MAX_USHORT
-  size_t mask_word_size() {
+  size_t mask_word_size() const {
     return (mask_size() + BitsPerWord - 1) / BitsPerWord;
   }
 
-  uintptr_t entry_at(int offset)            { int i = offset * bits_per_entry; return bit_mask()[i / BitsPerWord] >> (i % BitsPerWord); }
+  uintptr_t entry_at(int offset) const           { int i = offset * bits_per_entry; return bit_mask()[i / BitsPerWord] >> (i % BitsPerWord); }
 
-  void set_expression_stack_size(int sz)    { _expression_stack_size = sz; }
+  void set_expression_stack_size(int sz)         { _expression_stack_size = sz; }
 
 #ifdef ENABLE_ZAP_DEAD_LOCALS
-  bool is_dead(int offset)                       { return (entry_at(offset) & (1 << dead_bit_number)) != 0; }
+  bool is_dead(int offset) const                 { return (entry_at(offset) & (1 << dead_bit_number)) != 0; }
 #endif
 
   // Lookup
-  bool match(methodHandle method, int bci)       { return _method == method() && _bci == bci; }
-  bool is_empty();
+  bool match(methodHandle method, int bci) const { return _method == method() && _bci == bci; }
+  bool is_empty() const;
 
   // Initialization
   void initialize();
@@ -141,12 +140,13 @@
   // in-line), allocate the space from a Resource area.
   void resource_copy(OopMapCacheEntry* from);
 
-  void iterate_oop(OffsetClosure* oop_closure);
-  void print();
+  void iterate_oop(OffsetClosure* oop_closure) const;
+  void print() const;
 
-  bool is_oop  (int offset)                      { return (entry_at(offset) & (1 << oop_bit_number )) != 0; }
+  int number_of_entries() const                  { return mask_size() / bits_per_entry; }
+  bool is_oop (int offset) const                 { return (entry_at(offset) & (1 << oop_bit_number )) != 0; }
 
-  int expression_stack_size()                    { return _expression_stack_size; }
+  int expression_stack_size() const              { return _expression_stack_size; }
 
 #ifdef ENABLE_ZAP_DEAD_LOCALS
   void iterate_all(OffsetClosure* oop_closure, OffsetClosure* value_closure, OffsetClosure* dead_closure);
@@ -161,10 +161,10 @@
 
   OopMapCacheEntry* _array;
 
-  unsigned int hash_value_for(methodHandle method, int bci);
+  unsigned int hash_value_for(methodHandle method, int bci) const;
   OopMapCacheEntry* entry_at(int i) const;
 
-  Mutex _mut;
+  mutable Mutex _mut;
 
   void flush();
 
@@ -177,7 +177,7 @@
 
   // Returns the oopMap for (method, bci) in parameter "entry".
   // Returns false if an oop map was not found.
-  void lookup(methodHandle method, int bci, InterpreterOopMap* entry);
+  void lookup(methodHandle method, int bci, InterpreterOopMap* entry) const;
 
   // Compute an oop map without updating the cache or grabbing any locks (for debugging)
   static void compute_one_oop_map(methodHandle method, int bci, InterpreterOopMap* entry);
--- a/src/share/vm/memory/allocation.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/allocation.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -438,24 +438,22 @@
 }
 
 //------------------------------Arena------------------------------------------
-NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)
-
-Arena::Arena(size_t init_size) {
+Arena::Arena(MEMFLAGS flag, size_t init_size) : _flags(flag), _size_in_bytes(0)  {
   size_t round_size = (sizeof (char *)) - 1;
   init_size = (init_size+round_size) & ~round_size;
   _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
   _hwm = _chunk->bottom();      // Save the cached hwm, max
   _max = _chunk->top();
+  MemTracker::record_new_arena(flag);
   set_size_in_bytes(init_size);
-  NOT_PRODUCT(Atomic::inc(&_instance_count);)
 }
 
-Arena::Arena() {
+Arena::Arena(MEMFLAGS flag) : _flags(flag), _size_in_bytes(0) {
   _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
   _hwm = _chunk->bottom();      // Save the cached hwm, max
   _max = _chunk->top();
+  MemTracker::record_new_arena(flag);
   set_size_in_bytes(Chunk::init_size);
-  NOT_PRODUCT(Atomic::inc(&_instance_count);)
 }
 
 Arena *Arena::move_contents(Arena *copy) {
@@ -477,7 +475,7 @@
 
 Arena::~Arena() {
   destruct_contents();
-  NOT_PRODUCT(Atomic::dec(&_instance_count);)
+  MemTracker::record_arena_free(_flags);
 }
 
 void* Arena::operator new(size_t size) throw() {
@@ -493,21 +491,21 @@
   // dynamic memory type binding
 void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
 #ifdef ASSERT
-  void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
+  void* p = (void*)AllocateHeap(size, flags, CALLER_PC);
   if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
   return p;
 #else
-  return (void *) AllocateHeap(size, flags|otArena, CALLER_PC);
+  return (void *) AllocateHeap(size, flags, CALLER_PC);
 #endif
 }
 
 void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
 #ifdef ASSERT
-  void* p = os::malloc(size, flags|otArena, CALLER_PC);
+  void* p = os::malloc(size, flags, CALLER_PC);
   if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
   return p;
 #else
-  return os::malloc(size, flags|otArena, CALLER_PC);
+  return os::malloc(size, flags, CALLER_PC);
 #endif
 }
 
@@ -532,8 +530,9 @@
 // change the size
 void Arena::set_size_in_bytes(size_t size) {
   if (_size_in_bytes != size) {
+    long delta = (long)(size - size_in_bytes());
     _size_in_bytes = size;
-    MemTracker::record_arena_size((address)this, size);
+    MemTracker::record_arena_size_change(delta, _flags);
   }
 }
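
With the per-instance counter gone, arena accounting above becomes a running per-flag total: new/free events from the constructors and destructor, plus signed size deltas from set_size_in_bytes(). A rough model of that bookkeeping; the names below are illustrative, not the MemTracker API, and the real tracker updates its counters atomically:

#include <cassert>
#include <cstddef>

enum MemFlag { FLAG_GC, FLAG_COMPILER, FLAG_COUNT };

// Illustrative stand-in for NMT's per-category arena statistics.
struct ArenaStats {
  long   bytes[FLAG_COUNT]  = {};
  size_t arenas[FLAG_COUNT] = {};

  void record_new_arena(MemFlag f)               { arenas[f] += 1; }
  void record_arena_free(MemFlag f)              { arenas[f] -= 1; }
  void record_size_change(long delta, MemFlag f) { bytes[f] += delta; }
};

int main() {
  ArenaStats stats;
  stats.record_new_arena(FLAG_GC);
  size_t size_in_bytes = 0;

  // Grow: record only the difference, as the patched set_size_in_bytes() does.
  size_t new_size = 4096;
  stats.record_size_change((long)new_size - (long)size_in_bytes, FLAG_GC);
  size_in_bytes = new_size;

  // Shrink back down; the delta is negative.
  new_size = 1024;
  stats.record_size_change((long)new_size - (long)size_in_bytes, FLAG_GC);
  size_in_bytes = new_size;

  stats.record_arena_free(FLAG_GC);
  assert(stats.bytes[FLAG_GC] == 1024);
  assert(stats.arenas[FLAG_GC] == 0);
  return 0;
}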
 
--- a/src/share/vm/memory/allocation.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/allocation.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -133,51 +133,34 @@
 
 
 /*
- * MemoryType bitmap layout:
- * | 16 15 14 13 12 11 10 09 | 08 07 06 05 | 04 03 02 01 |
- * |      memory type        |   object    | reserved    |
- * |                         |     type    |             |
+ * Memory types
  */
 enum MemoryType {
   // Memory type by sub systems. It occupies lower byte.
-  mtNone              = 0x0000,  // undefined
-  mtClass             = 0x0100,  // memory class for Java classes
-  mtThread            = 0x0200,  // memory for thread objects
-  mtThreadStack       = 0x0300,
-  mtCode              = 0x0400,  // memory for generated code
-  mtGC                = 0x0500,  // memory for GC
-  mtCompiler          = 0x0600,  // memory for compiler
-  mtInternal          = 0x0700,  // memory used by VM, but does not belong to
+  mtJavaHeap          = 0x00,  // Java heap
+  mtClass             = 0x01,  // memory class for Java classes
+  mtThread            = 0x02,  // memory for thread objects
+  mtThreadStack       = 0x03,
+  mtCode              = 0x04,  // memory for generated code
+  mtGC                = 0x05,  // memory for GC
+  mtCompiler          = 0x06,  // memory for compiler
+  mtInternal          = 0x07,  // memory used by VM, but does not belong to
                                  // any of above categories, and not used for
                                  // native memory tracking
-  mtOther             = 0x0800,  // memory not used by VM
-  mtSymbol            = 0x0900,  // symbol
-  mtNMT               = 0x0A00,  // memory used by native memory tracking
-  mtChunk             = 0x0B00,  // chunk that holds content of arenas
-  mtJavaHeap          = 0x0C00,  // Java heap
-  mtClassShared       = 0x0D00,  // class data sharing
-  mtTest              = 0x0E00,  // Test type for verifying NMT
-  mtTracing           = 0x0F00,  // memory used for Tracing
-  mt_number_of_types  = 0x000F,  // number of memory types (mtDontTrack
+  mtOther             = 0x08,  // memory not used by VM
+  mtSymbol            = 0x09,  // symbol
+  mtNMT               = 0x0A,  // memory used by native memory tracking
+  mtClassShared       = 0x0B,  // class data sharing
+  mtChunk             = 0x0C,  // chunk that holds content of arenas
+  mtTest              = 0x0D,  // Test type for verifying NMT
+  mtTracing           = 0x0E,  // memory used for Tracing
+  mtNone              = 0x0F,  // undefined
+  mt_number_of_types  = 0x10   // number of memory types (mtDontTrack
                                  // is not included as validate type)
-  mtDontTrack         = 0x0F00,  // memory we do not or cannot track
-  mt_masks            = 0x7F00,
-
-  // object type mask
-  otArena             = 0x0010, // an arena object
-  otNMTRecorder       = 0x0020, // memory recorder object
-  ot_masks            = 0x00F0
 };
 
-#define IS_MEMORY_TYPE(flags, type) ((flags & mt_masks) == type)
-#define HAS_VALID_MEMORY_TYPE(flags)((flags & mt_masks) != mtNone)
-#define FLAGS_TO_MEMORY_TYPE(flags) (flags & mt_masks)
+typedef MemoryType MEMFLAGS;
 
-#define IS_ARENA_OBJ(flags)         ((flags & ot_masks) == otArena)
-#define IS_NMT_RECORDER(flags)      ((flags & ot_masks) == otNMTRecorder)
-#define NMT_CAN_TRACK(flags)        (!IS_NMT_RECORDER(flags) && !(IS_MEMORY_TYPE(flags, mtDontTrack)))
-
-typedef unsigned short MEMFLAGS;
 
 #if INCLUDE_NMT
 
@@ -189,27 +172,23 @@
 
 #endif // INCLUDE_NMT
 
-// debug build does not inline
-#if defined(_NMT_NOINLINE_)
-  #define CURRENT_PC       (NMT_track_callsite ? os::get_caller_pc(1) : 0)
-  #define CALLER_PC        (NMT_track_callsite ? os::get_caller_pc(2) : 0)
-  #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(3) : 0)
-#else
-  #define CURRENT_PC      (NMT_track_callsite? os::get_caller_pc(0) : 0)
-  #define CALLER_PC       (NMT_track_callsite ? os::get_caller_pc(1) : 0)
-  #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0)
-#endif
-
+class NativeCallStack;
 
 
 template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
  public:
-  _NOINLINE_ void* operator new(size_t size, address caller_pc = 0) throw();
+  _NOINLINE_ void* operator new(size_t size, const NativeCallStack& stack) throw();
+  _NOINLINE_ void* operator new(size_t size) throw();
   _NOINLINE_ void* operator new (size_t size, const std::nothrow_t&  nothrow_constant,
-                               address caller_pc = 0) throw();
-  _NOINLINE_ void* operator new [](size_t size, address caller_pc = 0) throw();
+                               const NativeCallStack& stack) throw();
+  _NOINLINE_ void* operator new (size_t size, const std::nothrow_t&  nothrow_constant)
+                               throw();
+  _NOINLINE_ void* operator new [](size_t size, const NativeCallStack& stack) throw();
+  _NOINLINE_ void* operator new [](size_t size) throw();
   _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
-                               address caller_pc = 0) throw();
+                               const NativeCallStack& stack) throw();
+  _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant)
+                               throw();
   void  operator delete(void* p);
   void  operator delete [] (void* p);
 };
@@ -286,7 +265,8 @@
   f(ConstantPool) \
   f(ConstantPoolCache) \
   f(Annotation) \
-  f(MethodCounters)
+  f(MethodCounters) \
+  f(Deallocated)
 
 #define METASPACE_OBJ_TYPE_DECLARE(name) name ## Type,
 #define METASPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;
@@ -384,13 +364,15 @@
 
 //------------------------------Arena------------------------------------------
 // Fast allocation of memory
-class Arena : public CHeapObj<mtNone|otArena> {
+class Arena : public CHeapObj<mtNone> {
 protected:
   friend class ResourceMark;
   friend class HandleMark;
   friend class NoHandleMark;
   friend class VMStructs;
 
+  MEMFLAGS    _flags;           // Memory tracking flags
+
   Chunk *_first;                // First chunk
   Chunk *_chunk;                // current chunk
   char *_hwm, *_max;            // High water mark and max in current chunk
@@ -418,8 +400,8 @@
  }
 
  public:
-  Arena();
-  Arena(size_t init_size);
+  Arena(MEMFLAGS memflag);
+  Arena(MEMFLAGS memflag, size_t init_size);
   ~Arena();
   void  destruct_contents();
   char* hwm() const             { return _hwm; }
@@ -518,8 +500,6 @@
   static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2)  PRODUCT_RETURN;
   static void free_all(char** start, char** end)                                     PRODUCT_RETURN;
 
-  // how many arena instances
-  NOT_PRODUCT(static volatile jint _instance_count;)
 private:
   // Reset this Arena to empty, access will trigger grow if necessary
   void   reset(void) {
@@ -681,7 +661,7 @@
   NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, AllocFailStrategy::RETURN_NULL)
 
 #define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\
-  NEW_C_HEAP_ARRAY3(type, (size), memflags, (address)0, AllocFailStrategy::RETURN_NULL)
+  NEW_C_HEAP_ARRAY3(type, (size), memflags, CURRENT_PC, AllocFailStrategy::RETURN_NULL)
 
 #define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
   (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags))
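
Because the object-type bits are dropped, a MEMFLAGS value above is now just a small index rather than a packed bit field, so allocation headers and summary tables can use it directly to address a per-type slot. A compressed illustration of the difference; the constants and table below are made up for the example:

#include <cassert>
#include <cstdint>
#include <cstddef>

// New-style: the memory type is the whole value and indexes a stats table directly.
enum MemoryType : uint8_t { mtJavaHeap = 0x00, mtClass = 0x01, mtGC = 0x05, mt_number_of_types = 0x10 };

struct MallocSummary {
  size_t bytes_by_type[mt_number_of_types];
  void record_malloc(size_t size, MemoryType type) { bytes_by_type[type] += size; }
};

int main() {
  // Old-style encoding packed the type into the upper byte and object bits below it,
  // e.g. 0x0500 for GC plus 0x0010 for "arena object", and needed masks to decode:
  uint16_t old_flags = 0x0500 | 0x0010;
  assert((old_flags & 0x7F00) == 0x0500);   // FLAGS_TO_MEMORY_TYPE equivalent

  // New-style usage needs no masking at all.
  MallocSummary summary = {};
  summary.record_malloc(64, mtGC);
  assert(summary.bytes_by_type[mtGC] == 64);
  return 0;
}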
--- a/src/share/vm/memory/allocation.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/allocation.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 
 #include "runtime/atomic.inline.hpp"
 #include "runtime/os.hpp"
+#include "services/memTracker.hpp"
 
 // Explicit C-heap memory management
 
@@ -49,12 +50,10 @@
 #endif
 
 // allocate using malloc; will fail if no memory available
-inline char* AllocateHeap(size_t size, MEMFLAGS flags, address pc = 0,
+inline char* AllocateHeap(size_t size, MEMFLAGS flags,
+    const NativeCallStack& stack,
     AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
-  if (pc == 0) {
-    pc = CURRENT_PC;
-  }
-  char* p = (char*) os::malloc(size, flags, pc);
+  char* p = (char*) os::malloc(size, flags, stack);
   #ifdef ASSERT
   if (PrintMallocFree) trace_heap_malloc(size, "AllocateHeap", p);
   #endif
@@ -63,10 +62,14 @@
   }
   return p;
 }
+inline char* AllocateHeap(size_t size, MEMFLAGS flags,
+    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
+  return AllocateHeap(size, flags, CURRENT_PC, alloc_failmode);
+}
 
-inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flags,
+inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flag,
     AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
-  char* p = (char*) os::realloc(old, size, flags, CURRENT_PC);
+  char* p = (char*) os::realloc(old, size, flag, CURRENT_PC);
   #ifdef ASSERT
   if (PrintMallocFree) trace_heap_malloc(size, "ReallocateHeap", p);
   #endif
@@ -85,8 +88,22 @@
 
 
 template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
-      address caller_pc) throw() {
-    void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC));
+      const NativeCallStack& stack) throw() {
+  void* p = (void*)AllocateHeap(size, F, stack);
+#ifdef ASSERT
+  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
+#endif
+  return p;
+}
+
+template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size) throw() {
+  return CHeapObj<F>::operator new(size, CALLER_PC);
+}
+
+template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
+  const std::nothrow_t&  nothrow_constant, const NativeCallStack& stack) throw() {
+  void* p = (void*)AllocateHeap(size, F, stack,
+      AllocFailStrategy::RETURN_NULL);
 #ifdef ASSERT
     if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
 #endif
@@ -94,23 +111,28 @@
   }
 
 template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
-  const std::nothrow_t&  nothrow_constant, address caller_pc) throw() {
-  void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC),
-      AllocFailStrategy::RETURN_NULL);
-#ifdef ASSERT
-    if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
-#endif
-    return p;
+  const std::nothrow_t& nothrow_constant) throw() {
+  return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
 }
 
 template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
-      address caller_pc) throw() {
-    return CHeapObj<F>::operator new(size, caller_pc);
+      const NativeCallStack& stack) throw() {
+  return CHeapObj<F>::operator new(size, stack);
+}
+
+template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size)
+  throw() {
+  return CHeapObj<F>::operator new(size, CALLER_PC);
 }
 
 template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
-  const std::nothrow_t&  nothrow_constant, address caller_pc) throw() {
-    return CHeapObj<F>::operator new(size, nothrow_constant, caller_pc);
+  const std::nothrow_t&  nothrow_constant, const NativeCallStack& stack) throw() {
+  return CHeapObj<F>::operator new(size, nothrow_constant, stack);
+}
+
+template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
+  const std::nothrow_t& nothrow_constant) throw() {
+  return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
 }
 
 template <MEMFLAGS F> void CHeapObj<F>::operator delete(void* p){
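
The CHeapObj rewrite above replaces a defaulted caller_pc argument with explicit overload pairs: the callstack-taking form does the allocation, and the plain form simply forwards a freshly captured stack. A stripped-down sketch of that delegation pattern; the CallStack type and CALLER_PC capture below are placeholders, not the NativeCallStack API:

#include <cstdio>
#include <cstdlib>
#include <new>

// Placeholder for NativeCallStack; pretend it captures the allocation site.
struct CallStack {
  const char* site;
  explicit CallStack(const char* s) : site(s) {}
};

#define CALLER_PC CallStack(__func__)   // stand-in for capturing the caller's frame

struct Tracked {
  int payload;

  // The "real" overload: does the work and records the supplied stack.
  static void* operator new(size_t size, const CallStack& stack) {
    std::printf("alloc %zu bytes at %s\n", size, stack.site);
    return std::malloc(size);
  }
  // The convenience overload just forwards with a stack captured here.
  static void* operator new(size_t size) {
    return operator new(size, CALLER_PC);
  }
  static void operator delete(void* p) { std::free(p); }
  // Matching placement delete so a throwing constructor does not leak.
  static void operator delete(void* p, const CallStack&) { std::free(p); }
};

int main() {
  Tracked* t = new Tracked();                       // forwarding overload
  Tracked* u = new (CallStack("main")) Tracked();   // explicit call stack
  delete t;
  delete u;
  return 0;
}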
--- a/src/share/vm/memory/cardTableModRefBS.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/cardTableModRefBS.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -44,13 +44,6 @@
 // enumerate ref fields that have been modified (since the last
 // enumeration.)
 
-size_t CardTableModRefBS::cards_required(size_t covered_words)
-{
-  // Add one for a guard card, used to detect errors.
-  const size_t words = align_size_up(covered_words, card_size_in_words);
-  return words / card_size_in_words + 1;
-}
-
 size_t CardTableModRefBS::compute_byte_map_size()
 {
   assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
@@ -64,27 +57,50 @@
                                      int max_covered_regions):
   ModRefBarrierSet(max_covered_regions),
   _whole_heap(whole_heap),
-  _guard_index(cards_required(whole_heap.word_size()) - 1),
-  _last_valid_index(_guard_index - 1),
+  _guard_index(0),
+  _guard_region(),
+  _last_valid_index(0),
   _page_size(os::vm_page_size()),
-  _byte_map_size(compute_byte_map_size())
+  _byte_map_size(0),
+  _covered(NULL),
+  _committed(NULL),
+  _cur_covered_regions(0),
+  _byte_map(NULL),
+  byte_map_base(NULL),
+  // LNC functionality
+  _lowest_non_clean(NULL),
+  _lowest_non_clean_chunk_size(NULL),
+  _lowest_non_clean_base_chunk_index(NULL),
+  _last_LNC_resizing_collection(NULL)
 {
   _kind = BarrierSet::CardTableModRef;
 
+  assert((uintptr_t(_whole_heap.start())  & (card_size - 1))  == 0, "heap must start at card boundary");
+  assert((uintptr_t(_whole_heap.end()) & (card_size - 1))  == 0, "heap must end at card boundary");
+
+  assert(card_size <= 512, "card_size must be less than 512"); // why?
+
+  _covered   = new MemRegion[_max_covered_regions];
+  if (_covered == NULL) {
+    vm_exit_during_initialization("Could not allocate card table covered region set.");
+  }
+}
+
+void CardTableModRefBS::initialize() {
+  _guard_index = cards_required(_whole_heap.word_size()) - 1;
+  _last_valid_index = _guard_index - 1;
+
+  _byte_map_size = compute_byte_map_size();
+
   HeapWord* low_bound  = _whole_heap.start();
   HeapWord* high_bound = _whole_heap.end();
-  assert((uintptr_t(low_bound)  & (card_size - 1))  == 0, "heap must start at card boundary");
-  assert((uintptr_t(high_bound) & (card_size - 1))  == 0, "heap must end at card boundary");
 
-  assert(card_size <= 512, "card_size must be less than 512"); // why?
-
-  _covered   = new MemRegion[max_covered_regions];
-  _committed = new MemRegion[max_covered_regions];
-  if (_covered == NULL || _committed == NULL) {
-    vm_exit_during_initialization("couldn't alloc card table covered region set.");
+  _cur_covered_regions = 0;
+  _committed = new MemRegion[_max_covered_regions];
+  if (_committed == NULL) {
+    vm_exit_during_initialization("Could not allocate card table committed region set.");
   }
 
-  _cur_covered_regions = 0;
   const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
     MAX2(_page_size, (size_t) os::vm_allocation_granularity());
   ReservedSpace heap_rs(_byte_map_size, rs_align, false);
@@ -114,20 +130,20 @@
                             !ExecMem, "card table last card");
   *guard_card = last_card;
 
-   _lowest_non_clean =
-    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC);
+  _lowest_non_clean =
+    NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
   _lowest_non_clean_chunk_size =
-    NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC);
+    NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
   _lowest_non_clean_base_chunk_index =
-    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions, mtGC);
+    NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
   _last_LNC_resizing_collection =
-    NEW_C_HEAP_ARRAY(int, max_covered_regions, mtGC);
+    NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
   if (_lowest_non_clean == NULL
       || _lowest_non_clean_chunk_size == NULL
       || _lowest_non_clean_base_chunk_index == NULL
       || _last_LNC_resizing_collection == NULL)
     vm_exit_during_initialization("couldn't allocate an LNC array.");
-  for (int i = 0; i < max_covered_regions; i++) {
+  for (int i = 0; i < _max_covered_regions; i++) {
     _lowest_non_clean[i] = NULL;
     _lowest_non_clean_chunk_size[i] = 0;
     _last_LNC_resizing_collection[i] = -1;
@@ -429,7 +445,7 @@
                                                                  OopsInGenClosure* cl,
                                                                  CardTableRS* ct) {
   if (!mr.is_empty()) {
-    // Caller (process_strong_roots()) claims that all GC threads
+    // Caller (process_roots()) claims that all GC threads
     // execute this call.  With UseDynamicNumberOfGCThreads now all
     // active GC threads execute this call.  The number of active GC
     // threads needs to be passed to par_non_clean_card_iterate_work()
@@ -438,7 +454,7 @@
     // This is an example of where n_par_threads() is used instead
     // of workers()->active_workers().  n_par_threads can be set to 0 to
     // turn off parallelism.  For example when this code is called as
-    // part of verification and SharedHeap::process_strong_roots() is being
+    // part of verification and SharedHeap::process_roots() is being
     // used, then n_par_threads() may have been set to 0.  active_workers
     // is not overloaded with the meaning that it is a switch to disable
     // parallelism and so keeps the meaning of the number of
@@ -650,7 +666,7 @@
                                       jbyte val, bool val_equals) {
   jbyte* start    = byte_for(mr.start());
   jbyte* end      = byte_for(mr.last());
-  bool   failures = false;
+  bool failures = false;
   for (jbyte* curr = start; curr <= end; ++curr) {
     jbyte curr_val = *curr;
     bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
--- a/src/share/vm/memory/cardTableModRefBS.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/cardTableModRefBS.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -96,12 +96,12 @@
   // The declaration order of these const fields is important; see the
   // constructor before changing.
   const MemRegion _whole_heap;       // the region covered by the card table
-  const size_t    _guard_index;      // index of very last element in the card
+  size_t          _guard_index;      // index of very last element in the card
                                      // table; it is set to a guard value
                                      // (last_card) and should never be modified
-  const size_t    _last_valid_index; // index of the last valid element
+  size_t          _last_valid_index; // index of the last valid element
   const size_t    _page_size;        // page size used when mapping _byte_map
-  const size_t    _byte_map_size;    // in bytes
+  size_t          _byte_map_size;    // in bytes
   jbyte*          _byte_map;         // the card marking array
 
   int _cur_covered_regions;
@@ -123,7 +123,12 @@
  protected:
   // Initialization utilities; covered_words is the size of the covered region
   // in, um, words.
-  inline size_t cards_required(size_t covered_words);
+  inline size_t cards_required(size_t covered_words) {
+    // Add one for a guard card, used to detect errors.
+    const size_t words = align_size_up(covered_words, card_size_in_words);
+    return words / card_size_in_words + 1;
+  }
+
   inline size_t compute_byte_map_size();
 
   // Finds and return the index of the region, if any, to which the given
@@ -137,7 +142,7 @@
   int find_covering_region_containing(HeapWord* addr);
 
   // Resize one of the regions covered by the remembered set.
-  void resize_covered_region(MemRegion new_region);
+  virtual void resize_covered_region(MemRegion new_region);
 
   // Returns the leftmost end of a committed region corresponding to a
   // covered region before covered region "ind", or else "NULL" if "ind" is
@@ -282,6 +287,8 @@
   CardTableModRefBS(MemRegion whole_heap, int max_covered_regions);
   ~CardTableModRefBS();
 
+  virtual void initialize();
+
   // *** Barrier set functions.
 
   bool has_write_ref_pre_barrier() { return false; }
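
Hoisting the heavy setup out of the constructor into a virtual initialize() above lets subclasses participate: a virtual call made from a base-class constructor would not reach the override, but a separate initialize() invoked after construction will. A compact sketch of the split; the class names below are generic, not the HotSpot hierarchy:

#include <cassert>
#include <cstddef>
#include <vector>

class BarrierSetLike {
protected:
  size_t _guard_index = 0;
  std::vector<int> _byte_map;
  virtual size_t cards_required() const { return 16; }
public:
  BarrierSetLike() {
    // Constructor only records trivially-known state; no sizing decisions yet.
  }
  virtual ~BarrierSetLike() = default;
  // Called after the most-derived object exists, so overrides are visible.
  virtual void initialize() {
    _guard_index = cards_required() - 1;
    _byte_map.assign(cards_required(), 0);
  }
  size_t guard_index() const { return _guard_index; }
};

class G1LikeBarrierSet : public BarrierSetLike {
protected:
  size_t cards_required() const override { return 64; }   // subclass needs a bigger table
};

int main() {
  G1LikeBarrierSet bs;
  bs.initialize();                 // dispatches to the subclass sizing
  assert(bs.guard_index() == 63);
  return 0;
}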
--- a/src/share/vm/memory/cardTableRS.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/cardTableRS.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -53,9 +53,10 @@
 #else
   _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions);
 #endif
+  _ct_bs->initialize();
   set_bs(_ct_bs);
   _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1,
-                         mtGC, 0, AllocFailStrategy::RETURN_NULL);
+                         mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
   if (_last_cur_val_in_gen == NULL) {
     vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
   }
--- a/src/share/vm/memory/collectorPolicy.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/collectorPolicy.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -183,13 +183,9 @@
   // Requirements of any new remembered set implementations must be added here.
   size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);
 
-  // Parallel GC does its own alignment of the generations to avoid requiring a
-  // large page (256M on some platforms) for the permanent generation.  The
-  // other collectors should also be updated to do their own alignment and then
-  // this use of lcm() should be removed.
-  if (UseLargePages && !UseParallelGC) {
-      // in presence of large pages we have to make sure that our
-      // alignment is large page aware
+  if (UseLargePages) {
+      // In presence of large pages we have to make sure that our
+      // alignment is large page aware.
       alignment = lcm(os::large_page_size(), alignment);
   }
 
@@ -969,7 +965,8 @@
 }
 
 void MarkSweepPolicy::initialize_generations() {
-  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
+  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, CURRENT_PC,
+    AllocFailStrategy::RETURN_NULL);
   if (_generations == NULL) {
     vm_exit_during_initialization("Unable to allocate gen spec");
   }
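
Dropping the UseParallelGC exclusion above means every collector now folds the large-page size into its generation alignment the same way, through a least-common-multiple. As a worked example under assumed sizes: if the remembered-set constraint is 64 KB and the OS large page is 2 MB, lcm(2 MB, 64 KB) = 2 MB, so generation boundaries land on large-page boundaries.

#include <cassert>
#include <cstddef>
#include <numeric>   // std::lcm (C++17)

int main() {
  const size_t card_table_alignment = 64 * 1024;        // assumed remembered-set constraint
  const size_t large_page_size      = 2 * 1024 * 1024;  // assumed os::large_page_size()
  size_t alignment = std::lcm(large_page_size, card_table_alignment);
  assert(alignment == 2 * 1024 * 1024);
  return 0;
}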
--- a/src/share/vm/memory/defNewGeneration.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/defNewGeneration.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -42,6 +42,7 @@
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
+#include "runtime/prefetch.inline.hpp"
 #include "runtime/thread.inline.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/stack.inline.hpp"
@@ -584,7 +585,7 @@
 
   init_assuming_no_promotion_failure();
 
-  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
+  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
   // Capture heap used before collection (for printing).
   size_t gch_prev_used = gch->used();
 
@@ -612,6 +613,9 @@
 
   KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                       gch->rem_set()->klass_rem_set());
+  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
+                                           &fsc_with_no_gc_barrier,
+                                           false);
 
   set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
   FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
@@ -621,18 +625,15 @@
   assert(gch->no_allocs_since_save_marks(0),
          "save marks have not been newly set.");
 
-  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
-
-  gch->gen_process_strong_roots(_level,
-                                true,  // Process younger gens, if any,
-                                       // as strong roots.
-                                true,  // activate StrongRootsScope
-                                true,  // is scavenging
-                                SharedHeap::ScanningOption(so),
-                                &fsc_with_no_gc_barrier,
-                                true,   // walk *all* scavengable nmethods
-                                &fsc_with_gc_barrier,
-                                &klass_scan_closure);
+  gch->gen_process_roots(_level,
+                         true,  // Process younger gens, if any,
+                                // as strong roots.
+                         true,  // activate StrongRootsScope
+                         SharedHeap::SO_ScavengeCodeCache,
+                         GenCollectedHeap::StrongAndWeakRoots,
+                         &fsc_with_no_gc_barrier,
+                         &fsc_with_gc_barrier,
+                         &cld_scan_closure);
 
   // "evacuate followers".
   evacuate_followers.do_void();
@@ -642,7 +643,7 @@
   rp->setup_policy(clear_all_soft_refs);
   const ReferenceProcessorStats& stats =
   rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
-                                    NULL, _gc_timer);
+                                    NULL, _gc_timer, gc_tracer.gc_id());
   gc_tracer.report_gc_reference_stats(stats);
 
   if (!_promotion_failed) {
@@ -788,7 +789,7 @@
 
   // Try allocating obj in to-space (unless too old)
   if (old->age() < tenuring_threshold()) {
-    obj = (oop) to()->allocate(s);
+    obj = (oop) to()->allocate_aligned(s);
   }
 
   // Otherwise try allocating obj tenured
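
Several calls above now thread gc_tracer.gc_id() through to the timing scope and to reference processing, so every log line produced during one young collection carries the same identifier. A toy illustration of the pattern; the tracer and scope types are invented for the example, not the GCTracer/GCTraceTime classes:

#include <cstdio>

struct GcTracer {
  unsigned id;
  unsigned gc_id() const { return id; }
};

struct TraceScope {
  const char* phase;
  unsigned gc_id;
  TraceScope(const char* p, unsigned id) : phase(p), gc_id(id) {
    std::printf("[GC#%u] %s start\n", gc_id, phase);
  }
  ~TraceScope() { std::printf("[GC#%u] %s end\n", gc_id, phase); }
};

void process_references(unsigned gc_id) {
  std::printf("[GC#%u] reference processing\n", gc_id);
}

int main() {
  GcTracer tracer{7};
  {
    TraceScope t("young collection", tracer.gc_id());  // the same id flows everywhere
    process_references(tracer.gc_id());
  }
  return 0;
}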
--- a/src/share/vm/memory/filemap.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/filemap.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -24,9 +24,14 @@
 
 #include "precompiled.hpp"
 #include "classfile/classLoader.hpp"
+#include "classfile/sharedClassUtil.hpp"
 #include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionaryShared.hpp"
 #include "classfile/altHashing.hpp"
 #include "memory/filemap.hpp"
+#include "memory/metadataFactory.hpp"
+#include "memory/oopFactory.hpp"
+#include "oops/objArrayOop.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
@@ -41,7 +46,6 @@
 #endif
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
-
 extern address JVM_FunctionAtStart();
 extern address JVM_FunctionAtEnd();
 
@@ -77,12 +81,27 @@
 void FileMapInfo::fail_continue(const char *msg, ...) {
   va_list ap;
   va_start(ap, msg);
-  if (RequireSharedSpaces) {
-    fail(msg, ap);
+  MetaspaceShared::set_archive_loading_failed();
+  if (PrintSharedArchiveAndExit && _validating_classpath_entry_table) {
+    // If we are doing PrintSharedArchiveAndExit and some of the classpath entries
+    // do not validate, we can still continue "limping" to validate the remaining
+    // entries. No need to quit.
+    tty->print("[");
+    tty->vprint(msg, ap);
+    tty->print_cr("]");
+  } else {
+    if (RequireSharedSpaces) {
+      fail(msg, ap);
+    } else {
+      if (PrintSharedSpaces) {
+        tty->print_cr("UseSharedSpaces: %s", msg);
+      }
+    }
+    UseSharedSpaces = false;
+    assert(current_info() != NULL, "singleton must be registered");
+    current_info()->close();
   }
   va_end(ap);
-  UseSharedSpaces = false;
-  close();
 }
 
 // Fill in the fileMapInfo structure with data about this VM instance.
@@ -117,74 +136,208 @@
   }
 }
 
+FileMapInfo::FileMapInfo() {
+  assert(_current_info == NULL, "must be singleton"); // not thread safe
+  _current_info = this;
+  memset(this, 0, sizeof(FileMapInfo));
+  _file_offset = 0;
+  _file_open = false;
+  _header = SharedClassUtil::allocate_file_map_header();
+  _header->_version = _invalid_version;
+}
+
+FileMapInfo::~FileMapInfo() {
+  assert(_current_info == this, "must be singleton"); // not thread safe
+  _current_info = NULL;
+}
+
 void FileMapInfo::populate_header(size_t alignment) {
-  _header._magic = 0xf00baba2;
-  _header._version = _current_version;
-  _header._alignment = alignment;
-  _header._obj_alignment = ObjectAlignmentInBytes;
+  _header->populate(this, alignment);
+}
+
+size_t FileMapInfo::FileMapHeader::data_size() {
+  return SharedClassUtil::file_map_header_size() - sizeof(FileMapInfo::FileMapHeaderBase);
+}
+
+void FileMapInfo::FileMapHeader::populate(FileMapInfo* mapinfo, size_t alignment) {
+  _magic = 0xf00baba2;
+  _version = _current_version;
+  _alignment = alignment;
+  _obj_alignment = ObjectAlignmentInBytes;
+  _classpath_entry_table_size = mapinfo->_classpath_entry_table_size;
+  _classpath_entry_table = mapinfo->_classpath_entry_table;
+  _classpath_entry_size = mapinfo->_classpath_entry_size;
 
   // The following fields are for sanity checks for whether this archive
   // will function correctly with this JVM and the bootclasspath it's
   // invoked with.
 
   // JVM version string ... changes on each build.
-  get_header_version(_header._jvm_ident);
+  get_header_version(_jvm_ident);
+}
+
+void FileMapInfo::allocate_classpath_entry_table() {
+  int bytes = 0;
+  int count = 0;
+  char* strptr = NULL;
+  char* strptr_max = NULL;
+  Thread* THREAD = Thread::current();
 
-  // Build checks on classpath and jar files
-  _header._num_jars = 0;
-  ClassPathEntry *cpe = ClassLoader::classpath_entry(0);
-  for ( ; cpe != NULL; cpe = cpe->next()) {
+  ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
+  size_t entry_size = SharedClassUtil::shared_class_path_entry_size();
 
-    if (cpe->is_jar_file()) {
-      if (_header._num_jars >= JVM_SHARED_JARS_MAX) {
-        fail_stop("Too many jar files to share.", NULL);
-      }
+  for (int pass=0; pass<2; pass++) {
+    ClassPathEntry *cpe = ClassLoader::classpath_entry(0);
+
+    for (int cur_entry = 0 ; cpe != NULL; cpe = cpe->next(), cur_entry++) {
+      const char *name = cpe->name();
+      int name_bytes = (int)(strlen(name) + 1);
 
-      // Jar file - record timestamp and file size.
-      struct stat st;
-      const char *path = cpe->name();
-      if (os::stat(path, &st) != 0) {
-        // If we can't access a jar file in the boot path, then we can't
-        // make assumptions about where classes get loaded from.
-        fail_stop("Unable to open jar file %s.", path);
-      }
-      _header._jar[_header._num_jars]._timestamp = st.st_mtime;
-      _header._jar[_header._num_jars]._filesize = st.st_size;
-      _header._num_jars++;
-    } else {
+      if (pass == 0) {
+        count ++;
+        bytes += (int)entry_size;
+        bytes += name_bytes;
+        if (TraceClassPaths || (TraceClassLoading && Verbose)) {
+          tty->print_cr("[Add main shared path (%s) %s]", (cpe->is_jar_file() ? "jar" : "dir"), name);
+        }
+      } else {
+        SharedClassPathEntry* ent = shared_classpath(cur_entry);
+        if (cpe->is_jar_file()) {
+          struct stat st;
+          if (os::stat(name, &st) != 0) {
+            // The file/dir must exist, or it would not have been added
+            // into ClassLoader::classpath_entry().
+            //
+            // If we can't access a jar file in the boot path, then we can't
+            // make assumptions about where classes get loaded from.
+            FileMapInfo::fail_stop("Unable to open jar file %s.", name);
+          }
 
-      // If directories appear in boot classpath, they must be empty to
-      // avoid having to verify each individual class file.
-      const char* name = ((ClassPathDirEntry*)cpe)->name();
-      if (!os::dir_is_empty(name)) {
-        fail_stop("Boot classpath directory %s is not empty.", name);
+          EXCEPTION_MARK; // The following call should never throw, but would exit VM on error.
+          SharedClassUtil::update_shared_classpath(cpe, ent, st.st_mtime, st.st_size, THREAD);
+        } else {
+          ent->_filesize  = -1;
+          if (!os::dir_is_empty(name)) {
+            ClassLoader::exit_with_path_failure("Cannot have non-empty directory in archived classpaths", name);
+          }
+        }
+        ent->_name = strptr;
+        if (strptr + name_bytes <= strptr_max) {
+          strncpy(strptr, name, (size_t)name_bytes); // name_bytes includes trailing 0.
+          strptr += name_bytes;
+        } else {
+          assert(0, "miscalculated buffer size");
+        }
       }
     }
+
+    if (pass == 0) {
+      EXCEPTION_MARK; // The following call should never throw, but would exit VM on error.
+      Array<u8>* arr = MetadataFactory::new_array<u8>(loader_data, (bytes + 7)/8, THREAD);
+      strptr = (char*)(arr->data());
+      strptr_max = strptr + bytes;
+      SharedClassPathEntry* table = (SharedClassPathEntry*)strptr;
+      strptr += entry_size * count;
+
+      _classpath_entry_table_size = count;
+      _classpath_entry_table = table;
+      _classpath_entry_size = entry_size;
+    }
   }
 }
 
+bool FileMapInfo::validate_classpath_entry_table() {
+  _validating_classpath_entry_table = true;
+
+  int count = _header->_classpath_entry_table_size;
+
+  _classpath_entry_table = _header->_classpath_entry_table;
+  _classpath_entry_size = _header->_classpath_entry_size;
+
+  for (int i=0; i<count; i++) {
+    SharedClassPathEntry* ent = shared_classpath(i);
+    struct stat st;
+    const char* name = ent->_name;
+    bool ok = true;
+    if (TraceClassPaths || (TraceClassLoading && Verbose)) {
+      tty->print_cr("[Checking shared classpath entry: %s]", name);
+    }
+    if (os::stat(name, &st) != 0) {
+      fail_continue("Required classpath entry does not exist: %s", name);
+      ok = false;
+    } else if (ent->is_dir()) {
+      if (!os::dir_is_empty(name)) {
+        fail_continue("directory is not empty: %s", name);
+        ok = false;
+      }
+    } else {
+      if (ent->_timestamp != st.st_mtime ||
+          ent->_filesize != st.st_size) {
+        ok = false;
+        if (PrintSharedArchiveAndExit) {
+          fail_continue(ent->_timestamp != st.st_mtime ?
+                        "Timestamp mismatch" :
+                        "File size mismatch");
+        } else {
+          fail_continue("A jar file is not the one used while building"
+                        " the shared archive file: %s", name);
+        }
+      }
+    }
+    if (ok) {
+      if (TraceClassPaths || (TraceClassLoading && Verbose)) {
+        tty->print_cr("[ok]");
+      }
+    } else if (!PrintSharedArchiveAndExit) {
+      _validating_classpath_entry_table = false;
+      return false;
+    }
+  }
+
+  _classpath_entry_table_size = _header->_classpath_entry_table_size;
+  _validating_classpath_entry_table = false;
+  return true;
+}
+
 
 // Read the FileMapInfo information from the file.
 
 bool FileMapInfo::init_from_file(int fd) {
-
-  size_t n = read(fd, &_header, sizeof(struct FileMapHeader));
-  if (n != sizeof(struct FileMapHeader)) {
+  size_t sz = _header->data_size();
+  char* addr = _header->data();
+  size_t n = os::read(fd, addr, (unsigned int)sz);
+  if (n != sz) {
     fail_continue("Unable to read the file header.");
     return false;
   }
-  if (_header._version != current_version()) {
+  if (_header->_version != current_version()) {
     fail_continue("The shared archive file has the wrong version.");
     return false;
   }
+
+  size_t info_size = _header->_paths_misc_info_size;
+  _paths_misc_info = NEW_C_HEAP_ARRAY_RETURN_NULL(char, info_size, mtClass);
+  if (_paths_misc_info == NULL) {
+    fail_continue("Unable to read the file header.");
+    return false;
+  }
+  n = os::read(fd, _paths_misc_info, (unsigned int)info_size);
+  if (n != info_size) {
+    fail_continue("Unable to read the shared path info header.");
+    FREE_C_HEAP_ARRAY(char, _paths_misc_info, mtClass);
+    _paths_misc_info = NULL;
+    return false;
+  }
+
   size_t len = lseek(fd, 0, SEEK_END);
   struct FileMapInfo::FileMapHeader::space_info* si =
-    &_header._space[MetaspaceShared::mc];
+    &_header->_space[MetaspaceShared::mc];
   if (si->_file_offset >= len || len - si->_file_offset < si->_used) {
     fail_continue("The shared archive file has been truncated.");
     return false;
   }
-  _file_offset = n;
+
+  _file_offset += (long)n;
   return true;
 }
 
@@ -239,7 +392,16 @@
 // Write the header to the file, seek to the next allocation boundary.
 
 void FileMapInfo::write_header() {
-  write_bytes_aligned(&_header, sizeof(FileMapHeader));
+  int info_size = ClassLoader::get_shared_paths_misc_info_size();
+
+  _header->_paths_misc_info_size = info_size;
+
+  align_file_position();
+  size_t sz = _header->data_size();
+  char* addr = _header->data();
+  write_bytes(addr, (int)sz); // skip the C++ vtable
+  write_bytes(ClassLoader::get_shared_paths_misc_info(), info_size);
+  align_file_position();
 }
 
 
@@ -249,7 +411,7 @@
   align_file_position();
   size_t used = space->used_bytes_slow(Metaspace::NonClassType);
   size_t capacity = space->capacity_bytes_slow(Metaspace::NonClassType);
-  struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
+  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
   write_region(i, (char*)space->bottom(), used, capacity, read_only, false);
 }
 
@@ -259,7 +421,7 @@
 void FileMapInfo::write_region(int region, char* base, size_t size,
                                size_t capacity, bool read_only,
                                bool allow_exec) {
-  struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[region];
+  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[region];
 
   if (_file_open) {
     guarantee(si->_file_offset == _file_offset, "file offset mismatch.");
@@ -343,7 +505,7 @@
 // JVM/TI RedefineClasses() support:
 // Remap the shared readonly space to shared readwrite, private.
 bool FileMapInfo::remap_shared_readonly_as_readwrite() {
-  struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[0];
+  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[0];
   if (!si->_read_only) {
     // the space is already readwrite so we are done
     return true;
@@ -371,7 +533,7 @@
 
 // Map the whole region at once, assumed to be allocated contiguously.
 ReservedSpace FileMapInfo::reserve_shared_memory() {
-  struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[0];
+  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[0];
   char* requested_addr = si->_base;
 
   size_t size = FileMapInfo::shared_spaces_size();
@@ -393,7 +555,7 @@
 static const char* shared_region_name[] = { "ReadOnly", "ReadWrite", "MiscData", "MiscCode"};
 
 char* FileMapInfo::map_region(int i) {
-  struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
+  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
   size_t used = si->_used;
   size_t alignment = os::vm_allocation_granularity();
   size_t size = align_size_up(used, alignment);
@@ -419,10 +581,10 @@
   if (!VerifySharedSpaces) {
     return true;
   }
-  const char* buf = _header._space[i]._base;
-  size_t sz = _header._space[i]._used;
+  const char* buf = _header->_space[i]._base;
+  size_t sz = _header->_space[i]._used;
   int crc = ClassLoader::crc32(0, buf, (jint)sz);
-  if (crc != _header._space[i]._crc) {
+  if (crc != _header->_space[i]._crc) {
     fail_continue("Checksum verification failed.");
     return false;
   }
@@ -432,7 +594,7 @@
 // Unmap a memory region in the address space.
 
 void FileMapInfo::unmap_region(int i) {
-  struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
+  struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
   size_t used = si->_used;
   size_t size = align_size_up(used, os::vm_allocation_granularity());
   if (!os::unmap_memory(si->_base, size)) {
@@ -449,12 +611,21 @@
 
 
 FileMapInfo* FileMapInfo::_current_info = NULL;
-
+SharedClassPathEntry* FileMapInfo::_classpath_entry_table = NULL;
+int FileMapInfo::_classpath_entry_table_size = 0;
+size_t FileMapInfo::_classpath_entry_size = 0x1234baad;
+bool FileMapInfo::_validating_classpath_entry_table = false;
 
 // Open the shared archive file, read and validate the header
 // information (version, boot classpath, etc.).  If initialization
 // fails, shared spaces are disabled and the file is closed. [See
 // fail_continue.]
+//
+// Validation of the archive is done in two steps:
+//
+// [1] validate_header() - done here. This checks the header, including _paths_misc_info.
+// [2] validate_classpath_entry_table - this is done later, because the table is in the RW
+//     region of the archive, which is not mapped yet.
 bool FileMapInfo::initialize() {
   assert(UseSharedSpaces, "UseSharedSpaces expected.");
 
@@ -468,104 +639,84 @@
   }
 
   init_from_file(_fd);
-  if (!validate()) {
+  if (!validate_header()) {
     return false;
   }
 
-  SharedReadOnlySize =  _header._space[0]._capacity;
-  SharedReadWriteSize = _header._space[1]._capacity;
-  SharedMiscDataSize =  _header._space[2]._capacity;
-  SharedMiscCodeSize =  _header._space[3]._capacity;
+  SharedReadOnlySize =  _header->_space[0]._capacity;
+  SharedReadWriteSize = _header->_space[1]._capacity;
+  SharedMiscDataSize =  _header->_space[2]._capacity;
+  SharedMiscCodeSize =  _header->_space[3]._capacity;
   return true;
 }
 
-int FileMapInfo::compute_header_crc() {
-  char* header = (char*)&_header;
+int FileMapInfo::FileMapHeader::compute_crc() {
+  char* header = data();
   // start computing from the field after _crc
-  char* buf = (char*)&_header._crc + sizeof(int);
-  size_t sz = sizeof(FileMapInfo::FileMapHeader) - (buf - header);
+  char* buf = (char*)&_crc + sizeof(int);
+  size_t sz = data_size() - (buf - header);
   int crc = ClassLoader::crc32(0, buf, (jint)sz);
   return crc;
 }
 
-bool FileMapInfo::validate() {
-  if (VerifySharedSpaces && compute_header_crc() != _header._crc) {
+int FileMapInfo::compute_header_crc() {
+  return _header->compute_crc();
+}
+
+bool FileMapInfo::FileMapHeader::validate() {
+  if (_magic != (int)0xf00baba2) {
+    FileMapInfo::fail_continue("The shared archive file has a bad magic number.");
+    return false;
+  }
+  if (VerifySharedSpaces && compute_crc() != _crc) {
     fail_continue("Header checksum verification failed.");
     return false;
   }
-  if (_header._version != current_version()) {
-    fail_continue("The shared archive file is the wrong version.");
-    return false;
-  }
-  if (_header._magic != (int)0xf00baba2) {
-    fail_continue("The shared archive file has a bad magic number.");
+  if (_version != current_version()) {
+    FileMapInfo::fail_continue("The shared archive file is the wrong version.");
+
     return false;
   }
   char header_version[JVM_IDENT_MAX];
   get_header_version(header_version);
-  if (strncmp(_header._jvm_ident, header_version, JVM_IDENT_MAX-1) != 0) {
-    fail_continue("The shared archive file was created by a different"
-                  " version or build of HotSpot.");
-    return false;
-  }
-  if (_header._obj_alignment != ObjectAlignmentInBytes) {
-    fail_continue("The shared archive file's ObjectAlignmentInBytes of %d"
-                  " does not equal the current ObjectAlignmentInBytes of %d.",
-                  _header._obj_alignment, ObjectAlignmentInBytes);
-    return false;
-  }
-
-  // Cannot verify interpreter yet, as it can only be created after the GC
-  // heap has been initialized.
-
-  if (_header._num_jars >= JVM_SHARED_JARS_MAX) {
-    fail_continue("Too many jar files to share.");
+  if (strncmp(_jvm_ident, header_version, JVM_IDENT_MAX-1) != 0) {
+    if (TraceClassPaths) {
+      tty->print_cr("Expected: %s", header_version);
+      tty->print_cr("Actual:   %s", _jvm_ident);
+    }
+    FileMapInfo::fail_continue("The shared archive file was created by a different"
+                  " version or build of HotSpot");
     return false;
   }
-
-  // Build checks on classpath and jar files
-  int num_jars_now = 0;
-  ClassPathEntry *cpe = ClassLoader::classpath_entry(0);
-  for ( ; cpe != NULL; cpe = cpe->next()) {
-
-    if (cpe->is_jar_file()) {
-      if (num_jars_now < _header._num_jars) {
-
-        // Jar file - verify timestamp and file size.
-        struct stat st;
-        const char *path = cpe->name();
-        if (os::stat(path, &st) != 0) {
-          fail_continue("Unable to open jar file %s.", path);
-          return false;
-        }
-        if (_header._jar[num_jars_now]._timestamp != st.st_mtime ||
-            _header._jar[num_jars_now]._filesize != st.st_size) {
-          fail_continue("A jar file is not the one used while building"
-                        " the shared archive file.");
-          return false;
-        }
-      }
-      ++num_jars_now;
-    } else {
-
-      // If directories appear in boot classpath, they must be empty to
-      // avoid having to verify each individual class file.
-      const char* name = ((ClassPathDirEntry*)cpe)->name();
-      if (!os::dir_is_empty(name)) {
-        fail_continue("Boot classpath directory %s is not empty.", name);
-        return false;
-      }
-    }
-  }
-  if (num_jars_now < _header._num_jars) {
-    fail_continue("The number of jar files in the boot classpath is"
-                  " less than the number the shared archive was created with.");
+  if (_obj_alignment != ObjectAlignmentInBytes) {
+    FileMapInfo::fail_continue("The shared archive file's ObjectAlignmentInBytes of %d"
+                  " does not equal the current ObjectAlignmentInBytes of %d.",
+                  _obj_alignment, ObjectAlignmentInBytes);
     return false;
   }
 
   return true;
 }
 
+bool FileMapInfo::validate_header() {
+  bool status = _header->validate();
+
+  if (status) {
+    if (!ClassLoader::check_shared_paths_misc_info(_paths_misc_info, _header->_paths_misc_info_size)) {
+      if (!PrintSharedArchiveAndExit) {
+        fail_continue("shared class paths mismatch (hint: enable -XX:+TraceClassPaths to diagnose the failure)");
+        status = false;
+      }
+    }
+  }
+
+  if (_paths_misc_info != NULL) {
+    FREE_C_HEAP_ARRAY(char, _paths_misc_info, mtClass);
+    _paths_misc_info = NULL;
+  }
+  return status;
+}
+
 // The following method is provided to see whether a given pointer
 // falls in the mapped shared space.
 // Param:
@@ -574,8 +725,8 @@
 // True if the p is within the mapped shared space, otherwise, false.
 bool FileMapInfo::is_in_shared_space(const void* p) {
   for (int i = 0; i < MetaspaceShared::n_regions; i++) {
-    if (p >= _header._space[i]._base &&
-        p < _header._space[i]._base + _header._space[i]._used) {
+    if (p >= _header->_space[i]._base &&
+        p < _header->_space[i]._base + _header->_space[i]._used) {
       return true;
     }
   }
@@ -586,7 +737,7 @@
 void FileMapInfo::print_shared_spaces() {
   gclog_or_tty->print_cr("Shared Spaces:");
   for (int i = 0; i < MetaspaceShared::n_regions; i++) {
-    struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
+    struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
     gclog_or_tty->print("  %s " INTPTR_FORMAT "-" INTPTR_FORMAT,
                         shared_region_name[i],
                         si->_base, si->_base + si->_used);
@@ -599,9 +750,9 @@
   if (map_info) {
     map_info->fail_continue(msg);
     for (int i = 0; i < MetaspaceShared::n_regions; i++) {
-      if (map_info->_header._space[i]._base != NULL) {
+      if (map_info->_header->_space[i]._base != NULL) {
         map_info->unmap_region(i);
-        map_info->_header._space[i]._base = NULL;
+        map_info->_header->_space[i]._base = NULL;
       }
     }
   } else if (DumpSharedSpaces) {
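
allocate_classpath_entry_table() above is a two-pass builder: pass 0 walks the classpath only to total the bytes needed (fixed-size entries plus every name with its trailing NUL), a single buffer of that size is allocated, and pass 1 walks again to populate the entries and copy the names into the tail of the same buffer. A self-contained sketch of the same layout strategy; the types here are simplified stand-ins for SharedClassPathEntry and the metadata array:

#include <cassert>
#include <cstring>
#include <string>
#include <vector>

struct Entry {             // simplified SharedClassPathEntry
  const char* name;
  long filesize;
};

int main() {
  std::vector<std::string> classpath = {"rt.jar", "classes"};
  const size_t entry_size = sizeof(Entry);

  // Pass 0: size everything that will live in one contiguous buffer.
  size_t bytes = 0;
  for (const auto& p : classpath)
    bytes += entry_size + p.size() + 1;           // entry + name with trailing NUL

  std::vector<char> buffer(bytes);
  Entry* table      = reinterpret_cast<Entry*>(buffer.data());
  char*  strptr     = buffer.data() + entry_size * classpath.size();
  char*  strptr_max = buffer.data() + bytes;

  // Pass 1: fill the fixed-size entries and append the names after them.
  for (size_t i = 0; i < classpath.size(); i++) {
    const std::string& name = classpath[i];
    table[i].filesize = -1;                        // pretend everything is a directory
    table[i].name = strptr;
    assert(strptr + name.size() + 1 <= strptr_max && "miscalculated buffer size");
    std::memcpy(strptr, name.c_str(), name.size() + 1);
    strptr += name.size() + 1;
  }

  assert(std::strcmp(table[1].name, "classes") == 0);
  return 0;
}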
--- a/src/share/vm/memory/filemap.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/filemap.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -37,30 +37,55 @@
 //  misc data (block offset table, string table, symbols, dictionary, etc.)
 //  tag(666)
 
-static const int JVM_SHARED_JARS_MAX = 128;
-static const int JVM_SPACENAME_MAX = 128;
 static const int JVM_IDENT_MAX = 256;
-static const int JVM_ARCH_MAX = 12;
-
 
 class Metaspace;
 
+class SharedClassPathEntry VALUE_OBJ_CLASS_SPEC {
+public:
+  const char *_name;
+  time_t _timestamp;          // jar timestamp,  0 if it is a directory
+  long   _filesize;           // jar file size, -1 if it is a directory
+  bool is_dir() {
+    return _filesize == -1;
+  }
+};
+
 class FileMapInfo : public CHeapObj<mtInternal> {
 private:
+  friend class ManifestStream;
   enum {
     _invalid_version = -1,
-    _current_version = 1
+    _current_version = 2
   };
 
   bool  _file_open;
   int   _fd;
   size_t  _file_offset;
 
+private:
+  static SharedClassPathEntry* _classpath_entry_table;
+  static int                   _classpath_entry_table_size;
+  static size_t                _classpath_entry_size;
+  static bool                  _validating_classpath_entry_table;
+
   // FileMapHeader describes the shared space data in the file to be
   // mapped.  This structure gets written to a file.  It is not a class, so
   // that the compilers don't add any compiler-private data to it.
 
-  struct FileMapHeader {
+public:
+  struct FileMapHeaderBase : public CHeapObj<mtClass> {
+    virtual bool validate() = 0;
+    virtual void populate(FileMapInfo* info, size_t alignment) = 0;
+  };
+  struct FileMapHeader : FileMapHeaderBase {
+    // Use data() and data_size() to memcpy to/from the FileMapHeader. We need to
+    // avoid reading/writing the C++ vtable pointer.
+    static size_t data_size();
+    char* data() {
+      return ((char*)this) + sizeof(FileMapHeaderBase);
+    }
+
     int    _magic;                    // identify file type.
     int    _crc;                      // header crc checksum.
     int    _version;                  // (from enum, above.)
@@ -80,46 +105,67 @@
     // The following fields are all sanity checks for whether this archive
     // will function correctly with this JVM and the bootclasspath it's
     // invoked with.
-    char  _arch[JVM_ARCH_MAX];            // architecture
     char  _jvm_ident[JVM_IDENT_MAX];      // identifier for jvm
-    int   _num_jars;              // Number of jars in bootclasspath
 
-    // Per jar file data:  timestamp, size.
+    // The _paths_misc_info is a variable-size structure that records "miscellaneous"
+    // information during dumping. It is generated and validated by the
+    // SharedPathsMiscInfo class. See SharedPathsMiscInfo.hpp and sharedClassUtil.hpp for
+    // a detailed description.
+    //
+    // The _paths_misc_info data is stored as a byte array in the archive file header,
+    // immediately after the _header field. This information is used only when
+    // checking the validity of the archive and is deallocated after the archive is loaded.
+    //
+    // Note that the _paths_misc_info does NOT include information for JAR files
+    // that existed during dump time. Their information is stored in _classpath_entry_table.
+    int _paths_misc_info_size;
 
-    struct {
-      time_t _timestamp;          // jar timestamp.
-      long   _filesize;           // jar file size.
-    } _jar[JVM_SHARED_JARS_MAX];
-  } _header;
+    // The following is a table of all the class path entries that were used
+    // during dumping. At run time, we require these files to exist and have the same
+    // size/modification time, or else the archive will refuse to load.
+    //
+    // All of these entries must be JAR files. The dumping process would fail if a non-empty
+    // directory was specified in the classpaths. If an empty directory was specified,
+    // it is checked by the _paths_misc_info as described above.
+    //
+    // FIXME -- if JAR files in the tail of the list were specified but not used during dumping,
+    // they should be removed from this table, to save space and to avoid spurious
+    // loading failures during runtime.
+    int _classpath_entry_table_size;
+    size_t _classpath_entry_size;
+    SharedClassPathEntry* _classpath_entry_table;
+
+    virtual bool validate();
+    virtual void populate(FileMapInfo* info, size_t alignment);
+    int compute_crc();
+  };
+
+  FileMapHeader * _header;
+
   const char* _full_path;
+  char* _paths_misc_info;
 
   static FileMapInfo* _current_info;
 
   bool  init_from_file(int fd);
   void  align_file_position();
+  bool  validate_header_impl();
 
 public:
-  FileMapInfo() {
-    _file_offset = 0;
-    _file_open = false;
-    _header._version = _invalid_version;
-  }
+  FileMapInfo();
+  ~FileMapInfo();
 
   static int current_version()        { return _current_version; }
   int    compute_header_crc();
-  void   set_header_crc(int crc)      { _header._crc = crc; }
+  void   set_header_crc(int crc)      { _header->_crc = crc; }
   void   populate_header(size_t alignment);
-  bool   validate();
+  bool   validate_header();
   void   invalidate();
-  int    version()                    { return _header._version; }
-  size_t alignment()                  { return _header._alignment; }
-  size_t space_capacity(int i)        { return _header._space[i]._capacity; }
-  char*  region_base(int i)           { return _header._space[i]._base; }
-  struct FileMapHeader* header()      { return &_header; }
-
-  static void set_current_info(FileMapInfo* info) {
-    CDS_ONLY(_current_info = info;)
-  }
+  int    version()                    { return _header->_version; }
+  size_t alignment()                  { return _header->_alignment; }
+  size_t space_capacity(int i)        { return _header->_space[i]._capacity; }
+  char*  region_base(int i)           { return _header->_space[i]._base; }
+  struct FileMapHeader* header()      { return _header; }
 
   static FileMapInfo* current_info() {
     CDS_ONLY(return _current_info;)
@@ -151,7 +197,7 @@
 
   // Errors.
   static void fail_stop(const char *msg, ...);
-  void fail_continue(const char *msg, ...);
+  static void fail_continue(const char *msg, ...);
 
   // Return true if given address is in the mapped shared space.
   bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
@@ -165,6 +211,22 @@
 
   // Stop CDS sharing and unmap CDS regions.
   static void stop_sharing_and_unmap(const char* msg);
+
+  static void allocate_classpath_entry_table();
+  bool validate_classpath_entry_table();
+
+  static SharedClassPathEntry* shared_classpath(int index) {
+    char* p = (char*)_classpath_entry_table;
+    p += _classpath_entry_size * index;
+    return (SharedClassPathEntry*)p;
+  }
+  static const char* shared_classpath_name(int index) {
+    return shared_classpath(index)->_name;
+  }
+
+  static int get_number_of_share_classpaths() {
+    return _classpath_entry_table_size;
+  }
 };
 
 #endif // SHARE_VM_MEMORY_FILEMAP_HPP
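
The data()/data_size() pair above exists because FileMapHeaderBase has virtual functions: the object begins with a vtable pointer that must never be copied into the archive file. Below is a minimal sketch of the intended pattern (not part of this changeset), assuming these declarations and two hypothetical I/O helpers:

extern void archive_write(int fd, const void* buf, size_t len);  // hypothetical I/O stand-in
extern void archive_read(int fd, void* buf, size_t len);         // hypothetical I/O stand-in

void save_header_sketch(FileMapInfo::FileMapHeader* header, int fd) {
  // data() starts just past sizeof(FileMapHeaderBase), so the process-specific
  // vtable pointer never lands in the archive file.
  archive_write(fd, header->data(), FileMapInfo::FileMapHeader::data_size());
}

void load_header_sketch(FileMapInfo::FileMapHeader* header, int fd) {
  // Only the plain-old-data payload is overwritten; the in-memory object keeps
  // the vtable pointer constructed by the current process.
  archive_read(fd, header->data(), FileMapInfo::FileMapHeader::data_size());
}
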
--- a/src/share/vm/memory/freeList.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/freeList.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -34,7 +34,6 @@
 
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
-#include "gc_implementation/g1/g1CodeCacheRemSet.hpp"
 #endif // INCLUDE_ALL_GCS
 
 // Free list.  A FreeList is used to access a linked list of chunks
@@ -333,5 +332,4 @@
 template class FreeList<Metachunk>;
 #if INCLUDE_ALL_GCS
 template class FreeList<FreeChunk>;
-template class FreeList<G1CodeRootChunk>;
 #endif // INCLUDE_ALL_GCS
--- a/src/share/vm/memory/gcLocker.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/gcLocker.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,6 +26,7 @@
 #include "memory/gcLocker.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/sharedHeap.hpp"
+#include "runtime/thread.inline.hpp"
 
 volatile jint GC_locker::_jni_lock_count = 0;
 volatile bool GC_locker::_needs_gc       = false;
--- a/src/share/vm/memory/genCollectedHeap.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -28,6 +28,7 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/icBuffer.hpp"
 #include "gc_implementation/shared/collectorCounters.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_implementation/shared/vmGCOperations.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
@@ -60,8 +61,8 @@
 GenCollectedHeap* GenCollectedHeap::_gch;
 NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
 
-// The set of potentially parallel tasks in strong root scanning.
-enum GCH_process_strong_roots_tasks {
+// The set of potentially parallel tasks in root scanning.
+enum GCH_strong_roots_tasks {
   // We probably want to parallelize both of these internally, but for now...
   GCH_PS_younger_gens,
   // Leave this one last.
@@ -71,11 +72,11 @@
 GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
   SharedHeap(policy),
   _gen_policy(policy),
-  _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
+  _gen_process_roots_tasks(new SubTasksDone(GCH_PS_NumElements)),
   _full_collections_completed(0)
 {
-  if (_gen_process_strong_tasks == NULL ||
-      !_gen_process_strong_tasks->valid()) {
+  if (_gen_process_roots_tasks == NULL ||
+      !_gen_process_roots_tasks->valid()) {
     vm_exit_during_initialization("Failed necessary allocation.");
   }
   assert(policy != NULL, "Sanity check");
@@ -385,7 +386,9 @@
     const char* gc_cause_prefix = complete ? "Full GC" : "GC";
     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL);
+    // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
+    // so we can assume here that the next GC id is what we want.
+    GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
 
     gc_prologue(complete);
     increment_total_collections(complete);
@@ -418,7 +421,9 @@
         }
         // Timer for individual generations. Last argument is false: no CR
         // FIXME: We should try to start the timing earlier to cover more of the GC pause
-        GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL);
+        // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
+        // so we can assume here that the next GC id is what we want.
+        GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL, GCId::peek());
         TraceCollectorStats tcs(_gens[i]->counters());
         TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause());
 
@@ -585,33 +590,29 @@
 
 void GenCollectedHeap::set_par_threads(uint t) {
   SharedHeap::set_par_threads(t);
-  _gen_process_strong_tasks->set_n_threads(t);
+  _gen_process_roots_tasks->set_n_threads(t);
 }
 
 void GenCollectedHeap::
-gen_process_strong_roots(int level,
-                         bool younger_gens_as_roots,
-                         bool activate_scope,
-                         bool is_scavenging,
-                         SharedHeap::ScanningOption so,
-                         OopsInGenClosure* not_older_gens,
-                         bool do_code_roots,
-                         OopsInGenClosure* older_gens,
-                         KlassClosure* klass_closure) {
-  // General strong roots.
+gen_process_roots(int level,
+                  bool younger_gens_as_roots,
+                  bool activate_scope,
+                  SharedHeap::ScanningOption so,
+                  OopsInGenClosure* not_older_gens,
+                  OopsInGenClosure* weak_roots,
+                  OopsInGenClosure* older_gens,
+                  CLDClosure* cld_closure,
+                  CLDClosure* weak_cld_closure,
+                  CodeBlobClosure* code_closure) {
 
-  if (!do_code_roots) {
-    SharedHeap::process_strong_roots(activate_scope, is_scavenging, so,
-                                     not_older_gens, NULL, klass_closure);
-  } else {
-    bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active());
-    CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking);
-    SharedHeap::process_strong_roots(activate_scope, is_scavenging, so,
-                                     not_older_gens, &code_roots, klass_closure);
-  }
+  // General roots.
+  SharedHeap::process_roots(activate_scope, so,
+                            not_older_gens, weak_roots,
+                            cld_closure, weak_cld_closure,
+                            code_closure);
 
   if (younger_gens_as_roots) {
-    if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
+    if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) {
       for (int i = 0; i < level; i++) {
         not_older_gens->set_generation(_gens[i]);
         _gens[i]->oop_iterate(not_older_gens);
@@ -627,12 +628,42 @@
     older_gens->reset_generation();
   }
 
-  _gen_process_strong_tasks->all_tasks_completed();
+  _gen_process_roots_tasks->all_tasks_completed();
 }
 
-void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
-                                              CodeBlobClosure* code_roots) {
-  SharedHeap::process_weak_roots(root_closure, code_roots);
+void GenCollectedHeap::
+gen_process_roots(int level,
+                  bool younger_gens_as_roots,
+                  bool activate_scope,
+                  SharedHeap::ScanningOption so,
+                  bool only_strong_roots,
+                  OopsInGenClosure* not_older_gens,
+                  OopsInGenClosure* older_gens,
+                  CLDClosure* cld_closure) {
+
+  const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
+
+  bool is_moving_collection = false;
+  if (level == 0 || is_adjust_phase) {
+    // young collections are always moving
+    is_moving_collection = true;
+  }
+
+  MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
+  CodeBlobClosure* code_closure = &mark_code_closure;
+
+  gen_process_roots(level,
+                    younger_gens_as_roots,
+                    activate_scope, so,
+                    not_older_gens, only_strong_roots ? NULL : not_older_gens,
+                    older_gens,
+                    cld_closure, only_strong_roots ? NULL : cld_closure,
+                    code_closure);
+
+}
+
+void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
+  SharedHeap::process_weak_roots(root_closure);
   // "Local" "weak" refs
   for (int i = 0; i < _n_gens; i++) {
     _gens[i]->ref_processor()->weak_oops_do(root_closure);
@@ -673,10 +704,6 @@
   return _gens[0]->end_addr();
 }
 
-size_t GenCollectedHeap::unsafe_max_alloc() {
-  return _gens[0]->unsafe_max_alloc_nogc();
-}
-
 // public collection interfaces
 
 void GenCollectedHeap::collect(GCCause::Cause cause) {
@@ -687,15 +714,18 @@
 #else  // INCLUDE_ALL_GCS
     ShouldNotReachHere();
 #endif // INCLUDE_ALL_GCS
+  } else if (cause == GCCause::_wb_young_gc) {
+    // minor collection for WhiteBox API
+    collect(cause, 0);
   } else {
 #ifdef ASSERT
-    if (cause == GCCause::_scavenge_alot) {
-      // minor collection only
-      collect(cause, 0);
-    } else {
-      // Stop-the-world full collection
-      collect(cause, n_gens() - 1);
-    }
+  if (cause == GCCause::_scavenge_alot) {
+    // minor collection only
+    collect(cause, 0);
+  } else {
+    // Stop-the-world full collection
+    collect(cause, n_gens() - 1);
+  }
 #else
     // Stop-the-world full collection
     collect(cause, n_gens() - 1);
@@ -851,12 +881,6 @@
   }
 }
 
-void GenCollectedHeap::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->oop_iterate(mr, cl);
-  }
-}
-
 void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
   for (int i = 0; i < _n_gens; i++) {
     _gens[i]->object_iterate(cl);
@@ -1074,7 +1098,7 @@
   guarantee(_n_gens = 2, "Wrong number of generations");
   Generation* old_gen = _gens[1];
   // Start by compacting into same gen.
-  CompactPoint cp(old_gen, NULL, NULL);
+  CompactPoint cp(old_gen);
   old_gen->prepare_for_compaction(&cp);
   Generation* young_gen = _gens[0];
   young_gen->prepare_for_compaction(&cp);
--- a/src/share/vm/memory/genCollectedHeap.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -78,9 +78,9 @@
   unsigned int _full_collections_completed;
 
   // Data structure for claiming the (potentially) parallel tasks in
-  // (gen-specific) strong roots processing.
-  SubTasksDone* _gen_process_strong_tasks;
-  SubTasksDone* gen_process_strong_tasks() { return _gen_process_strong_tasks; }
+  // (gen-specific) roots processing.
+  SubTasksDone* _gen_process_roots_tasks;
+  SubTasksDone* gen_process_roots_tasks() { return _gen_process_roots_tasks; }
 
   // In block contents verification, the number of header words to skip
   NOT_PRODUCT(static size_t _skip_header_HeapWords;)
@@ -166,14 +166,6 @@
   HeapWord** top_addr() const;
   HeapWord** end_addr() const;
 
-  // Return an estimate of the maximum allocation that could be performed
-  // without triggering any collection activity.  In a generational
-  // collector, for example, this is probably the largest allocation that
-  // could be supported in the youngest generation.  It is "unsafe" because
-  // no locks are taken; the result should be treated as an approximation,
-  // not a guarantee.
-  size_t unsafe_max_alloc();
-
   // Does this heap support heap inspection? (+PrintClassHistogram)
   virtual bool supports_heap_inspection() const { return true; }
 
@@ -220,7 +212,6 @@
 
   // Iteration functions.
   void oop_iterate(ExtendedOopClosure* cl);
-  void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
   void object_iterate(ObjectClosure* cl);
   void safe_object_iterate(ObjectClosure* cl);
   Space* space_containing(const void* addr) const;
@@ -412,26 +403,35 @@
   // The "so" argument determines which of the roots
   // the closure is applied to:
   // "SO_None" does none;
-  // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
-  // "SO_SystemClasses" to all the "system" classes and loaders;
-  // "SO_Strings" applies the closure to all entries in the StringTable.
-  void gen_process_strong_roots(int level,
-                                bool younger_gens_as_roots,
-                                // The remaining arguments are in an order
-                                // consistent with SharedHeap::process_strong_roots:
-                                bool activate_scope,
-                                bool is_scavenging,
-                                SharedHeap::ScanningOption so,
-                                OopsInGenClosure* not_older_gens,
-                                bool do_code_roots,
-                                OopsInGenClosure* older_gens,
-                                KlassClosure* klass_closure);
+ private:
+  void gen_process_roots(int level,
+                         bool younger_gens_as_roots,
+                         bool activate_scope,
+                         SharedHeap::ScanningOption so,
+                         OopsInGenClosure* not_older_gens,
+                         OopsInGenClosure* weak_roots,
+                         OopsInGenClosure* older_gens,
+                         CLDClosure* cld_closure,
+                         CLDClosure* weak_cld_closure,
+                         CodeBlobClosure* code_closure);
 
-  // Apply "blk" to all the weak roots of the system.  These include
-  // JNI weak roots, the code cache, system dictionary, symbol table,
-  // string table, and referents of reachable weak refs.
-  void gen_process_weak_roots(OopClosure* root_closure,
-                              CodeBlobClosure* code_roots);
+ public:
+  static const bool StrongAndWeakRoots = false;
+  static const bool StrongRootsOnly    = true;
+
+  void gen_process_roots(int level,
+                         bool younger_gens_as_roots,
+                         bool activate_scope,
+                         SharedHeap::ScanningOption so,
+                         bool only_strong_roots,
+                         OopsInGenClosure* not_older_gens,
+                         OopsInGenClosure* older_gens,
+                         CLDClosure* cld_closure);
+
+  // Apply "root_closure" to all the weak roots of the system.
+  // These include JNI weak roots, string table,
+  // and referents of reachable weak refs.
+  void gen_process_weak_roots(OopClosure* root_closure);
 
   // Set the saved marks of generations, if that makes sense.
   // In particular, if any generation might iterate over the oops
--- a/src/share/vm/memory/genMarkSweep.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/genMarkSweep.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -69,7 +69,7 @@
   _ref_processor = rp;
   rp->setup_policy(clear_all_softrefs);
 
-  GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
+  GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer->gc_id());
 
   gch->trace_heap_before_gc(_gc_tracer);
 
@@ -193,7 +193,7 @@
 void GenMarkSweep::mark_sweep_phase1(int level,
                                   bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
-  GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer);
+  GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
   trace(" 1");
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
@@ -207,22 +207,21 @@
   // Need new claim bits before marking starts.
   ClassLoaderDataGraph::clear_claimed_marks();
 
-  gch->gen_process_strong_roots(level,
-                                false, // Younger gens are not roots.
-                                true,  // activate StrongRootsScope
-                                false, // not scavenging
-                                SharedHeap::SO_SystemClasses,
-                                &follow_root_closure,
-                                true,   // walk code active on stacks
-                                &follow_root_closure,
-                                &follow_klass_closure);
+  gch->gen_process_roots(level,
+                         false, // Younger gens are not roots.
+                         true,  // activate StrongRootsScope
+                         SharedHeap::SO_None,
+                         GenCollectedHeap::StrongRootsOnly,
+                         &follow_root_closure,
+                         &follow_root_closure,
+                         &follow_cld_closure);
 
   // Process reference objects found during marking
   {
     ref_processor()->setup_policy(clear_all_softrefs);
     const ReferenceProcessorStats& stats =
       ref_processor()->process_discovered_references(
-        &is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer);
+        &is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer, _gc_tracer->gc_id());
     gc_tracer()->report_gc_reference_stats(stats);
   }
 
@@ -264,7 +263,7 @@
 
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
-  GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer);
+  GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
   trace("2");
 
   gch->prepare_for_compaction();
@@ -281,7 +280,7 @@
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
   // Adjust the pointers to reflect the new locations
-  GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer);
+  GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
   trace("3");
 
   // Need new claim bits for the pointer adjustment tracing.
@@ -293,22 +292,16 @@
   // are run.
   adjust_pointer_closure.set_orig_generation(gch->get_gen(level));
 
-  gch->gen_process_strong_roots(level,
-                                false, // Younger gens are not roots.
-                                true,  // activate StrongRootsScope
-                                false, // not scavenging
-                                SharedHeap::SO_AllClasses,
-                                &adjust_pointer_closure,
-                                false, // do not walk code
-                                &adjust_pointer_closure,
-                                &adjust_klass_closure);
+  gch->gen_process_roots(level,
+                         false, // Younger gens are not roots.
+                         true,  // activate StrongRootsScope
+                         SharedHeap::SO_AllCodeCache,
+                         GenCollectedHeap::StrongAndWeakRoots,
+                         &adjust_pointer_closure,
+                         &adjust_pointer_closure,
+                         &adjust_cld_closure);
 
-  // Now adjust pointers in remaining weak roots.  (All of which should
-  // have been cleared if they pointed to non-surviving objects.)
-  CodeBlobToOopClosure adjust_code_pointer_closure(&adjust_pointer_closure,
-                                                   /*do_marking=*/ false);
-  gch->gen_process_weak_roots(&adjust_pointer_closure,
-                              &adjust_code_pointer_closure);
+  gch->gen_process_weak_roots(&adjust_pointer_closure);
 
   adjust_marks();
   GenAdjustPointersClosure blk;
@@ -336,7 +329,7 @@
   // to use a higher index (saved from phase2) when verifying perm_gen.
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
-  GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer);
+  GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
   trace("4");
 
   GenCompactClosure blk;
--- a/src/share/vm/memory/generation.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/generation.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -297,22 +297,16 @@
 
 class GenerationOopIterateClosure : public SpaceClosure {
  public:
-  ExtendedOopClosure* cl;
-  MemRegion mr;
+  ExtendedOopClosure* _cl;
   virtual void do_space(Space* s) {
-    s->oop_iterate(mr, cl);
+    s->oop_iterate(_cl);
   }
-  GenerationOopIterateClosure(ExtendedOopClosure* _cl, MemRegion _mr) :
-    cl(_cl), mr(_mr) {}
+  GenerationOopIterateClosure(ExtendedOopClosure* cl) :
+    _cl(cl) {}
 };
 
 void Generation::oop_iterate(ExtendedOopClosure* cl) {
-  GenerationOopIterateClosure blk(cl, _reserved);
-  space_iterate(&blk);
-}
-
-void Generation::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
-  GenerationOopIterateClosure blk(cl, mr);
+  GenerationOopIterateClosure blk(cl);
   space_iterate(&blk);
 }
 
--- a/src/share/vm/memory/generation.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/generation.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -543,10 +543,6 @@
   // generation, calling "cl.do_oop" on each.
   virtual void oop_iterate(ExtendedOopClosure* cl);
 
-  // Same as above, restricted to the intersection of a memory region and
-  // the generation.
-  virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
-
   // Iterate over all objects in the generation, calling "cl.do_object" on
   // each.
   virtual void object_iterate(ObjectClosure* cl);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/memory/guardedMemory.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+#include "memory/allocation.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/guardedMemory.hpp"
+#include "runtime/os.hpp"
+
+void* GuardedMemory::wrap_copy(const void* ptr, const size_t len, const void* tag) {
+  size_t total_sz = GuardedMemory::get_total_size(len);
+  void* outerp = os::malloc(total_sz, mtInternal);
+  if (outerp != NULL) {
+    GuardedMemory guarded(outerp, len, tag);
+    void* innerp = guarded.get_user_ptr();
+    memcpy(innerp, ptr, len);
+    return innerp;
+  }
+  return NULL; // OOM
+}
+
+bool GuardedMemory::free_copy(void* p) {
+  if (p == NULL) {
+    return true;
+  }
+  GuardedMemory guarded((u_char*)p);
+  bool verify_ok = guarded.verify_guards();
+
+  /* always attempt to free, pass problem on to any nested memchecker */
+  os::free(guarded.release_for_freeing());
+
+  return verify_ok;
+}
+
+void GuardedMemory::print_on(outputStream* st) const {
+  if (_base_addr == NULL) {
+    st->print_cr("GuardedMemory(" PTR_FORMAT ") not associated to any memory", p2i(this));
+    return;
+  }
+  st->print_cr("GuardedMemory(" PTR_FORMAT ") base_addr=" PTR_FORMAT
+      " tag=" PTR_FORMAT " user_size=" SIZE_FORMAT " user_data=" PTR_FORMAT,
+      p2i(this), p2i(_base_addr), p2i(get_tag()), get_user_size(), p2i(get_user_ptr()));
+
+  Guard* guard = get_head_guard();
+  st->print_cr("  Header guard @" PTR_FORMAT " is %s", p2i(guard), (guard->verify() ? "OK" : "BROKEN"));
+  guard = get_tail_guard();
+  st->print_cr("  Trailer guard @" PTR_FORMAT " is %s", p2i(guard), (guard->verify() ? "OK" : "BROKEN"));
+
+  u_char udata = *get_user_ptr();
+  switch (udata) {
+  case uninitBlockPad:
+    st->print_cr("  User data appears unused");
+    break;
+  case freeBlockPad:
+    st->print_cr("  User data appears to have been freed");
+    break;
+  default:
+    st->print_cr("  User data appears to be in use");
+    break;
+  }
+}
+
+// test code...
+
+#ifndef PRODUCT
+
+static void guarded_memory_test_check(void* p, size_t sz, void* tag) {
+  assert(p != NULL, "NULL pointer given to check");
+  u_char* c = (u_char*) p;
+  GuardedMemory guarded(c);
+  assert(guarded.get_tag() == tag, "Tag is not the same as supplied");
+  assert(guarded.get_user_ptr() == c, "User pointer is not the same as supplied");
+  assert(guarded.get_user_size() == sz, "User size is not the same as supplied");
+  assert(guarded.verify_guards(), "Guard broken");
+}
+
+void GuardedMemory::test_guarded_memory() {
+  // Test the basic characteristics...
+  size_t total_sz = GuardedMemory::get_total_size(1);
+  assert(total_sz > 1 && total_sz >= (sizeof(GuardHeader) + 1 + sizeof(Guard)), "Unexpected size");
+  u_char* basep = (u_char*) os::malloc(total_sz, mtInternal);
+
+  GuardedMemory guarded(basep, 1, (void*)0xf000f000);
+
+  assert(*basep == badResourceValue, "Expected guard in the form of badResourceValue");
+  u_char* userp = guarded.get_user_ptr();
+  assert(*userp == uninitBlockPad, "Expected uninitialized data in the form of uninitBlockPad");
+  guarded_memory_test_check(userp, 1, (void*)0xf000f000);
+
+  void* freep = guarded.release_for_freeing();
+  assert((u_char*)freep == basep, "Expected the same pointer guard was ");
+  assert(*userp == freeBlockPad, "Expected user data to be free block padded");
+  assert(!guarded.verify_guards(), "Expected failed");
+  os::free(freep);
+
+  // Test a number of odd sizes...
+  size_t sz = 0;
+  do {
+    void* p = os::malloc(GuardedMemory::get_total_size(sz), mtInternal);
+    void* up = guarded.wrap_with_guards(p, sz, (void*)1);
+    memset(up, 0, sz);
+    guarded_memory_test_check(up, sz, (void*)1);
+    os::free(guarded.release_for_freeing());
+    sz = (sz << 4) + 1;
+  } while (sz < (256 * 1024));
+
+  // Test buffer overrun into head...
+  basep = (u_char*) os::malloc(GuardedMemory::get_total_size(1), mtInternal);
+  guarded.wrap_with_guards(basep, 1);
+  *basep = 0;
+  assert(!guarded.verify_guards(), "Expected failure");
+  os::free(basep);
+
+  // Test buffer overrun into tail with a number of odd sizes...
+  sz = 1;
+  do {
+    void* p = os::malloc(GuardedMemory::get_total_size(sz), mtInternal);
+    void* up = guarded.wrap_with_guards(p, sz, (void*)1);
+    memset(up, 0, sz + 1); // Buffer-overwrite (within guard)
+    assert(!guarded.verify_guards(), "Guard was not broken as expected");
+    os::free(guarded.release_for_freeing());
+    sz = (sz << 4) + 1;
+  } while (sz < (256 * 1024));
+
+  // Test wrap_copy/free_copy...
+  assert(GuardedMemory::free_copy(NULL), "Expected free NULL to be OK");
+
+  const char* str = "Check my bounds out";
+  size_t str_sz = strlen(str) + 1;
+  char* str_copy = (char*) GuardedMemory::wrap_copy(str, str_sz);
+  guarded_memory_test_check(str_copy, str_sz, NULL);
+  assert(strcmp(str, str_copy) == 0, "Not identical copy");
+  assert(GuardedMemory::free_copy(str_copy), "Free copy failed to verify");
+
+  void* no_data = NULL;
+  void* no_data_copy = GuardedMemory::wrap_copy(no_data, 0);
+  assert(GuardedMemory::free_copy(no_data_copy), "Expected valid guards even for no data copy");
+}
+
+#endif // !PRODUCT
+
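
wrap_copy() and free_copy() above give a copy-based way to guard a caller's buffer: the copy is placed between head and tail guards, and free_copy() reports whether those guards were still intact. A minimal sketch of that usage (not part of this changeset; the helper names and warning text are illustrative, and a HotSpot build context is assumed):

#include "memory/guardedMemory.hpp"
#include "utilities/debug.hpp"

// Duplicate a C string inside head/tail guards (illustrative helper).
static char* guarded_strdup_sketch(const char* s) {
  return (char*) GuardedMemory::wrap_copy(s, strlen(s) + 1);
}

// Free the guarded copy and report an overrun if either guard was damaged.
static void guarded_free_copy_sketch(void* p) {
  if (!GuardedMemory::free_copy(p)) {
    warning("buffer overrun detected while freeing guarded copy " PTR_FORMAT, p2i(p));
  }
}
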
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/memory/guardedMemory.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_GUARDED_MEMORY_HPP
+#define SHARE_VM_MEMORY_GUARDED_MEMORY_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+/**
+ * Guarded memory for detecting buffer overrun.
+ *
+ * Allows allocations to be wrapped with padded bytes of a known byte pattern,
+ * that is a "guard". Guard patterns may be verified to detect buffer overruns.
+ *
+ * Primarily used by "debug malloc" and "checked JNI".
+ *
+ * Memory layout:
+ *
+ * |Offset             | Content              | Description    |
+ * |------------------------------------------------------------
+ * |base_addr          | 0xABABABABABABABAB   | Head guard     |
+ * |+16                | <size_t:user_size>   | User data size |
+ * |+sizeof(uintptr_t) | <tag>                | Tag word       |
+ * |+sizeof(void*)     | 0xF1 <user_data>     | User data      |
+ * |+user_size         | 0xABABABABABABABAB   | Tail guard     |
+ * -------------------------------------------------------------
+ *
+ * Where:
+ *  - guard padding uses "badResourceValue" (0xAB)
+ *  - tag word is general purpose
+ *  - user data
+ *    -- initially padded with "uninitBlockPad" (0xF1),
+ *    -- to "freeBlockPad" (0xBA), when freed
+ *
+ * Usage:
+ *
+ * * Allocations: one may wrap allocations with guard memory:
+ * <code>
+ *   Thing* alloc_thing() {
+ *     void* mem = user_alloc_fn(GuardedMemory::get_total_size(sizeof(thing)));
+ *     GuardedMemory guarded(mem, sizeof(thing));
+ *     return (Thing*) guarded.get_user_ptr();
+ *   }
+ * </code>
+ * * Verify: memory guards are still intact
+ * <code>
+ *   bool verify_thing(Thing* thing) {
+ *     GuardedMemory guarded((void*)thing);
+ *     return guarded.verify_guards();
+ *   }
+ * </code>
+ * * Free: one may mark bytes as freed (further debugging support)
+ * <code>
+ *   void free_thing(Thing* thing) {
+ *    GuardedMemory guarded((void*)thing);
+ *    assert(guarded.verify_guards(), "Corrupt thing");
+ *    user_free_fn(guarded.release_for_freeing());
+ *   }
+ * </code>
+ */
+class GuardedMemory : StackObj { // Wrapper on stack
+
+  // Private inner classes for memory layout...
+
+protected:
+
+  /**
+   * Guard class for header and trailer known pattern to test for overwrites.
+   */
+  class Guard { // Class for raw memory (no vtbl allowed)
+    friend class GuardedMemory;
+   protected:
+    enum {
+      GUARD_SIZE = 16
+    };
+
+    u_char _guard[GUARD_SIZE];
+
+   public:
+
+    void build() {
+      u_char* c = _guard; // Possibly unaligned if tail guard
+      u_char* end = c + GUARD_SIZE;
+      while (c < end) {
+        *c = badResourceValue;
+        c++;
+      }
+    }
+
+    bool verify() const {
+      u_char* c = (u_char*) _guard;
+      u_char* end = c + GUARD_SIZE;
+      while (c < end) {
+        if (*c != badResourceValue) {
+          return false;
+        }
+        c++;
+      }
+      return true;
+    }
+
+  }; // GuardedMemory::Guard
+
+  /**
+   * Header guard and size
+   */
+  class GuardHeader : Guard {
+    friend class GuardedMemory;
+   protected:
+    // Take care when modifying fields here; it will affect alignment,
+    // e.g. x86 ABI 16 byte stack alignment
+    union {
+      uintptr_t __unused_full_word1;
+      size_t _user_size;
+    };
+    void* _tag;
+   public:
+    void set_user_size(const size_t usz) { _user_size = usz; }
+    size_t get_user_size() const { return _user_size; }
+
+    void set_tag(const void* tag) { _tag = (void*) tag; }
+    void* get_tag() const { return _tag; }
+
+  }; // GuardedMemory::GuardHeader
+
+  // Guarded Memory...
+
+ protected:
+  u_char* _base_addr;
+
+ public:
+
+  /**
+   * Create new guarded memory.
+   *
+   * Wraps, starting at the given "base_ptr" with guards. Use "get_user_ptr()"
+   * to return a pointer suitable for user data.
+   *
+   * @param base_ptr  allocation wishing to be wrapped, must be at least "GuardedMemory::get_total_size()" bytes.
+   * @param user_size the size of the user data to be wrapped.
+   * @param tag       optional general purpose tag.
+   */
+  GuardedMemory(void* base_ptr, const size_t user_size, const void* tag = NULL) {
+    wrap_with_guards(base_ptr, user_size, tag);
+  }
+
+  /**
+   * Wrap existing guarded memory.
+   *
+   * To use this constructor, one must have created guarded memory with
+   * "GuardedMemory(void*, size_t, void*)" (or indirectly via helper, e.g. "wrap_copy()").
+   *
+   * @param user_p  existing wrapped memory.
+   */
+  GuardedMemory(void* userp) {
+    u_char* user_ptr = (u_char*) userp;
+    assert((uintptr_t)user_ptr > (sizeof(GuardHeader) + 0x1000), "Invalid pointer");
+    _base_addr = (user_ptr - sizeof(GuardHeader));
+  }
+
+  /**
+   * Create new guarded memory.
+   *
+   * Wraps, starting at the given "base_ptr" with guards. Allows reuse of stack allocated helper.
+   *
+   * @param base_ptr  allocation wishing to be wrapped, must be at least "GuardedMemory::get_total_size()" bytes.
+   * @param user_size the size of the user data to be wrapped.
+   * @param tag       optional general purpose tag.
+   *
+   * @return user data pointer (inner pointer to supplied "base_ptr").
+   */
+  void* wrap_with_guards(void* base_ptr, size_t user_size, const void* tag = NULL) {
+    assert(base_ptr != NULL, "Attempt to wrap NULL with memory guard");
+    _base_addr = (u_char*)base_ptr;
+    get_head_guard()->build();
+    get_head_guard()->set_user_size(user_size);
+    get_tail_guard()->build();
+    set_tag(tag);
+    set_user_bytes(uninitBlockPad);
+    assert(verify_guards(), "Expected valid memory guards");
+    return get_user_ptr();
+  }
+
+  /**
+   * Verify head and tail guards.
+   *
+   * @return true if guards are intact, false would indicate a buffer overrun.
+   */
+  bool verify_guards() const {
+    if (_base_addr != NULL) {
+      return (get_head_guard()->verify() && get_tail_guard()->verify());
+    }
+    return false;
+  }
+
+  /**
+   * Set the general purpose tag.
+   *
+   * @param tag general purpose tag.
+   */
+  void set_tag(const void* tag) { get_head_guard()->set_tag(tag); }
+
+  /**
+   * Return the general purpose tag.
+   *
+   * @return the general purpose tag, defaults to NULL.
+   */
+  void* get_tag() const { return get_head_guard()->get_tag(); }
+
+  /**
+   * Return the size of the user data.
+   *
+   * @return the size of the user data.
+   */
+  size_t get_user_size() const {
+    assert(_base_addr, "Not wrapping any memory");
+    return get_head_guard()->get_user_size();
+  }
+
+  /**
+   * Return the user data pointer.
+   *
+   * @return the user data pointer.
+   */
+  u_char* get_user_ptr() const {
+    assert(_base_addr, "Not wrapping any memory");
+    return _base_addr + sizeof(GuardHeader);
+  }
+
+  /**
+   * Release the wrapped pointer for resource freeing.
+   *
+   * Pads the user data with "freeBlockPad", and disassociates the helper.
+   *
+   * @return the original base pointer used to wrap the data.
+   */
+  void* release_for_freeing() {
+    set_user_bytes(freeBlockPad);
+    return release();
+  }
+
+  /**
+   * Disassociate the helper from the original base address.
+   *
+   * @return the original base pointer used to wrap the data.
+   */
+  void* release() {
+    void* p = (void*) _base_addr;
+    _base_addr = NULL;
+    return p;
+  }
+
+  virtual void print_on(outputStream* st) const;
+
+ protected:
+  GuardHeader*  get_head_guard() const { return (GuardHeader*) _base_addr; }
+  Guard*        get_tail_guard() const { return (Guard*) (get_user_ptr() + get_user_size()); };
+  void set_user_bytes(u_char ch) {
+    memset(get_user_ptr(), ch, get_user_size());
+  }
+
+public:
+  /**
+   * Return the total size required for wrapping the given user size.
+   *
+   * @return the total size required for wrapping the given user size.
+   */
+  static size_t get_total_size(size_t user_size) {
+    size_t total_size = sizeof(GuardHeader) + user_size + sizeof(Guard);
+    assert(total_size > user_size, "Unexpected wrap-around");
+    return total_size;
+  }
+
+  // Helper functions...
+
+  /**
+   * Wrap a copy of size "len" of "ptr".
+   *
+   * @param ptr the memory to be copied
+   * @param len the length of the copy
+   * @param tag optional general purpose tag (see GuardedMemory::get_tag())
+   *
+   * @return guarded wrapped memory pointer to the user area, or NULL if OOM.
+   */
+  static void* wrap_copy(const void* p, const size_t len, const void* tag = NULL);
+
+  /**
+   * Free wrapped copy.
+   *
+   * Frees memory copied with "wrap_copy()".
+   *
+   * @param p memory returned by "wrap_copy()".
+   *
+   * @return true if guards were verified as intact. false indicates a buffer overrun.
+   */
+  static bool free_copy(void* p);
+
+  // Testing...
+#ifndef PRODUCT
+  static void test_guarded_memory(void);
+#endif
+}; // GuardedMemory
+
+#endif // SHARE_VM_MEMORY_GUARDED_MEMORY_HPP
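
Taken together, the constructor, verify_guards() and release_for_freeing() above cover the full life cycle of a guarded allocation. A compilable sketch of that round trip, assuming a HotSpot build where os::malloc/os::free are available; the function names are illustrative, not part of this changeset:

#include "memory/guardedMemory.hpp"
#include "runtime/os.hpp"

// Allocate user_size bytes wrapped in head/tail guards; callers only ever see
// the inner pointer (illustrative helper).
void* guarded_malloc_sketch(size_t user_size) {
  void* base = os::malloc(GuardedMemory::get_total_size(user_size), mtInternal);
  if (base == NULL) {
    return NULL;
  }
  GuardedMemory guarded(base, user_size);  // builds both guards, pads user bytes
  return guarded.get_user_ptr();
}

// Verify the guards, mark the user bytes as freed, and release the block.
// A false return means the buffer was overrun at some point.
bool guarded_release_sketch(void* user_ptr) {
  GuardedMemory guarded(user_ptr);             // recover the wrapper from the inner pointer
  bool ok = guarded.verify_guards();
  os::free(guarded.release_for_freeing());
  return ok;
}
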
--- a/src/share/vm/memory/heapInspection.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/heapInspection.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -135,7 +135,7 @@
   _ref = (HeapWord*) Universe::boolArrayKlassObj();
   _buckets =
     (KlassInfoBucket*)  AllocateHeap(sizeof(KlassInfoBucket) * _num_buckets,
-                                            mtInternal, 0, AllocFailStrategy::RETURN_NULL);
+       mtInternal, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
   if (_buckets != NULL) {
     _size = _num_buckets;
     for (int index = 0; index < _size; index++) {
--- a/src/share/vm/memory/iterator.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/iterator.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -27,6 +27,7 @@
 #include "oops/oop.inline.hpp"
 
 void KlassToOopClosure::do_klass(Klass* k) {
+  assert(_oop_closure != NULL, "Not initialized?");
   k->oops_do(_oop_closure);
 }
 
@@ -34,6 +35,10 @@
   cld->oops_do(_oop_closure, &_klass_closure, _must_claim_cld);
 }
 
+void CLDToKlassAndOopClosure::do_cld(ClassLoaderData* cld) {
+  cld->oops_do(_oop_closure, _klass_closure, _must_claim_cld);
+}
+
 void ObjectToOopClosure::do_object(oop obj) {
   obj->oop_iterate(_cl);
 }
@@ -42,6 +47,20 @@
   ShouldNotCallThis();
 }
 
+void CodeBlobToOopClosure::do_nmethod(nmethod* nm) {
+  nm->oops_do(_cl);
+  if (_fix_relocations) {
+    nm->fix_oop_relocations();
+  }
+}
+
+void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
+  nmethod* nm = cb->as_nmethod_or_null();
+  if (nm != NULL) {
+    do_nmethod(nm);
+  }
+}
+
 MarkingCodeBlobClosure::MarkScope::MarkScope(bool activate)
   : _active(activate)
 {
@@ -54,32 +73,7 @@
 
 void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
   nmethod* nm = cb->as_nmethod_or_null();
-  if (nm == NULL)  return;
-  if (!nm->test_set_oops_do_mark()) {
-    NOT_PRODUCT(if (TraceScavenge)  nm->print_on(tty, "oops_do, 1st visit\n"));
-    do_newly_marked_nmethod(nm);
-  } else {
-    NOT_PRODUCT(if (TraceScavenge)  nm->print_on(tty, "oops_do, skipped on 2nd visit\n"));
+  if (nm != NULL && !nm->test_set_oops_do_mark()) {
+    do_nmethod(nm);
   }
 }
-
-void CodeBlobToOopClosure::do_newly_marked_nmethod(nmethod* nm) {
-  nm->oops_do(_cl, /*allow_zombie=*/ false);
-}
-
-void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
-  if (!_do_marking) {
-    nmethod* nm = cb->as_nmethod_or_null();
-    NOT_PRODUCT(if (TraceScavenge && Verbose && nm != NULL)  nm->print_on(tty, "oops_do, unmarked visit\n"));
-    // This assert won't work, since there are lots of mini-passes
-    // (mostly in debug mode) that co-exist with marking phases.
-    //assert(!(cb->is_nmethod() && ((nmethod*)cb)->test_oops_do_mark()), "found marked nmethod during mark-free phase");
-    if (nm != NULL) {
-      nm->oops_do(_cl);
-    }
-  } else {
-    MarkingCodeBlobClosure::do_code_blob(cb);
-  }
-}
-
-
--- a/src/share/vm/memory/iterator.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/iterator.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -84,8 +84,8 @@
   //
   // Providing default implementations of the _nv functions unfortunately
   // removes the compile-time safeness, but reduces the clutter for the
-  // ExtendedOopClosures that don't need to walk the metadata. Currently,
-  // only CMS needs these.
+  // ExtendedOopClosures that don't need to walk the metadata.
+  // Currently, only CMS and G1 need these.
 
   virtual bool do_metadata() { return do_metadata_nv(); }
   bool do_metadata_v()       { return do_metadata(); }
@@ -128,17 +128,33 @@
   virtual void do_klass(Klass* k) = 0;
 };
 
+class CLDClosure : public Closure {
+ public:
+  virtual void do_cld(ClassLoaderData* cld) = 0;
+};
+
 class KlassToOopClosure : public KlassClosure {
+  friend class MetadataAwareOopClosure;
+  friend class MetadataAwareOopsInGenClosure;
+
   OopClosure* _oop_closure;
+
+  // Used when _oop_closure couldn't be set in an initialization list.
+  void initialize(OopClosure* oop_closure) {
+    assert(_oop_closure == NULL, "Should only be called once");
+    _oop_closure = oop_closure;
+  }
+
  public:
-  KlassToOopClosure(OopClosure* oop_closure) : _oop_closure(oop_closure) {}
+  KlassToOopClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) {}
+
   virtual void do_klass(Klass* k);
 };
 
-class CLDToOopClosure {
-  OopClosure* _oop_closure;
+class CLDToOopClosure : public CLDClosure {
+  OopClosure*       _oop_closure;
   KlassToOopClosure _klass_closure;
-  bool _must_claim_cld;
+  bool              _must_claim_cld;
 
  public:
   CLDToOopClosure(OopClosure* oop_closure, bool must_claim_cld = true) :
@@ -149,6 +165,46 @@
   void do_cld(ClassLoaderData* cld);
 };
 
+class CLDToKlassAndOopClosure : public CLDClosure {
+  friend class SharedHeap;
+  friend class G1CollectedHeap;
+ protected:
+  OopClosure*   _oop_closure;
+  KlassClosure* _klass_closure;
+  bool          _must_claim_cld;
+ public:
+  CLDToKlassAndOopClosure(KlassClosure* klass_closure,
+                          OopClosure* oop_closure,
+                          bool must_claim_cld) :
+                              _oop_closure(oop_closure),
+                              _klass_closure(klass_closure),
+                              _must_claim_cld(must_claim_cld) {}
+  void do_cld(ClassLoaderData* cld);
+};
+
+// The base class for all concurrent marking closures
+// that participate in class unloading.
+// It's used to proxy through the metadata to the oops defined in them.
+class MetadataAwareOopClosure: public ExtendedOopClosure {
+  KlassToOopClosure _klass_closure;
+
+ public:
+  MetadataAwareOopClosure() : ExtendedOopClosure() {
+    _klass_closure.initialize(this);
+  }
+  MetadataAwareOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) {
+    _klass_closure.initialize(this);
+  }
+
+  virtual bool do_metadata()    { return do_metadata_nv(); }
+  inline  bool do_metadata_nv() { return true; }
+
+  virtual void do_klass(Klass* k);
+  void do_klass_nv(Klass* k);
+
+  virtual void do_class_loader_data(ClassLoaderData* cld);
+};
+
 // ObjectClosure is used for iterating through an object space
 
 class ObjectClosure : public Closure {
@@ -172,19 +228,6 @@
   ObjectToOopClosure(ExtendedOopClosure* cl) : _cl(cl) {}
 };
 
-// A version of ObjectClosure with "memory" (see _previous_address below)
-class UpwardsObjectClosure: public BoolObjectClosure {
-  HeapWord* _previous_address;
- public:
-  UpwardsObjectClosure() : _previous_address(NULL) { }
-  void set_previous(HeapWord* addr) { _previous_address = addr; }
-  HeapWord* previous()              { return _previous_address; }
-  // A return value of "true" can be used by the caller to decide
-  // if this object's end should *NOT* be recorded in
-  // _previous_address above.
-  virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
-};
-
 // A version of ObjectClosure that is expected to be robust
 // in the face of possibly uninitialized objects.
 class ObjectClosureCareful : public ObjectClosure {
@@ -240,14 +283,26 @@
   virtual void do_code_blob(CodeBlob* cb) = 0;
 };
 
-
-class MarkingCodeBlobClosure : public CodeBlobClosure {
+// Applies an oop closure to all ref fields in code blobs
+// iterated over in an object iteration.
+class CodeBlobToOopClosure : public CodeBlobClosure {
+  OopClosure* _cl;
+  bool _fix_relocations;
+ protected:
+  void do_nmethod(nmethod* nm);
  public:
+  CodeBlobToOopClosure(OopClosure* cl, bool fix_relocations) : _cl(cl), _fix_relocations(fix_relocations) {}
+  virtual void do_code_blob(CodeBlob* cb);
+
+  const static bool FixRelocations = true;
+};
+
+class MarkingCodeBlobClosure : public CodeBlobToOopClosure {
+ public:
+  MarkingCodeBlobClosure(OopClosure* cl, bool fix_relocations) : CodeBlobToOopClosure(cl, fix_relocations) {}
   // Called for each code blob, but at most once per unique blob.
-  virtual void do_newly_marked_nmethod(nmethod* nm) = 0;
 
   virtual void do_code_blob(CodeBlob* cb);
-    // = { if (!nmethod(cb)->test_set_oops_do_mark())  do_newly_marked_nmethod(cb); }
 
   class MarkScope : public StackObj {
   protected:
@@ -260,23 +315,6 @@
   };
 };
 
-
-// Applies an oop closure to all ref fields in code blobs
-// iterated over in an object iteration.
-class CodeBlobToOopClosure: public MarkingCodeBlobClosure {
-  OopClosure* _cl;
-  bool _do_marking;
-public:
-  virtual void do_newly_marked_nmethod(nmethod* cb);
-    // = { cb->oops_do(_cl); }
-  virtual void do_code_blob(CodeBlob* cb);
-    // = { if (_do_marking)  super::do_code_blob(cb); else cb->oops_do(_cl); }
-  CodeBlobToOopClosure(OopClosure* cl, bool do_marking)
-    : _cl(cl), _do_marking(do_marking) {}
-};
-
-
-
 // MonitorClosure is used for iterating over monitors in the monitors cache
 
 class ObjectMonitor;
@@ -345,4 +383,16 @@
   }
 };
 
+
+// Helper defines for ExtendedOopClosure
+
+#define if_do_metadata_checked(closure, nv_suffix)       \
+  /* Make sure the non-virtual and the virtual versions match. */     \
+  assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \
+      "Inconsistency in do_metadata");                                \
+  if (closure->do_metadata##nv_suffix())
+
+#define assert_should_ignore_metadata(closure, nv_suffix)                                  \
+  assert(!closure->do_metadata##nv_suffix(), "Code to handle metadata is not implemented")
+
 #endif // SHARE_VM_MEMORY_ITERATOR_HPP
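
The if_do_metadata_checked macro above is meant for the specialized oop-iteration bodies, where the nv_suffix argument selects between the virtual and non-virtual do_metadata variants. A small sketch of the intended shape (not part of this changeset), assuming a closure type that provides the _nv variants, such as MetadataAwareOopClosure above; the function name is illustrative:

template <typename ClosureType>
void walk_metadata_if_requested_sketch(ClosureType* closure, Klass* k) {
  // Expands to an assert that do_metadata_nv() agrees with do_metadata(),
  // followed by "if (closure->do_metadata_nv())".
  if_do_metadata_checked(closure, _nv) {
    closure->do_klass_nv(k);  // walk the metadata only when the closure asks for it
  }
}
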
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/memory/iterator.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_ITERATOR_INLINE_HPP
+#define SHARE_VM_MEMORY_ITERATOR_INLINE_HPP
+
+#include "classfile/classLoaderData.hpp"
+#include "memory/iterator.hpp"
+#include "oops/klass.hpp"
+#include "utilities/debug.hpp"
+
+inline void MetadataAwareOopClosure::do_class_loader_data(ClassLoaderData* cld) {
+  assert(_klass_closure._oop_closure == this, "Must be");
+
+  bool claim = true;  // Must claim the class loader data before processing.
+  cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim);
+}
+
+inline void MetadataAwareOopClosure::do_klass_nv(Klass* k) {
+  ClassLoaderData* cld = k->class_loader_data();
+  do_class_loader_data(cld);
+}
+
+inline void MetadataAwareOopClosure::do_klass(Klass* k)       { do_klass_nv(k); }
+
+#endif // SHARE_VM_MEMORY_ITERATOR_INLINE_HPP
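
With the inline definitions above, a concrete closure derived from MetadataAwareOopClosure only has to supply the oop-field visits; do_klass()/do_class_loader_data() already route the metadata back through the embedded KlassToOopClosure. A minimal sketch of such a subclass (not part of this changeset); the class name is illustrative and the marking work is elided:

#include "memory/iterator.inline.hpp"

class MarkLiveOopsClosureSketch : public MetadataAwareOopClosure {
 public:
  virtual void do_oop(oop* p)       { /* mark *p here */ }
  virtual void do_oop(narrowOop* p) { /* decode and mark here */ }
  // No metadata methods needed: do_klass_nv() above claims the holder's
  // ClassLoaderData and applies this closure to its oops.
};
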
--- a/src/share/vm/memory/memRegion.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/memRegion.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -103,11 +103,13 @@
 }
 
 void* MemRegion::operator new(size_t size) throw() {
-  return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
+  return (address)AllocateHeap(size, mtGC, CURRENT_PC,
+    AllocFailStrategy::RETURN_NULL);
 }
 
 void* MemRegion::operator new [](size_t size) throw() {
-  return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
+  return (address)AllocateHeap(size, mtGC, CURRENT_PC,
+    AllocFailStrategy::RETURN_NULL);
 }
 void  MemRegion::operator delete(void* p) {
   FreeHeap(p, mtGC);
--- a/src/share/vm/memory/metadataFactory.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/metadataFactory.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_MEMORY_METADATAFACTORY_HPP
 #define SHARE_VM_MEMORY_METADATAFACTORY_HPP
 
+#include "classfile/classLoaderData.hpp"
 #include "utilities/array.hpp"
 #include "utilities/exceptions.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -63,6 +64,12 @@
 
   template <typename T>
   static void free_array(ClassLoaderData* loader_data, Array<T>* data) {
+    if (DumpSharedSpaces) {
+      // FIXME: the freeing code is buggy, especially when PrintSharedSpaces is enabled.
+      // Disable for now -- this means if you specify bad classes in your classlist you
+      // may have wasted space inside the archive.
+      return;
+    }
     if (data != NULL) {
       assert(loader_data != NULL, "shouldn't pass null");
       assert(!data->is_shared(), "cannot deallocate array in shared spaces");
@@ -78,6 +85,12 @@
   // Deallocation method for metadata
   template <class T>
   static void free_metadata(ClassLoaderData* loader_data, T md) {
+    if (DumpSharedSpaces) {
+      // FIXME: the freeing code is buggy, especially when PrintSharedSpaces is enabled.
+      // Disable for now -- this means if you specify bad classes in your classlist you
+      // may have wasted space inside the archive.
+      return;
+    }
     if (md != NULL) {
       assert(loader_data != NULL, "shouldn't pass null");
       int size = md->size();
--- a/src/share/vm/memory/metaspace.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/metaspace.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -42,7 +42,7 @@
 #include "runtime/init.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutex.hpp"
-#include "runtime/orderAccess.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "services/memTracker.hpp"
 #include "services/memoryService.hpp"
 #include "utilities/copy.hpp"
@@ -413,6 +413,7 @@
 VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
   assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
 
+#if INCLUDE_CDS
   // This allocates memory with mmap.  For DumpSharedspaces, try to reserve
   // configurable address, generally at the top of the Java heap so other
   // memory addresses don't conflict.
@@ -428,7 +429,9 @@
       _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
     }
     MetaspaceShared::set_shared_rs(&_rs);
-  } else {
+  } else
+#endif
+  {
     bool large_pages = should_commit_large_pages_when_reserving(bytes);
 
     _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
@@ -1411,10 +1414,31 @@
   return value;
 }
 
-size_t MetaspaceGC::inc_capacity_until_GC(size_t v) {
+bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
   assert_is_size_aligned(v, Metaspace::commit_alignment());
 
-  return (size_t)Atomic::add_ptr(v, &_capacity_until_GC);
+  size_t capacity_until_GC = (size_t) _capacity_until_GC;
+  size_t new_value = capacity_until_GC + v;
+
+  if (new_value < capacity_until_GC) {
+    // The addition wrapped around, set new_value to aligned max value.
+    new_value = align_size_down(max_uintx, Metaspace::commit_alignment());
+  }
+
+  intptr_t expected = (intptr_t) capacity_until_GC;
+  intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);
+
+  if (expected != actual) {
+    return false;
+  }
+
+  if (new_cap_until_GC != NULL) {
+    *new_cap_until_GC = new_value;
+  }
+  if (old_cap_until_GC != NULL) {
+    *old_cap_until_GC = capacity_until_GC;
+  }
+  return true;
 }
 
 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
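
The rewritten MetaspaceGC::inc_capacity_until_GC replaces the unconditional Atomic::add with a single compare-and-swap: the sum is clamped to an aligned maximum if it wraps, the CAS either installs the new value or reports a lost race, and the old and new values are returned through optional out-parameters. A compilable sketch of the same shape, assuming std::atomic as a stand-in for Atomic::cmpxchg_ptr and a power-of-two alignment:

    // Minimal sketch, assuming std::atomic in place of Atomic::cmpxchg_ptr and
    // ALIGN_DOWN in place of align_size_down. One CAS attempt; false means
    // another thread changed the value first and the caller decides what to do.
    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    static std::atomic<size_t> capacity_until_gc{64 * 1024 * 1024};
    constexpr size_t kAlign = 64 * 1024;                      // power of two
    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

    bool inc_capacity_until_gc(size_t v, size_t* new_cap, size_t* old_cap) {
      size_t old_value = capacity_until_gc.load();
      size_t new_value = old_value + v;
      if (new_value < old_value) {                            // addition wrapped
        new_value = ALIGN_DOWN(SIZE_MAX, kAlign);             // clamp to aligned max
      }
      if (!capacity_until_gc.compare_exchange_strong(old_value, new_value)) {
        return false;                                         // lost the race
      }
      if (new_cap != nullptr) { *new_cap = new_value; }
      if (old_cap != nullptr) { *old_cap = old_value; }
      return true;
    }

    int main() {
      size_t before = 0, after = 0;
      bool ok = inc_capacity_until_gc(8 * 1024 * 1024, &after, &before);
      std::printf("ok=%d before=%zu after=%zu\n", ok, before, after);
      return 0;
    }
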
@@ -1514,7 +1538,10 @@
     expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
     // Don't expand unless it's significant
     if (expand_bytes >= MinMetaspaceExpansion) {
-      size_t new_capacity_until_GC = MetaspaceGC::inc_capacity_until_GC(expand_bytes);
+      size_t new_capacity_until_GC = 0;
+      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
+      assert(succeeded, "Should always successfully increment HWM when at safepoint");
+
       Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                                new_capacity_until_GC,
                                                MetaspaceGCThresholdUpdater::ComputeNewSize);
@@ -2937,11 +2964,14 @@
   // between the lower base and higher address.
   address lower_base;
   address higher_address;
+#if INCLUDE_CDS
   if (UseSharedSpaces) {
     higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
                           (address)(metaspace_base + compressed_class_space_size()));
     lower_base = MIN2(metaspace_base, cds_base);
-  } else {
+  } else
+#endif
+  {
     higher_address = metaspace_base + compressed_class_space_size();
     lower_base = metaspace_base;
 
@@ -2962,6 +2992,7 @@
   }
 }
 
+#if INCLUDE_CDS
 // Return TRUE if the specified metaspace_base and cds_base are close enough
 // to work with compressed klass pointers.
 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
@@ -2972,6 +3003,7 @@
                                 (address)(metaspace_base + compressed_class_space_size()));
   return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
 }
+#endif
 
 // Try to allocate the metaspace at the requested addr.
 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
@@ -2991,6 +3023,7 @@
                                              large_pages,
                                              requested_addr, 0);
   if (!metaspace_rs.is_reserved()) {
+#if INCLUDE_CDS
     if (UseSharedSpaces) {
       size_t increment = align_size_up(1*G, _reserve_alignment);
 
@@ -3005,7 +3038,7 @@
                                      _reserve_alignment, large_pages, addr, 0);
       }
     }
-
+#endif
     // If no successful allocation then try to allocate the space anywhere.  If
     // that fails then OOM doom.  At this point we cannot try allocating the
     // metaspace as if UseCompressedClassPointers is off because too much
@@ -3024,12 +3057,13 @@
   // If we got here then the metaspace got allocated.
   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
 
+#if INCLUDE_CDS
   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
     FileMapInfo::stop_sharing_and_unmap(
         "Could not allocate metaspace at a compatible address");
   }
-
+#endif
   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
                                   UseSharedSpaces ? (address)cds_base : 0);
 
@@ -3107,17 +3141,30 @@
   MetaspaceGC::initialize();
 
   // Initialize the alignment for shared spaces.
-  int max_alignment = os::vm_page_size();
+  int max_alignment = os::vm_allocation_granularity();
   size_t cds_total = 0;
 
   MetaspaceShared::set_max_alignment(max_alignment);
 
   if (DumpSharedSpaces) {
+#if INCLUDE_CDS
+    MetaspaceShared::estimate_regions_size();
+
     SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
     SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
     SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
 
+    // the min_misc_code_size estimate is based on MetaspaceShared::generate_vtable_methods()
+    uintx min_misc_code_size = align_size_up(
+      (MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size) *
+        (sizeof(void*) + MetaspaceShared::vtbl_method_size) + MetaspaceShared::vtbl_common_code_size,
+          max_alignment);
+
+    if (SharedMiscCodeSize < min_misc_code_size) {
+      report_out_of_shared_space(SharedMiscCode);
+    }
+
     // Initialize with the sum of the shared space sizes.  The read-only
     // and read write metaspace chunks will be allocated out of this and the
     // remainder is the misc code and data chunks.
@@ -3150,23 +3197,22 @@
     }
 
     Universe::set_narrow_klass_shift(0);
-#endif
-
+#endif // _LP64
+#endif // INCLUDE_CDS
   } else {
+#if INCLUDE_CDS
     // If using shared space, open the file that contains the shared space
     // and map in the memory before initializing the rest of metaspace (so
     // the addresses don't conflict)
     address cds_address = NULL;
     if (UseSharedSpaces) {
       FileMapInfo* mapinfo = new FileMapInfo();
-      memset(mapinfo, 0, sizeof(FileMapInfo));
 
       // Open the shared archive file, read and validate the header. If
       // initialization fails, shared spaces [UseSharedSpaces] are
       // disabled and the file is closed.
       // Map in spaces now also
       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
-        FileMapInfo::set_current_info(mapinfo);
         cds_total = FileMapInfo::shared_spaces_size();
         cds_address = (address)mapinfo->region_base(0);
       } else {
@@ -3174,21 +3220,23 @@
                "archive file not closed or shared spaces not disabled.");
       }
     }
-
+#endif // INCLUDE_CDS
 #ifdef _LP64
     // If UseCompressedClassPointers is set then allocate the metaspace area
     // above the heap and above the CDS area (if it exists).
     if (using_class_space()) {
       if (UseSharedSpaces) {
+#if INCLUDE_CDS
         char* cds_end = (char*)(cds_address + cds_total);
         cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
         allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
+#endif
       } else {
         char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
         allocate_metaspace_compressed_klass_ptrs(base, 0);
       }
     }
-#endif
+#endif // _LP64
 
     // Initialize these before initializing the VirtualSpaceList
     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
@@ -3305,19 +3353,29 @@
   size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
   assert(delta_bytes > 0, "Must be");
 
-  size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes);
-
-  // capacity_until_GC might be updated concurrently, must calculate previous value.
-  size_t before_inc = after_inc - delta_bytes;
-
-  tracer()->report_gc_threshold(before_inc, after_inc,
-                                MetaspaceGCThresholdUpdater::ExpandAndAllocate);
-  if (PrintGCDetails && Verbose) {
-    gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
-        " to " SIZE_FORMAT, before_inc, after_inc);
+  size_t before = 0;
+  size_t after = 0;
+  MetaWord* res;
+  bool incremented;
+
+  // Each thread increments the HWM at most once. Even if the thread fails to increment
+  // the HWM, an allocation is still attempted. This is because another thread must then
+  // have incremented the HWM and therefore the allocation might still succeed.
+  do {
+    incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
+    res = allocate(word_size, mdtype);
+  } while (!incremented && res == NULL);
+
+  if (incremented) {
+    tracer()->report_gc_threshold(before, after,
+                                  MetaspaceGCThresholdUpdater::ExpandAndAllocate);
+    if (PrintGCDetails && Verbose) {
+      gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
+          " to " SIZE_FORMAT, before, after);
+    }
   }
 
-  return allocate(word_size, mdtype);
+  return res;
 }
 
 // Space allocated in the Metaspace.  This may
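
The retry loop just above pairs with the CAS-based increment: each thread bumps the high-water mark at most once, and if its CAS loses, the allocation is still retried because the winning thread's expansion may already cover it. A small self-contained sketch of that loop shape (try_expand and try_allocate are hypothetical stand-ins for inc_capacity_until_GC and allocate; the dummies just let the loop run):

    #include <cstdio>
    #include <cstdlib>

    static bool  try_expand(size_t)        { return true; }            // pretend the CAS won
    static void* try_allocate(size_t bytes) { return std::malloc(bytes); }

    void* expand_and_allocate(size_t bytes) {
      bool  expanded = false;
      void* result   = nullptr;
      // Each caller expands the high-water mark at most once.  If the expansion
      // CAS fails, another thread expanded instead, so the allocation is
      // retried and may succeed off that thread's expansion.
      do {
        expanded = try_expand(bytes);
        result   = try_allocate(bytes);
      } while (!expanded && result == nullptr);
      return result;   // can still be null: expansion succeeded, allocation failed
    }

    int main() {
      void* p = expand_and_allocate(1024);
      std::printf("allocated %p\n", p);
      std::free(p);
      return 0;
    }
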
@@ -3366,6 +3424,10 @@
 
 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
   if (SafepointSynchronize::is_at_safepoint()) {
+    if (DumpSharedSpaces && PrintSharedSpaces) {
+      record_deallocation(ptr, vsm()->get_raw_word_size(word_size));
+    }
+
     assert(Thread::current()->is_VM_thread(), "should be the VM thread");
     // Don't take Heap_lock
     MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
@@ -3420,8 +3482,9 @@
     if (result == NULL) {
       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
     }
-
-    space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
+    if (PrintSharedSpaces) {
+      space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
+    }
 
     // Zero initialize.
     Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
@@ -3520,15 +3583,55 @@
 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
   assert(DumpSharedSpaces, "sanity");
 
-  AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
+  int byte_size = (int)word_size * HeapWordSize;
+  AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);
+
   if (_alloc_record_head == NULL) {
     _alloc_record_head = _alloc_record_tail = rec;
-  } else {
+  } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
     _alloc_record_tail->_next = rec;
     _alloc_record_tail = rec;
+  } else {
+    // slow linear search, but this doesn't happen that often, and only when dumping
+    for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
+      if (old->_ptr == ptr) {
+        assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
+        int remain_bytes = old->_byte_size - byte_size;
+        assert(remain_bytes >= 0, "sanity");
+        old->_type = type;
+
+        if (remain_bytes == 0) {
+          delete(rec);
+        } else {
+          address remain_ptr = address(ptr) + byte_size;
+          rec->_ptr = remain_ptr;
+          rec->_byte_size = remain_bytes;
+          rec->_type = MetaspaceObj::DeallocatedType;
+          rec->_next = old->_next;
+          old->_byte_size = byte_size;
+          old->_next = rec;
+        }
+        return;
+      }
+    }
+    assert(0, "reallocating a freed pointer that was not recorded");
   }
 }
 
+void Metaspace::record_deallocation(void* ptr, size_t word_size) {
+  assert(DumpSharedSpaces, "sanity");
+
+  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
+    if (rec->_ptr == ptr) {
+      assert(rec->_byte_size == (int)word_size * HeapWordSize, "sanity");
+      rec->_type = MetaspaceObj::DeallocatedType;
+      return;
+    }
+  }
+
+  assert(0, "deallocating a pointer that was not recorded");
+}
+
 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
   assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
 
--- a/src/share/vm/memory/metaspace.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/metaspace.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -171,9 +171,10 @@
   static const MetaspaceTracer* tracer() { return _tracer; }
 
  private:
-  // This is used by DumpSharedSpaces only, where only _vsm is used. So we will
+  // These 2 methods are used by DumpSharedSpaces only, where only _vsm is used. So we will
   // maintain a single list for now.
   void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
+  void record_deallocation(void* ptr, size_t word_size);
 
 #ifdef _LP64
   static void set_narrow_klass_base_and_shift(address metaspace_base, address cds_base);
@@ -403,7 +404,9 @@
   static void post_initialize();
 
   static size_t capacity_until_GC();
-  static size_t inc_capacity_until_GC(size_t v);
+  static bool inc_capacity_until_GC(size_t v,
+                                    size_t* new_cap_until_GC = NULL,
+                                    size_t* old_cap_until_GC = NULL);
   static size_t dec_capacity_until_GC(size_t v);
 
   static bool should_concurrent_collect() { return _should_concurrent_collect; }
--- a/src/share/vm/memory/metaspaceShared.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/metaspaceShared.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -24,8 +24,10 @@
 
 #include "precompiled.hpp"
 #include "classfile/dictionary.hpp"
+#include "classfile/classLoaderExt.hpp"
 #include "classfile/loaderConstraints.hpp"
 #include "classfile/placeholders.hpp"
+#include "classfile/sharedClassUtil.hpp"
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
@@ -38,6 +40,7 @@
 #include "runtime/signature.hpp"
 #include "runtime/vm_operations.hpp"
 #include "runtime/vmThread.hpp"
+#include "utilities/hashtable.hpp"
 #include "utilities/hashtable.inline.hpp"
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
@@ -46,6 +49,10 @@
 
 ReservedSpace* MetaspaceShared::_shared_rs = NULL;
 
+bool MetaspaceShared::_link_classes_made_progress;
+bool MetaspaceShared::_check_classes_made_progress;
+bool MetaspaceShared::_has_error_classes;
+bool MetaspaceShared::_archive_loading_failed = false;
 // Read/write a data stream for restoring/preserving metadata pointers and
 // miscellaneous data from/to the shared archive file.
 
@@ -445,6 +452,23 @@
   SystemDictionary::classes_do(collect_classes);
 
   tty->print_cr("Number of classes %d", _global_klass_objects->length());
+  {
+    int num_type_array = 0, num_obj_array = 0, num_inst = 0;
+    for (int i = 0; i < _global_klass_objects->length(); i++) {
+      Klass* k = _global_klass_objects->at(i);
+      if (k->oop_is_instance()) {
+        num_inst ++;
+      } else if (k->oop_is_objArray()) {
+        num_obj_array ++;
+      } else {
+        assert(k->oop_is_typeArray(), "sanity");
+        num_type_array ++;
+      }
+    }
+    tty->print_cr("    instance classes   = %5d", num_inst);
+    tty->print_cr("    obj array classes  = %5d", num_obj_array);
+    tty->print_cr("    type array classes = %5d", num_type_array);
+  }
 
   // Update all the fingerprints in the shared methods.
   tty->print("Calculating fingerprints ... ");
@@ -511,6 +535,8 @@
   ClassLoader::copy_package_info_table(&md_top, md_end);
   ClassLoader::verify();
 
+  ClassLoaderExt::copy_lookup_cache_to_archive(&md_top, md_end);
+
   // Write the other data to the output array.
   WriteClosure wc(md_top, md_end);
   MetaspaceShared::serialize(&wc);
@@ -611,38 +637,58 @@
 #undef fmt_space
 }
 
-static void link_shared_classes(Klass* obj, TRAPS) {
+
+void MetaspaceShared::link_one_shared_class(Klass* obj, TRAPS) {
   Klass* k = obj;
   if (k->oop_is_instance()) {
     InstanceKlass* ik = (InstanceKlass*) k;
     // Link the class to cause the bytecodes to be rewritten and the
-    // cpcache to be created.
-    if (ik->init_state() < InstanceKlass::linked) {
-      ik->link_class(THREAD);
-      guarantee(!HAS_PENDING_EXCEPTION, "exception in class rewriting");
+    // cpcache to be created. Class verification is done according
+    // to -Xverify setting.
+    _link_classes_made_progress |= try_link_class(ik, THREAD);
+    guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
+  }
+}
+
+void MetaspaceShared::check_one_shared_class(Klass* k) {
+  if (k->oop_is_instance() && InstanceKlass::cast(k)->check_sharing_error_state()) {
+    _check_classes_made_progress = true;
+  }
+}
+
+void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) {
+  // We need to iterate because verification may cause additional classes
+  // to be loaded.
+  do {
+    _link_classes_made_progress = false;
+    SystemDictionary::classes_do(link_one_shared_class, THREAD);
+    guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
+  } while (_link_classes_made_progress);
+
+  if (_has_error_classes) {
+    // Mark all classes whose super class or interfaces failed verification.
+    do {
+      // Not completely sure if we need to do this iteratively. Anyway,
+      // we should come here only if there are unverifiable classes, which
+      // shouldn't happen in normal cases. So better safe than sorry.
+      _check_classes_made_progress = false;
+      SystemDictionary::classes_do(check_one_shared_class);
+    } while (_check_classes_made_progress);
+
+    if (IgnoreUnverifiableClassesDuringDump) {
+      // This is useful when running JCK or SQE tests. You should not
+      // enable this when running real apps.
+      SystemDictionary::remove_classes_in_error_state();
+    } else {
+      tty->print_cr("Please remove the unverifiable classes from your class list and try again");
+      exit(1);
     }
   }
 }
 
-
-// Support for a simple checksum of the contents of the class list
-// file to prevent trivial tampering. The algorithm matches that in
-// the MakeClassList program used by the J2SE build process.
-#define JSUM_SEED ((jlong)CONST64(0xcafebabebabecafe))
-static jlong
-jsum(jlong start, const char *buf, const int len)
-{
-    jlong h = start;
-    char *p = (char *)buf, *e = p + len;
-    while (p < e) {
-        char c = *p++;
-        if (c <= ' ') {
-            /* Skip spaces and control characters */
-            continue;
-        }
-        h = 31 * h + c;
-    }
-    return h;
+void MetaspaceShared::prepare_for_dumping() {
+  ClassLoader::initialize_shared_path();
+  FileMapInfo::allocate_classpath_entry_table();
 }
 
 // Preload classes from a list, populate the shared spaces and dump to a
@@ -651,72 +697,114 @@
   TraceTime timer("Dump Shared Spaces", TraceStartupTime);
   ResourceMark rm;
 
+  tty->print_cr("Allocated shared space: %d bytes at " PTR_FORMAT,
+                MetaspaceShared::shared_rs()->size(),
+                MetaspaceShared::shared_rs()->base());
+
   // Preload classes to be shared.
   // Should use some os:: method rather than fopen() here. aB.
-  // Construct the path to the class list (in jre/lib)
-  // Walk up two directories from the location of the VM and
-  // optionally tack on "lib" (depending on platform)
-  char class_list_path[JVM_MAXPATHLEN];
-  os::jvm_path(class_list_path, sizeof(class_list_path));
-  for (int i = 0; i < 3; i++) {
-    char *end = strrchr(class_list_path, *os::file_separator());
-    if (end != NULL) *end = '\0';
+  const char* class_list_path;
+  if (SharedClassListFile == NULL) {
+    // Construct the path to the class list (in jre/lib)
+    // Walk up two directories from the location of the VM and
+    // optionally tack on "lib" (depending on platform)
+    char class_list_path_str[JVM_MAXPATHLEN];
+    os::jvm_path(class_list_path_str, sizeof(class_list_path_str));
+    for (int i = 0; i < 3; i++) {
+      char *end = strrchr(class_list_path_str, *os::file_separator());
+      if (end != NULL) *end = '\0';
+    }
+    int class_list_path_len = (int)strlen(class_list_path_str);
+    if (class_list_path_len >= 3) {
+      if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) {
+        strcat(class_list_path_str, os::file_separator());
+        strcat(class_list_path_str, "lib");
+      }
+    }
+    strcat(class_list_path_str, os::file_separator());
+    strcat(class_list_path_str, "classlist");
+    class_list_path = class_list_path_str;
+  } else {
+    class_list_path = SharedClassListFile;
   }
-  int class_list_path_len = (int)strlen(class_list_path);
-  if (class_list_path_len >= 3) {
-    if (strcmp(class_list_path + class_list_path_len - 3, "lib") != 0) {
-      strcat(class_list_path, os::file_separator());
-      strcat(class_list_path, "lib");
-    }
+
+  int class_count = 0;
+  GrowableArray<Klass*>* class_promote_order = new GrowableArray<Klass*>();
+
+  // sun.io.Converters
+  static const char obj_array_sig[] = "[[Ljava/lang/Object;";
+  SymbolTable::new_permanent_symbol(obj_array_sig, THREAD);
+
+  // java.util.HashMap
+  static const char map_entry_array_sig[] = "[Ljava/util/Map$Entry;";
+  SymbolTable::new_permanent_symbol(map_entry_array_sig, THREAD);
+
+  tty->print_cr("Loading classes to share ...");
+  _has_error_classes = false;
+  class_count += preload_and_dump(class_list_path, class_promote_order,
+                                  THREAD);
+  if (ExtraSharedClassListFile) {
+    class_count += preload_and_dump(ExtraSharedClassListFile, class_promote_order,
+                                    THREAD);
+  }
+  tty->print_cr("Loading classes to share: done.");
+
+  ClassLoaderExt::init_lookup_cache(THREAD);
+
+  if (PrintSharedSpaces) {
+    tty->print_cr("Shared spaces: preloaded %d classes", class_count);
   }
-  strcat(class_list_path, os::file_separator());
-  strcat(class_list_path, "classlist");
+
+  // Rewrite and link classes
+  tty->print_cr("Rewriting and linking classes ...");
+
+  // Link any classes which got missed. This would happen if we have loaded classes that
+  // were not explicitly specified in the classlist. E.g., if an interface implemented by class K
+  // fails verification, all other interfaces that were not specified in the classlist but
+  // are implemented by K are not verified.
+  link_and_cleanup_shared_classes(CATCH);
+  tty->print_cr("Rewriting and linking classes: done");
 
+  // Create and dump the shared spaces.   Everything so far is loaded
+  // with the null class loader.
+  ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
+  VM_PopulateDumpSharedSpace op(loader_data, class_promote_order);
+  VMThread::execute(&op);
+
+  // Since various initialization steps have been undone by this process,
+  // it is not reasonable to continue running a java process.
+  exit(0);
+}
+
+int MetaspaceShared::preload_and_dump(const char * class_list_path,
+                                      GrowableArray<Klass*>* class_promote_order,
+                                      TRAPS) {
   FILE* file = fopen(class_list_path, "r");
+  char class_name[256];
+  int class_count = 0;
+
   if (file != NULL) {
-    jlong computed_jsum  = JSUM_SEED;
-    jlong file_jsum      = 0;
-
-    char class_name[256];
-    int class_count = 0;
-    GrowableArray<Klass*>* class_promote_order = new GrowableArray<Klass*>();
-
-    // sun.io.Converters
-    static const char obj_array_sig[] = "[[Ljava/lang/Object;";
-    SymbolTable::new_permanent_symbol(obj_array_sig, THREAD);
-
-    // java.util.HashMap
-    static const char map_entry_array_sig[] = "[Ljava/util/Map$Entry;";
-    SymbolTable::new_permanent_symbol(map_entry_array_sig, THREAD);
-
-    tty->print("Loading classes to share ... ");
     while ((fgets(class_name, sizeof class_name, file)) != NULL) {
-      if (*class_name == '#') {
-        jint fsh, fsl;
-        if (sscanf(class_name, "# %8x%8x\n", &fsh, &fsl) == 2) {
-          file_jsum = ((jlong)(fsh) << 32) | (fsl & 0xffffffff);
-        }
-
+      if (*class_name == '#') { // comment
         continue;
       }
       // Remove trailing newline
       size_t name_len = strlen(class_name);
-      class_name[name_len-1] = '\0';
-
-      computed_jsum = jsum(computed_jsum, class_name, (const int)name_len - 1);
+      if (class_name[name_len-1] == '\n') {
+        class_name[name_len-1] = '\0';
+      }
 
       // Got a class name - load it.
       TempNewSymbol class_name_symbol = SymbolTable::new_permanent_symbol(class_name, THREAD);
       guarantee(!HAS_PENDING_EXCEPTION, "Exception creating a symbol.");
       Klass* klass = SystemDictionary::resolve_or_null(class_name_symbol,
                                                          THREAD);
-      guarantee(!HAS_PENDING_EXCEPTION, "Exception resolving a class.");
+      CLEAR_PENDING_EXCEPTION;
       if (klass != NULL) {
         if (PrintSharedSpaces && Verbose && WizardMode) {
           tty->print_cr("Shared spaces preloaded: %s", class_name);
         }
 
-
         InstanceKlass* ik = InstanceKlass::cast(klass);
 
         // Should be class load order as per -XX:+TraceClassLoadingPreorder
@@ -726,52 +814,15 @@
         // cpcache to be created. The linking is done as soon as classes
         // are loaded in order that the related data structures (klass and
         // cpCache) are located together.
-
-        if (ik->init_state() < InstanceKlass::linked) {
-          ik->link_class(THREAD);
-          guarantee(!(HAS_PENDING_EXCEPTION), "exception in class rewriting");
-        }
-
-        // TODO: Resolve klasses in constant pool
-        ik->constants()->resolve_class_constants(THREAD);
+        try_link_class(ik, THREAD);
+        guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
 
         class_count++;
       } else {
-        if (PrintSharedSpaces && Verbose && WizardMode) {
-          tty->cr();
-          tty->print_cr(" Preload failed: %s", class_name);
-        }
+        //tty->print_cr("Preload failed: %s", class_name);
       }
-      file_jsum = 0; // Checksum must be on last line of file
-    }
-    if (computed_jsum != file_jsum) {
-      tty->cr();
-      tty->print_cr("Preload failed: checksum of class list was incorrect.");
-      exit(1);
-    }
-
-    tty->print_cr("done. ");
-
-    if (PrintSharedSpaces) {
-      tty->print_cr("Shared spaces: preloaded %d classes", class_count);
     }
-
-    // Rewrite and unlink classes.
-    tty->print("Rewriting and linking classes ... ");
-
-    // Link any classes which got missed.  (It's not quite clear why
-    // they got missed.)  This iteration would be unsafe if we weren't
-    // single-threaded at this point; however we can't do it on the VM
-    // thread because it requires object allocation.
-    SystemDictionary::classes_do(link_shared_classes, CATCH);
-    tty->print_cr("done. ");
-
-    // Create and dump the shared spaces.   Everything so far is loaded
-    // with the null class loader.
-    ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
-    VM_PopulateDumpSharedSpace op(loader_data, class_promote_order);
-    VMThread::execute(&op);
-
+    fclose(file);
   } else {
     char errmsg[JVM_MAXPATHLEN];
     os::lasterror(errmsg, JVM_MAXPATHLEN);
@@ -779,11 +830,39 @@
     exit(1);
   }
 
-  // Since various initialization steps have been undone by this process,
-  // it is not reasonable to continue running a java process.
-  exit(0);
+  return class_count;
 }
 
+// Returns true if the class's status has changed
+bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) {
+  assert(DumpSharedSpaces, "should only be called during dumping");
+  if (ik->init_state() < InstanceKlass::linked) {
+    bool saved = BytecodeVerificationLocal;
+    if (!SharedClassUtil::is_shared_boot_class(ik)) {
+      // The verification decision is based on BytecodeVerificationRemote
+      // for non-system classes. Since we are using the NULL classloader
+      // to load non-system classes during dumping, we need to temporarily
+      // change BytecodeVerificationLocal to be the same as
+      // BytecodeVerificationRemote. Note this can cause the parent system
+      // classes to be verified as well. The extra overhead is acceptable during
+      // dumping.
+      BytecodeVerificationLocal = BytecodeVerificationRemote;
+    }
+    ik->link_class(THREAD);
+    if (HAS_PENDING_EXCEPTION) {
+      ResourceMark rm;
+      tty->print_cr("Preload Warning: Verification failed for %s",
+                    ik->external_name());
+      CLEAR_PENDING_EXCEPTION;
+      ik->set_in_error_state();
+      _has_error_classes = true;
+    }
+    BytecodeVerificationLocal = saved;
+    return true;
+  } else {
+    return false;
+  }
+}
 
 // Closure for serializing initialization data in from a data area
 // (ptr_array) read from the shared file.
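
try_link_class temporarily makes BytecodeVerificationLocal match BytecodeVerificationRemote so that application classes loaded by the null loader during dumping are verified under the remote policy, then restores the flag. The patch saves and restores the flag explicitly; the sketch below shows the same idea with an RAII guard as one possible alternative (FlagSaver and the flag variables are hypothetical, not HotSpot code):

    #include <cstdio>

    static bool VerifyLocal  = false;   // stand-ins for BytecodeVerificationLocal
    static bool VerifyRemote = true;    // and BytecodeVerificationRemote

    class FlagSaver {
      bool* _flag;
      bool  _saved;
     public:
      explicit FlagSaver(bool* flag) : _flag(flag), _saved(*flag) {}
      ~FlagSaver() { *_flag = _saved; }             // always restored on exit
    };

    static void link_with_remote_policy(bool is_boot_class) {
      FlagSaver save(&VerifyLocal);
      if (!is_boot_class) {
        VerifyLocal = VerifyRemote;     // verify app classes as if remotely loaded
      }
      std::printf("linking with VerifyLocal=%d\n", VerifyLocal);
      // flag is restored automatically when 'save' goes out of scope
    }

    int main() {
      link_with_remote_policy(false);
      std::printf("after: VerifyLocal=%d\n", VerifyLocal);
      return 0;
    }
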
@@ -871,7 +950,8 @@
        mapinfo->verify_region_checksum(md) &&
       (_mc_base = mapinfo->map_region(mc)) != NULL &&
        mapinfo->verify_region_checksum(mc) &&
-      (image_alignment == (size_t)max_alignment())) {
+      (image_alignment == (size_t)max_alignment()) &&
+      mapinfo->validate_classpath_entry_table()) {
     // Success (no need to do anything)
     return true;
   } else {
@@ -887,8 +967,8 @@
 #endif
     // If -Xshare:on is specified, print out the error message and exit VM,
     // otherwise, set UseSharedSpaces to false and continue.
-    if (RequireSharedSpaces) {
-      vm_exit_during_initialization("Unable to use shared archive.", NULL);
+    if (RequireSharedSpaces || PrintSharedArchiveAndExit) {
+      vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on.");
     } else {
       FLAG_SET_DEFAULT(UseSharedSpaces, false);
     }
@@ -982,12 +1062,28 @@
   buffer += sizeof(intptr_t);
   buffer += len;
 
+  buffer = ClassLoaderExt::restore_lookup_cache_from_archive(buffer);
+
   intptr_t* array = (intptr_t*)buffer;
   ReadClosure rc(&array);
   serialize(&rc);
 
   // Close the mapinfo file
   mapinfo->close();
+
+  if (PrintSharedArchiveAndExit) {
+    if (PrintSharedDictionary) {
+      tty->print_cr("\nShared classes:\n");
+      SystemDictionary::print_shared(false);
+    }
+    if (_archive_loading_failed) {
+      tty->print_cr("archive is invalid");
+      vm_exit(1);
+    } else {
+      tty->print_cr("archive is valid");
+      vm_exit(0);
+    }
+  }
 }
 
 // JVM/TI RedefineClasses() support:
@@ -1003,3 +1099,49 @@
   }
   return true;
 }
+
+int MetaspaceShared::count_class(const char* classlist_file) {
+  if (classlist_file == NULL) {
+    return 0;
+  }
+  char class_name[256];
+  int class_count = 0;
+  FILE* file = fopen(classlist_file, "r");
+  if (file != NULL) {
+    while ((fgets(class_name, sizeof class_name, file)) != NULL) {
+      if (*class_name == '#') { // comment
+        continue;
+      }
+      class_count++;
+    }
+    fclose(file);
+  } else {
+    char errmsg[JVM_MAXPATHLEN];
+    os::lasterror(errmsg, JVM_MAXPATHLEN);
+    tty->print_cr("Loading classlist failed: %s", errmsg);
+    exit(1);
+  }
+
+  return class_count;
+}
+
+// the sizes are good for typical large applications that have a lot of shared
+// classes
+void MetaspaceShared::estimate_regions_size() {
+  int class_count = count_class(SharedClassListFile);
+  class_count += count_class(ExtraSharedClassListFile);
+
+  if (class_count > LargeThresholdClassCount) {
+    if (class_count < HugeThresholdClassCount) {
+      SET_ESTIMATED_SIZE(Large, ReadOnly);
+      SET_ESTIMATED_SIZE(Large, ReadWrite);
+      SET_ESTIMATED_SIZE(Large, MiscData);
+      SET_ESTIMATED_SIZE(Large, MiscCode);
+    } else {
+      SET_ESTIMATED_SIZE(Huge,  ReadOnly);
+      SET_ESTIMATED_SIZE(Huge,  ReadWrite);
+      SET_ESTIMATED_SIZE(Huge,  MiscData);
+      SET_ESTIMATED_SIZE(Huge,  MiscCode);
+    }
+  }
+}
--- a/src/share/vm/memory/metaspaceShared.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/metaspaceShared.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -30,6 +30,19 @@
 #include "utilities/exceptions.hpp"
 #include "utilities/macros.hpp"
 
+#define LargeSharedArchiveSize    (300*M)
+#define HugeSharedArchiveSize     (800*M)
+#define ReadOnlyRegionPercentage  0.39
+#define ReadWriteRegionPercentage 0.50
+#define MiscDataRegionPercentage  0.09
+#define MiscCodeRegionPercentage  0.02
+#define LargeThresholdClassCount  5000
+#define HugeThresholdClassCount   40000
+
+#define SET_ESTIMATED_SIZE(type, region)                              \
+  Shared ##region## Size  = FLAG_IS_DEFAULT(Shared ##region## Size) ? \
+    (uintx)(type ## SharedArchiveSize *  region ## RegionPercentage) : Shared ## region ## Size
+
 class FileMapInfo;
 
 // Class Data Sharing Support
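
SET_ESTIMATED_SIZE pastes the size class and region name into both the flag and the percentage constant, and only overrides a flag the user left at its default. For instance, SET_ESTIMATED_SIZE(Large, ReadOnly) expands roughly to:

    SharedReadOnlySize = FLAG_IS_DEFAULT(SharedReadOnlySize)
        ? (uintx)(LargeSharedArchiveSize * ReadOnlyRegionPercentage)  // (300*M) * 0.39
        : SharedReadOnlySize;
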
@@ -38,14 +51,22 @@
   // CDS support
   static ReservedSpace* _shared_rs;
   static int _max_alignment;
-
+  static bool _link_classes_made_progress;
+  static bool _check_classes_made_progress;
+  static bool _has_error_classes;
+  static bool _archive_loading_failed;
  public:
   enum {
-    vtbl_list_size = 17, // number of entries in the shared space vtable list.
-    num_virtuals = 200   // maximum number of virtual functions
-                         // If virtual functions are added to Metadata,
-                         // this number needs to be increased.  Also,
-                         // SharedMiscCodeSize will need to be increased.
+    vtbl_list_size         = 17,   // number of entries in the shared space vtable list.
+    num_virtuals           = 200,  // maximum number of virtual functions
+                                   // If virtual functions are added to Metadata,
+                                   // this number needs to be increased.  Also,
+                                   // SharedMiscCodeSize will need to be increased.
+                                   // The following 2 sizes were based on
+                                   // MetaspaceShared::generate_vtable_methods()
+    vtbl_method_size       = 16,   // conservative size of the movl and jmp instructions
+                                   // for the x64 platform
+    vtbl_common_code_size  = (1*K) // conservative size of the "common_code" for the x64 platform
   };
 
   enum {
@@ -67,7 +88,11 @@
     NOT_CDS(return 0);
   }
 
+  static void prepare_for_dumping() NOT_CDS_RETURN;
   static void preload_and_dump(TRAPS) NOT_CDS_RETURN;
+  static int preload_and_dump(const char * class_list_path,
+                              GrowableArray<Klass*>* class_promote_order,
+                              TRAPS) NOT_CDS_RETURN;
 
   static ReservedSpace* shared_rs() {
     CDS_ONLY(return _shared_rs);
@@ -78,6 +103,9 @@
     CDS_ONLY(_shared_rs = rs;)
   }
 
+  static void set_archive_loading_failed() {
+    _archive_loading_failed = true;
+  }
   static bool map_shared_spaces(FileMapInfo* mapinfo) NOT_CDS_RETURN_(false);
   static void initialize_shared_spaces() NOT_CDS_RETURN;
 
@@ -97,5 +125,13 @@
   static bool remap_shared_readonly_as_readwrite() NOT_CDS_RETURN_(true);
 
   static void print_shared_spaces();
+
+  static bool try_link_class(InstanceKlass* ik, TRAPS);
+  static void link_one_shared_class(Klass* obj, TRAPS);
+  static void check_one_shared_class(Klass* obj);
+  static void link_and_cleanup_shared_classes(TRAPS);
+
+  static int count_class(const char* classlist_file);
+  static void estimate_regions_size() NOT_CDS_RETURN;
 };
 #endif // SHARE_VM_MEMORY_METASPACE_SHARED_HPP
--- a/src/share/vm/memory/referenceProcessor.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/referenceProcessor.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -118,6 +118,7 @@
   _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
   _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
   _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
+  _discoveredCleanerRefs = &_discoveredPhantomRefs[_max_num_q];
 
   // Initialize all entries to NULL
   for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
@@ -190,7 +191,8 @@
   OopClosure*                  keep_alive,
   VoidClosure*                 complete_gc,
   AbstractRefProcTaskExecutor* task_executor,
-  GCTimer*                     gc_timer) {
+  GCTimer*                     gc_timer,
+  GCId                         gc_id) {
   NOT_PRODUCT(verify_ok_to_handle_reflists());
 
   assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
@@ -212,7 +214,7 @@
   // Soft references
   size_t soft_count = 0;
   {
-    GCTraceTime tt("SoftReference", trace_time, false, gc_timer);
+    GCTraceTime tt("SoftReference", trace_time, false, gc_timer, gc_id);
     soft_count =
       process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                                  is_alive, keep_alive, complete_gc, task_executor);
@@ -223,7 +225,7 @@
   // Weak references
   size_t weak_count = 0;
   {
-    GCTraceTime tt("WeakReference", trace_time, false, gc_timer);
+    GCTraceTime tt("WeakReference", trace_time, false, gc_timer, gc_id);
     weak_count =
       process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                                  is_alive, keep_alive, complete_gc, task_executor);
@@ -232,7 +234,7 @@
   // Final references
   size_t final_count = 0;
   {
-    GCTraceTime tt("FinalReference", trace_time, false, gc_timer);
+    GCTraceTime tt("FinalReference", trace_time, false, gc_timer, gc_id);
     final_count =
       process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                                  is_alive, keep_alive, complete_gc, task_executor);
@@ -241,10 +243,17 @@
   // Phantom references
   size_t phantom_count = 0;
   {
-    GCTraceTime tt("PhantomReference", trace_time, false, gc_timer);
+    GCTraceTime tt("PhantomReference", trace_time, false, gc_timer, gc_id);
     phantom_count =
       process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                                  is_alive, keep_alive, complete_gc, task_executor);
+
+    // Process cleaners, but include them in phantom statistics.  We expect
+    // Cleaner references to be temporary, and don't want to deal with
+    // possible incompatibilities arising from making it more visible.
+    phantom_count +=
+      process_discovered_reflist(_discoveredCleanerRefs, NULL, false,
+                                 is_alive, keep_alive, complete_gc, task_executor);
   }
 
   // Weak global JNI references. It would make more sense (semantically) to
@@ -253,7 +262,7 @@
   // thus use JNI weak references to circumvent the phantom references and
   // resurrect a "post-mortem" object.
   {
-    GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer);
+    GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer, gc_id);
     if (task_executor != NULL) {
       task_executor->set_single_threaded_mode();
     }
@@ -882,6 +891,7 @@
   balance_queues(_discoveredWeakRefs);
   balance_queues(_discoveredFinalRefs);
   balance_queues(_discoveredPhantomRefs);
+  balance_queues(_discoveredCleanerRefs);
 }
 
 size_t
@@ -1041,6 +1051,9 @@
     case REF_PHANTOM:
       list = &_discoveredPhantomRefs[id];
       break;
+    case REF_CLEANER:
+      list = &_discoveredCleanerRefs[id];
+      break;
     case REF_NONE:
       // we should not reach here if we are an InstanceRefKlass
     default:
@@ -1251,14 +1264,15 @@
   OopClosure* keep_alive,
   VoidClosure* complete_gc,
   YieldClosure* yield,
-  GCTimer* gc_timer) {
+  GCTimer* gc_timer,
+  GCId     gc_id) {
 
   NOT_PRODUCT(verify_ok_to_handle_reflists());
 
   // Soft references
   {
     GCTraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
-              false, gc_timer);
+              false, gc_timer, gc_id);
     for (uint i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
@@ -1271,7 +1285,7 @@
   // Weak references
   {
     GCTraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
-              false, gc_timer);
+              false, gc_timer, gc_id);
     for (uint i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
@@ -1284,7 +1298,7 @@
   // Final references
   {
     GCTraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
-              false, gc_timer);
+              false, gc_timer, gc_id);
     for (uint i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
@@ -1297,7 +1311,7 @@
   // Phantom references
   {
     GCTraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
-              false, gc_timer);
+              false, gc_timer, gc_id);
     for (uint i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
@@ -1305,6 +1319,17 @@
       preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                   keep_alive, complete_gc, yield);
     }
+
+    // Cleaner references.  Included in timing for phantom references.  We
+    // expect Cleaner references to be temporary, and don't want to deal with
+    // possible incompatibilities arising from making it more visible.
+    for (uint i = 0; i < _max_num_q; i++) {
+      if (yield->should_return()) {
+        return;
+      }
+      preclean_discovered_reflist(_discoveredCleanerRefs[i], is_alive,
+                                  keep_alive, complete_gc, yield);
+    }
   }
 }
 
@@ -1373,6 +1398,7 @@
      case 1: return "WeakRef";
      case 2: return "FinalRef";
      case 3: return "PhantomRef";
+     case 4: return "CleanerRef";
    }
    ShouldNotReachHere();
    return NULL;
--- a/src/share/vm/memory/referenceProcessor.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/referenceProcessor.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
 #define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
 
+#include "gc_implementation/shared/gcTrace.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/referenceProcessorStats.hpp"
 #include "memory/referenceType.hpp"
@@ -263,9 +264,10 @@
   DiscoveredList* _discoveredWeakRefs;
   DiscoveredList* _discoveredFinalRefs;
   DiscoveredList* _discoveredPhantomRefs;
+  DiscoveredList* _discoveredCleanerRefs;
 
  public:
-  static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }
+  static int number_of_subclasses_of_ref() { return (REF_CLEANER - REF_OTHER); }
 
   uint num_q()                             { return _num_q; }
   uint max_num_q()                         { return _max_num_q; }
@@ -349,7 +351,8 @@
                                       OopClosure*        keep_alive,
                                       VoidClosure*       complete_gc,
                                       YieldClosure*      yield,
-                                      GCTimer*           gc_timer);
+                                      GCTimer*           gc_timer,
+                                      GCId               gc_id);
 
   // Delete entries in the discovered lists that have
   // either a null referent or are not active. Such
@@ -480,7 +483,8 @@
                                 OopClosure*                  keep_alive,
                                 VoidClosure*                 complete_gc,
                                 AbstractRefProcTaskExecutor* task_executor,
-                                GCTimer *gc_timer);
+                                GCTimer *gc_timer,
+                                GCId    gc_id);
 
   // Enqueue references at end of GC (called by the garbage collector)
   bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);
--- a/src/share/vm/memory/referenceType.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/referenceType.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -35,7 +35,8 @@
   REF_SOFT,      // Subclass of java/lang/ref/SoftReference
   REF_WEAK,      // Subclass of java/lang/ref/WeakReference
   REF_FINAL,     // Subclass of java/lang/ref/FinalReference
-  REF_PHANTOM    // Subclass of java/lang/ref/PhantomReference
+  REF_PHANTOM,   // Subclass of java/lang/ref/PhantomReference
+  REF_CLEANER    // Subclass of sun/misc/Cleaner
 };
 
 #endif // SHARE_VM_MEMORY_REFRERENCETYPE_HPP
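
The ReferenceProcessor keeps all discovered lists in one contiguous array carved into per-type slices of _max_num_q entries, which is why appending REF_CLEANER to the enum, growing number_of_subclasses_of_ref(), and pointing _discoveredCleanerRefs just past the phantom slice is all it takes to add the new kind. A small sketch of that indexing (list_index and the simplified enum are illustrative, not the HotSpot layout verbatim):

    #include <cstdio>

    enum RefType { SOFT, WEAK, FINAL, PHANTOM, CLEANER, NUM_TYPES };

    // With max_num_q queues per type, the list for (type, queue) lives at a
    // fixed offset in one contiguous array, just as _discoveredCleanerRefs is
    // placed right after the phantom slice.
    static int list_index(RefType type, int queue, int max_num_q) {
      return static_cast<int>(type) * max_num_q + queue;
    }

    int main() {
      const int max_num_q = 4;
      std::printf("phantom[2] -> %d\n", list_index(PHANTOM, 2, max_num_q));  // 14
      std::printf("cleaner[0] -> %d\n", list_index(CLEANER, 0, max_num_q));  // 16
      return 0;
    }
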
--- a/src/share/vm/memory/resourceArea.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/resourceArea.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,11 +49,11 @@
   debug_only(static int _warned;)       // to suppress multiple warnings
 
 public:
-  ResourceArea() {
+  ResourceArea() : Arena(mtThread) {
     debug_only(_nesting = 0;)
   }
 
-  ResourceArea(size_t init_size) : Arena(init_size) {
+  ResourceArea(size_t init_size) : Arena(mtThread, init_size) {
     debug_only(_nesting = 0;);
   }
 
@@ -64,7 +64,7 @@
     if (UseMallocOnly) {
       // use malloc, but save pointer in res. area for later freeing
       char** save = (char**)internal_malloc_4(sizeof(char*));
-      return (*save = (char*)os::malloc(size, mtThread));
+      return (*save = (char*)os::malloc(size, mtThread, CURRENT_PC));
     }
 #endif
     return (char*)Amalloc(size, alloc_failmode);
--- a/src/share/vm/memory/sharedHeap.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/sharedHeap.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -29,6 +29,7 @@
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "memory/sharedHeap.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "runtime/fprofiler.hpp"
 #include "runtime/java.hpp"
 #include "services/management.hpp"
@@ -39,8 +40,8 @@
 
 SharedHeap* SharedHeap::_sh;
 
-// The set of potentially parallel tasks in strong root scanning.
-enum SH_process_strong_roots_tasks {
+// The set of potentially parallel tasks in root scanning.
+enum SH_process_roots_tasks {
   SH_PS_Universe_oops_do,
   SH_PS_JNIHandles_oops_do,
   SH_PS_ObjectSynchronizer_oops_do,
@@ -58,6 +59,7 @@
   CollectedHeap(),
   _collector_policy(policy_),
   _rem_set(NULL),
+  _strong_roots_scope(NULL),
   _strong_roots_parity(0),
   _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
   _workers(NULL)
@@ -114,6 +116,19 @@
 static AssertNonScavengableClosure assert_is_non_scavengable_closure;
 #endif
 
+SharedHeap::StrongRootsScope* SharedHeap::active_strong_roots_scope() const {
+  return _strong_roots_scope;
+}
+void SharedHeap::register_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
+  assert(_strong_roots_scope == NULL, "Should only have one StrongRootsScope active");
+  assert(scope != NULL, "Illegal argument");
+  _strong_roots_scope = scope;
+}
+void SharedHeap::unregister_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
+  assert(_strong_roots_scope == scope, "Wrong scope unregistered");
+  _strong_roots_scope = NULL;
+}
+
 void SharedHeap::change_strong_roots_parity() {
   // Also set the new collection parity.
   assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
@@ -124,122 +139,173 @@
          "Not in range.");
 }
 
-SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* outer, bool activate)
-  : MarkScope(activate)
+SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate)
+  : MarkScope(activate), _sh(heap), _n_workers_done_with_threads(0)
 {
   if (_active) {
-    outer->change_strong_roots_parity();
+    _sh->register_strong_roots_scope(this);
+    _sh->change_strong_roots_parity();
     // Zero the claimed high water mark in the StringTable
     StringTable::clear_parallel_claimed_index();
   }
 }
 
 SharedHeap::StrongRootsScope::~StrongRootsScope() {
-  // nothing particular
+  if (_active) {
+    _sh->unregister_strong_roots_scope(this);
+  }
+}
+
+Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false);
+
+void SharedHeap::StrongRootsScope::mark_worker_done_with_threads(uint n_workers) {
+  // The Thread work barrier is only needed by G1 Class Unloading.
+  // No need to use the barrier if this is single-threaded code.
+  if (UseG1GC && ClassUnloadingWithConcurrentMark && n_workers > 0) {
+    uint new_value = (uint)Atomic::add(1, &_n_workers_done_with_threads);
+    if (new_value == n_workers) {
+      // This thread is last. Notify the others.
+      MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
+      _lock->notify_all();
+    }
+  }
 }
 
-void SharedHeap::process_strong_roots(bool activate_scope,
-                                      bool is_scavenging,
-                                      ScanningOption so,
-                                      OopClosure* roots,
-                                      CodeBlobClosure* code_roots,
-                                      KlassClosure* klass_closure) {
+void SharedHeap::StrongRootsScope::wait_until_all_workers_done_with_threads(uint n_workers) {
+  assert(UseG1GC,                          "Currently only used by G1");
+  assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");
+
+  // No need to use the barrier if this is single-threaded code.
+  if (n_workers > 0 && (uint)_n_workers_done_with_threads != n_workers) {
+    MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
+    while ((uint)_n_workers_done_with_threads != n_workers) {
+      _lock->wait(Mutex::_no_safepoint_check_flag, 0, false);
+    }
+  }
+}
+
+void SharedHeap::process_roots(bool activate_scope,
+                               ScanningOption so,
+                               OopClosure* strong_roots,
+                               OopClosure* weak_roots,
+                               CLDClosure* strong_cld_closure,
+                               CLDClosure* weak_cld_closure,
+                               CodeBlobClosure* code_roots) {
   StrongRootsScope srs(this, activate_scope);
 
-  // General strong roots.
+  // General roots.
   assert(_strong_roots_parity != 0, "must have called prologue code");
+  assert(code_roots != NULL, "code root closure should always be set");
   // _n_termination for _process_strong_tasks should be set up stream
   // in a method not running in a GC worker.  Otherwise the GC worker
   // could be trying to change the termination condition while the task
   // is executing in another GC worker.
+
+  // Iterating over the CLDG and the Threads are done early to allow G1 to
+  // first process the strong CLDs and nmethods and then, after a barrier,
+  // let the thread process the weak CLDs and nmethods.
+
+  if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
+    ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
+  }
+
+  // Some CLDs contained in the thread frames should be considered strong.
+  // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
+  CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
+  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
+  CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;
+
+  Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);
+
+  // This is the point where this worker thread will not find more strong CLDs/nmethods.
+  // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
+  active_strong_roots_scope()->mark_worker_done_with_threads(n_par_threads());
+
   if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
-    Universe::oops_do(roots);
+    Universe::oops_do(strong_roots);
   }
   // Global (strong) JNI handles
   if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
-    JNIHandles::oops_do(roots);
-
-  // All threads execute this; the individual threads are task groups.
-  CLDToOopClosure roots_from_clds(roots);
-  CLDToOopClosure* roots_from_clds_p = (is_scavenging ? NULL : &roots_from_clds);
-  if (CollectedHeap::use_parallel_gc_threads()) {
-    Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, code_roots);
-  } else {
-    Threads::oops_do(roots, roots_from_clds_p, code_roots);
-  }
+    JNIHandles::oops_do(strong_roots);
 
   if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
-    ObjectSynchronizer::oops_do(roots);
+    ObjectSynchronizer::oops_do(strong_roots);
   if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
-    FlatProfiler::oops_do(roots);
+    FlatProfiler::oops_do(strong_roots);
   if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
-    Management::oops_do(roots);
+    Management::oops_do(strong_roots);
   if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
-    JvmtiExport::oops_do(roots);
+    JvmtiExport::oops_do(strong_roots);
 
   if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
-    if (so & SO_AllClasses) {
-      SystemDictionary::oops_do(roots);
-    } else if (so & SO_SystemClasses) {
-      SystemDictionary::always_strong_oops_do(roots);
-    } else {
-      fatal("We should always have selected either SO_AllClasses or SO_SystemClasses");
-    }
-  }
-
-  if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
-    if (so & SO_AllClasses) {
-      ClassLoaderDataGraph::oops_do(roots, klass_closure, !is_scavenging);
-    } else if (so & SO_SystemClasses) {
-      ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, !is_scavenging);
-    }
+    SystemDictionary::roots_oops_do(strong_roots, weak_roots);
   }
 
   // All threads execute the following. A specific chunk of buckets
   // from the StringTable are the individual tasks.
-  if (so & SO_Strings) {
+  if (weak_roots != NULL) {
     if (CollectedHeap::use_parallel_gc_threads()) {
-      StringTable::possibly_parallel_oops_do(roots);
+      StringTable::possibly_parallel_oops_do(weak_roots);
     } else {
-      StringTable::oops_do(roots);
+      StringTable::oops_do(weak_roots);
     }
   }
 
   if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
-    if (so & SO_CodeCache) {
+    if (so & SO_ScavengeCodeCache) {
       assert(code_roots != NULL, "must supply closure for code cache");
 
-      if (is_scavenging) {
-        // We only visit parts of the CodeCache when scavenging.
-        CodeCache::scavenge_root_nmethods_do(code_roots);
-      } else {
-        // CMSCollector uses this to do intermediate-strength collections.
-        // We scan the entire code cache, since CodeCache::do_unloading is not called.
-        CodeCache::blobs_do(code_roots);
-      }
+      // We only visit parts of the CodeCache when scavenging.
+      CodeCache::scavenge_root_nmethods_do(code_roots);
+    }
+    if (so & SO_AllCodeCache) {
+      assert(code_roots != NULL, "must supply closure for code cache");
+
+      // CMSCollector uses this to do intermediate-strength collections.
+      // We scan the entire code cache, since CodeCache::do_unloading is not called.
+      CodeCache::blobs_do(code_roots);
     }
     // Verify that the code cache contents are not subject to
     // movement by a scavenging collection.
-    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false));
+    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
     DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
   }
 
   _process_strong_tasks->all_tasks_completed();
 }
 
+void SharedHeap::process_all_roots(bool activate_scope,
+                                   ScanningOption so,
+                                   OopClosure* roots,
+                                   CLDClosure* cld_closure,
+                                   CodeBlobClosure* code_closure) {
+  process_roots(activate_scope, so,
+                roots, roots,
+                cld_closure, cld_closure,
+                code_closure);
+}
+
+void SharedHeap::process_strong_roots(bool activate_scope,
+                                      ScanningOption so,
+                                      OopClosure* roots,
+                                      CLDClosure* cld_closure,
+                                      CodeBlobClosure* code_closure) {
+  process_roots(activate_scope, so,
+                roots, NULL,
+                cld_closure, NULL,
+                code_closure);
+}
+
+
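The two wrappers above make the split explicit: process_all_roots() passes the same closures for both the strong and the weak case, while process_strong_roots() passes NULL for the weak closures, and process_roots() applies a weak closure only when one was supplied (as in the StringTable branch earlier in this hunk). A minimal standalone sketch of that pattern follows; the names (RootSet, RootClosure, PrintClosure) are placeholders for illustration and are not part of this patch.

// Standalone sketch, not HotSpot code: strong-only root processing is just
// all-roots processing with the weak closures passed as NULL, and the callee
// applies a weak closure only when one was supplied.
#include <cstddef>
#include <cstdio>
#include <vector>

struct RootClosure {
  virtual void do_root(int* p) = 0;
  virtual ~RootClosure() {}
};

struct PrintClosure : RootClosure {
  const char* _tag;
  explicit PrintClosure(const char* tag) : _tag(tag) {}
  void do_root(int* p) { std::printf("%s root %d\n", _tag, *p); }
};

struct RootSet {
  std::vector<int> _strong;
  std::vector<int> _weak;

  // Analogue of process_roots(): the weak closure may be NULL.
  void process_roots(RootClosure* strong, RootClosure* weak) {
    for (std::size_t i = 0; i < _strong.size(); i++) strong->do_root(&_strong[i]);
    if (weak != NULL) {
      for (std::size_t i = 0; i < _weak.size(); i++) weak->do_root(&_weak[i]);
    }
  }
  // Analogue of process_all_roots(): same closure for both kinds of roots.
  void process_all_roots(RootClosure* cl)    { process_roots(cl, cl); }
  // Analogue of process_strong_roots(): weak roots are skipped entirely.
  void process_strong_roots(RootClosure* cl) { process_roots(cl, NULL); }
};

int main() {
  RootSet rs;
  rs._strong.push_back(1);
  rs._weak.push_back(2);
  PrintClosure cl("visited");
  rs.process_strong_roots(&cl);  // visits only the strong root
  rs.process_all_roots(&cl);     // visits both roots
  return 0;
}

The point of the sketch is only the NULL-means-skip convention; the real code additionally splits the CLD and code-blob closures the same way.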
 class AlwaysTrueClosure: public BoolObjectClosure {
 public:
   bool do_object_b(oop p) { return true; }
 };
 static AlwaysTrueClosure always_true;
 
-void SharedHeap::process_weak_roots(OopClosure* root_closure,
-                                    CodeBlobClosure* code_roots) {
+void SharedHeap::process_weak_roots(OopClosure* root_closure) {
   // Global (weak) JNI handles
   JNIHandles::weak_oops_do(&always_true, root_closure);
-
-  CodeCache::blobs_do(code_roots);
-  StringTable::oops_do(root_closure);
 }
 
 void SharedHeap::set_barrier_set(BarrierSet* bs) {
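Earlier in this hunk, process_roots() calls active_strong_roots_scope()->mark_worker_done_with_threads(n_par_threads()) once a worker has finished scanning thread stacks; StrongRootsScope (declared in sharedHeap.hpp below) pairs that with wait_until_all_workers_done_with_threads() so G1 can hold back weak CLD/nmethod processing until every worker has reported in. A simplified sketch of that counting barrier, written against std::mutex/std::condition_variable rather than HotSpot's Monitor, could look like:

// Simplified counting barrier, in standard C++ rather than HotSpot's Monitor:
// each worker bumps a counter once it is done scanning thread stacks, and a
// waiter blocks until the counter reaches the number of workers.
#include <condition_variable>
#include <mutex>

class WorkerThreadsBarrier {
  std::mutex              _lock;
  std::condition_variable _cv;
  unsigned                _n_done;  // workers done with the Threads work

 public:
  WorkerThreadsBarrier() : _n_done(0) {}

  // Called by each worker after it has finished scanning thread stacks.
  void mark_worker_done_with_threads(unsigned n_workers) {
    std::unique_lock<std::mutex> ml(_lock);
    if (++_n_done == n_workers) {
      _cv.notify_all();  // last worker releases any waiters
    }
  }

  // Called by code that must not touch weak CLDs/nmethods until every worker
  // has reported that it is done with the Threads work.
  void wait_until_all_workers_done_with_threads(unsigned n_workers) {
    std::unique_lock<std::mutex> ml(_lock);
    while (_n_done < n_workers) {
      _cv.wait(ml);
    }
  }
};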
--- a/src/share/vm/memory/sharedHeap.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/sharedHeap.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -69,14 +69,10 @@
 //    number of active GC workers.  CompactibleFreeListSpace and Space
 //    have SequentialSubTasksDone's.
 // Example of using SubTasksDone and SequentialSubTasksDone
-// G1CollectedHeap::g1_process_strong_roots() calls
-//  process_strong_roots(false, // no scoping; this is parallel code
-//                       is_scavenging, so,
-//                       &buf_scan_non_heap_roots,
-//                       &eager_scan_code_roots);
-//  which delegates to SharedHeap::process_strong_roots() and uses
+// G1CollectedHeap::g1_process_roots() delegates
+//  to SharedHeap::process_roots() and uses
 //  SubTasksDone* _process_strong_tasks to claim tasks.
-//  process_strong_roots() calls
+//  process_roots() calls
 //      rem_set()->younger_refs_iterate()
 //  to scan the card table, which eventually calls down into
 //  CardTableModRefBS::par_non_clean_card_iterate_work().  This method
@@ -163,9 +159,6 @@
   // Iteration functions.
   void oop_iterate(ExtendedOopClosure* cl) = 0;
 
-  // Same as above, restricted to a memory region.
-  virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl) = 0;
-
   // Iterate over all spaces in use in the heap, in an undefined order.
   virtual void space_iterate(SpaceClosure* cl) = 0;
 
@@ -185,12 +178,12 @@
   // task.  (This also means that a parallel thread may only call
   // process_strong_roots once.)
   //
-  // For calls to process_strong_roots by sequential code, the parity is
+  // For calls to process_roots by sequential code, the parity is
   // updated automatically.
   //
   // The idea is that objects representing fine-grained tasks, such as
   // threads, will contain a "parity" field.  A task is claimed in the
-  // current "process_strong_roots" call only if its parity field is the
+  // current "process_roots" call only if its parity field is the
   // same as the "strong_roots_parity"; task claiming is accomplished by
   // updating the parity field to the strong_roots_parity with a CAS.
   //
@@ -201,27 +194,45 @@
   //   c) to never return a distinguished value (zero) with which such
   //      task-claiming variables may be initialized, to indicate "never
   //      claimed".
- private:
-  void change_strong_roots_parity();
  public:
   int strong_roots_parity() { return _strong_roots_parity; }
 
-  // Call these in sequential code around process_strong_roots.
+  // Call these in sequential code around process_roots.
   // strong_roots_prologue calls change_strong_roots_parity, if
   // parallel tasks are enabled.
   class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
-  public:
-    StrongRootsScope(SharedHeap* outer, bool activate = true);
+    // Used to implement the Thread work barrier.
+    static Monitor* _lock;
+
+    SharedHeap*   _sh;
+    volatile jint _n_workers_done_with_threads;
+
+   public:
+    StrongRootsScope(SharedHeap* heap, bool activate = true);
     ~StrongRootsScope();
+
+    // Mark that this thread is done with the Threads work.
+    void mark_worker_done_with_threads(uint n_workers);
+    // Wait until all n_workers are done with the Threads work.
+    void wait_until_all_workers_done_with_threads(uint n_workers);
   };
   friend class StrongRootsScope;
 
+  // The currently active StrongRootsScope
+  StrongRootsScope* _strong_roots_scope;
+
+  StrongRootsScope* active_strong_roots_scope() const;
+
+ private:
+  void register_strong_roots_scope(StrongRootsScope* scope);
+  void unregister_strong_roots_scope(StrongRootsScope* scope);
+  void change_strong_roots_parity();
+
+ public:
   enum ScanningOption {
-    SO_None                = 0x0,
-    SO_AllClasses          = 0x1,
-    SO_SystemClasses       = 0x2,
-    SO_Strings             = 0x4,
-    SO_CodeCache           = 0x8
+    SO_None                =  0x0,
+    SO_AllCodeCache        =  0x8,
+    SO_ScavengeCodeCache   = 0x10
   };
 
   FlexibleWorkGang* workers() const { return _workers; }
@@ -229,22 +240,29 @@
   // Invoke the "do_oop" method the closure "roots" on all root locations.
   // The "so" argument determines which roots the closure is applied to:
   // "SO_None" does none;
-  // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
-  // "SO_SystemClasses" to all the "system" classes and loaders;
-  // "SO_Strings" applies the closure to all entries in StringTable;
-  // "SO_CodeCache" applies the closure to all elements of the CodeCache.
+  // "SO_AllCodeCache" applies the closure to all elements of the CodeCache.
+  // "SO_ScavengeCodeCache" applies the closure to elements on the scavenge root list in the CodeCache.
+  void process_roots(bool activate_scope,
+                     ScanningOption so,
+                     OopClosure* strong_roots,
+                     OopClosure* weak_roots,
+                     CLDClosure* strong_cld_closure,
+                     CLDClosure* weak_cld_closure,
+                     CodeBlobClosure* code_roots);
+  void process_all_roots(bool activate_scope,
+                         ScanningOption so,
+                         OopClosure* roots,
+                         CLDClosure* cld_closure,
+                         CodeBlobClosure* code_roots);
   void process_strong_roots(bool activate_scope,
-                            bool is_scavenging,
                             ScanningOption so,
                             OopClosure* roots,
-                            CodeBlobClosure* code_roots,
-                            KlassClosure* klass_closure);
+                            CLDClosure* cld_closure,
+                            CodeBlobClosure* code_roots);
 
-  // Apply "blk" to all the weak roots of the system.  These include
-  // JNI weak roots, the code cache, system dictionary, symbol table,
-  // string table.
-  void process_weak_roots(OopClosure* root_closure,
-                          CodeBlobClosure* code_roots);
+
+  // Apply "root_closure" to the JNI weak roots..
+  void process_weak_roots(OopClosure* root_closure);
 
   // The functions below are helper functions that a subclass of
   // "SharedHeap" can use in the implementation of its virtual
@@ -257,7 +275,7 @@
   virtual void gc_epilogue(bool full) = 0;
 
   // Sets the number of parallel threads that will be doing tasks
-  // (such as process strong roots) subsequently.
+  // (such as process roots) subsequently.
   virtual void set_par_threads(uint t);
 
   int n_termination();
@@ -274,4 +292,8 @@
                              size_t capacity);
 };
 
+inline SharedHeap::ScanningOption operator|(SharedHeap::ScanningOption so0, SharedHeap::ScanningOption so1) {
+  return static_cast<SharedHeap::ScanningOption>(static_cast<int>(so0) | static_cast<int>(so1));
+}
+
 #endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP
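The inline operator| added above is needed because or-ing two ScanningOption enumerators yields an int, which does not convert back to the enum type implicitly, so combined options such as SO_ScavengeCodeCache | SO_AllCodeCache would otherwise not compile at call sites expecting a ScanningOption. A self-contained illustration (the enumerator values mirror the patch, the rest is a sketch):

// Self-contained illustration: without the overload, the bitwise-or result is
// an int and cannot be passed where a ScanningOption is expected.
#include <cassert>

enum ScanningOption {
  SO_None              =  0x0,
  SO_AllCodeCache      =  0x8,
  SO_ScavengeCodeCache = 0x10
};

inline ScanningOption operator|(ScanningOption so0, ScanningOption so1) {
  return static_cast<ScanningOption>(static_cast<int>(so0) | static_cast<int>(so1));
}

int main() {
  ScanningOption so = SO_ScavengeCodeCache | SO_AllCodeCache;  // stays a ScanningOption
  assert((so & SO_AllCodeCache) != 0);
  assert((so & SO_ScavengeCodeCache) != 0);
  return 0;
}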
--- a/src/share/vm/memory/space.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/space.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -28,6 +28,7 @@
 #include "gc_implementation/shared/liveRange.hpp"
 #include "gc_implementation/shared/markSweep.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
+#include "gc_interface/collectedHeap.inline.hpp"
 #include "memory/blockOffsetTable.inline.hpp"
 #include "memory/defNewGeneration.hpp"
 #include "memory/genCollectedHeap.hpp"
@@ -37,14 +38,13 @@
 #include "oops/oop.inline.hpp"
 #include "oops/oop.inline2.hpp"
 #include "runtime/java.hpp"
+#include "runtime/prefetch.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/safepoint.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
 
-void SpaceMemRegionOopsIterClosure::do_oop(oop* p)       { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
-void SpaceMemRegionOopsIterClosure::do_oop(narrowOop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
-
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
 HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
@@ -307,10 +307,6 @@
   CompactibleSpace::clear(mangle_space);
 }
 
-bool ContiguousSpace::is_in(const void* p) const {
-  return _bottom <= p && p < _top;
-}
-
 bool ContiguousSpace::is_free_block(const HeapWord* p) const {
   return p >= _top;
 }
@@ -552,115 +548,11 @@
   object_iterate(&blk2);
 }
 
-HeapWord* Space::object_iterate_careful(ObjectClosureCareful* cl) {
-  guarantee(false, "NYI");
-  return bottom();
-}
-
-HeapWord* Space::object_iterate_careful_m(MemRegion mr,
-                                          ObjectClosureCareful* cl) {
-  guarantee(false, "NYI");
-  return bottom();
-}
-
-
-void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
-  assert(!mr.is_empty(), "Should be non-empty");
-  // We use MemRegion(bottom(), end()) rather than used_region() below
-  // because the two are not necessarily equal for some kinds of
-  // spaces, in particular, certain kinds of free list spaces.
-  // We could use the more complicated but more precise:
-  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
-  // but the slight imprecision seems acceptable in the assertion check.
-  assert(MemRegion(bottom(), end()).contains(mr),
-         "Should be within used space");
-  HeapWord* prev = cl->previous();   // max address from last time
-  if (prev >= mr.end()) { // nothing to do
-    return;
-  }
-  // This assert will not work when we go from cms space to perm
-  // space, and use same closure. Easy fix deferred for later. XXX YSR
-  // assert(prev == NULL || contains(prev), "Should be within space");
-
-  bool last_was_obj_array = false;
-  HeapWord *blk_start_addr, *region_start_addr;
-  if (prev > mr.start()) {
-    region_start_addr = prev;
-    blk_start_addr    = prev;
-    // The previous invocation may have pushed "prev" beyond the
-    // last allocated block yet there may be still be blocks
-    // in this region due to a particular coalescing policy.
-    // Relax the assertion so that the case where the unallocated
-    // block is maintained and "prev" is beyond the unallocated
-    // block does not cause the assertion to fire.
-    assert((BlockOffsetArrayUseUnallocatedBlock &&
-            (!is_in(prev))) ||
-           (blk_start_addr == block_start(region_start_addr)), "invariant");
-  } else {
-    region_start_addr = mr.start();
-    blk_start_addr    = block_start(region_start_addr);
-  }
-  HeapWord* region_end_addr = mr.end();
-  MemRegion derived_mr(region_start_addr, region_end_addr);
-  while (blk_start_addr < region_end_addr) {
-    const size_t size = block_size(blk_start_addr);
-    if (block_is_obj(blk_start_addr)) {
-      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
-    } else {
-      last_was_obj_array = false;
-    }
-    blk_start_addr += size;
-  }
-  if (!last_was_obj_array) {
-    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
-           "Should be within (closed) used space");
-    assert(blk_start_addr > prev, "Invariant");
-    cl->set_previous(blk_start_addr); // min address for next time
-  }
-}
-
 bool Space::obj_is_alive(const HeapWord* p) const {
   assert (block_is_obj(p), "The address should point to an object");
   return true;
 }
 
-void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
-  assert(!mr.is_empty(), "Should be non-empty");
-  assert(used_region().contains(mr), "Should be within used space");
-  HeapWord* prev = cl->previous();   // max address from last time
-  if (prev >= mr.end()) { // nothing to do
-    return;
-  }
-  // See comment above (in more general method above) in case you
-  // happen to use this method.
-  assert(prev == NULL || is_in_reserved(prev), "Should be within space");
-
-  bool last_was_obj_array = false;
-  HeapWord *obj_start_addr, *region_start_addr;
-  if (prev > mr.start()) {
-    region_start_addr = prev;
-    obj_start_addr    = prev;
-    assert(obj_start_addr == block_start(region_start_addr), "invariant");
-  } else {
-    region_start_addr = mr.start();
-    obj_start_addr    = block_start(region_start_addr);
-  }
-  HeapWord* region_end_addr = mr.end();
-  MemRegion derived_mr(region_start_addr, region_end_addr);
-  while (obj_start_addr < region_end_addr) {
-    oop obj = oop(obj_start_addr);
-    const size_t size = obj->size();
-    last_was_obj_array = cl->do_object_bm(obj, derived_mr);
-    obj_start_addr += size;
-  }
-  if (!last_was_obj_array) {
-    assert((bottom() <= obj_start_addr)  && (obj_start_addr <= end()),
-           "Should be within (closed) used space");
-    assert(obj_start_addr > prev, "Invariant");
-    cl->set_previous(obj_start_addr); // min address for next time
-  }
-}
-
 #if INCLUDE_ALL_GCS
 #define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)         \
                                                                             \
@@ -688,43 +580,6 @@
   }
 }
 
-void ContiguousSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* blk) {
-  if (is_empty()) {
-    return;
-  }
-  MemRegion cur = MemRegion(bottom(), top());
-  mr = mr.intersection(cur);
-  if (mr.is_empty()) {
-    return;
-  }
-  if (mr.equals(cur)) {
-    oop_iterate(blk);
-    return;
-  }
-  assert(mr.end() <= top(), "just took an intersection above");
-  HeapWord* obj_addr = block_start(mr.start());
-  HeapWord* t = mr.end();
-
-  // Handle first object specially.
-  oop obj = oop(obj_addr);
-  SpaceMemRegionOopsIterClosure smr_blk(blk, mr);
-  obj_addr += obj->oop_iterate(&smr_blk);
-  while (obj_addr < t) {
-    oop obj = oop(obj_addr);
-    assert(obj->is_oop(), "expected an oop");
-    obj_addr += obj->size();
-    // If "obj_addr" is not greater than top, then the
-    // entire object "obj" is within the region.
-    if (obj_addr <= t) {
-      obj->oop_iterate(blk);
-    } else {
-      // "obj" extends beyond end of region
-      obj->oop_iterate(&smr_blk);
-      break;
-    }
-  };
-}
-
 void ContiguousSpace::object_iterate(ObjectClosure* blk) {
   if (is_empty()) return;
   WaterMark bm = bottom_mark();
@@ -830,14 +685,8 @@
 // This version requires locking.
 inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
                                                 HeapWord* const end_value) {
-  // In G1 there are places where a GC worker can allocates into a
-  // region using this serial allocation code without being prone to a
-  // race with other GC workers (we ensure that no other GC worker can
-  // access the same region at the same time). So the assert below is
-  // too strong in the case of G1.
   assert(Heap_lock->owned_by_self() ||
-         (SafepointSynchronize::is_at_safepoint() &&
-                               (Thread::current()->is_VM_thread() || UseG1GC)),
+         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
          "not locked");
   HeapWord* obj = top();
   if (pointer_delta(end_value, obj) >= size) {
@@ -871,6 +720,27 @@
   } while (true);
 }
 
+HeapWord* ContiguousSpace::allocate_aligned(size_t size) {
+  assert(Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()), "not locked");
+  HeapWord* end_value = end();
+
+  HeapWord* obj = CollectedHeap::align_allocation_or_fail(top(), end_value, SurvivorAlignmentInBytes);
+  if (obj == NULL) {
+    return NULL;
+  }
+
+  if (pointer_delta(end_value, obj) >= size) {
+    HeapWord* new_top = obj + size;
+    set_top(new_top);
+    assert(is_ptr_aligned(obj, SurvivorAlignmentInBytes) && is_aligned(new_top),
+      "checking alignment");
+    return obj;
+  } else {
+    set_top(obj);
+    return NULL;
+  }
+}
+
 // Requires locking.
 HeapWord* ContiguousSpace::allocate(size_t size) {
   return allocate_impl(size, end());
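allocate_aligned() above first rounds the current top up to SurvivorAlignmentInBytes via CollectedHeap::align_allocation_or_fail() and then performs the usual bump-pointer fit check. A minimal plain-C++ sketch of that align-then-bump step follows; it omits the filler object HotSpot plants in the skipped gap, and the names are placeholders, so it is an approximation rather than the patch's implementation.

// Plain-C++ sketch of the align-then-bump step (placeholder names); HotSpot's
// align_allocation_or_fail() also plants a filler object in the skipped gap,
// which is omitted here.
#include <cassert>
#include <cstddef>
#include <cstdint>

// Round p up to the next alignment boundary; alignment must be a power of two.
inline char* align_up(char* p, std::size_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
  std::uintptr_t v = reinterpret_cast<std::uintptr_t>(p);
  v = (v + alignment - 1) & ~static_cast<std::uintptr_t>(alignment - 1);
  return reinterpret_cast<char*>(v);
}

// Bump-pointer allocation of size bytes at an aligned address, or NULL if the
// padded request no longer fits before end.
inline char* allocate_aligned(char*& top, char* end, std::size_t size, std::size_t alignment) {
  char* obj = align_up(top, alignment);
  if (obj > end || static_cast<std::size_t>(end - obj) < size) {
    return NULL;       // not enough room once the alignment padding is counted
  }
  top = obj + size;    // advance the allocation pointer past the new object
  return obj;
}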
--- a/src/share/vm/memory/space.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/space.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -33,24 +33,8 @@
 #include "memory/watermark.hpp"
 #include "oops/markOop.hpp"
 #include "runtime/mutexLocker.hpp"
-#include "runtime/prefetch.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/workgroup.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_aix
-# include "os_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
 
 // A space is an abstraction for the "storage units" backing
 // up the generation abstraction. It includes specific
@@ -81,31 +65,6 @@
 class CardTableRS;
 class DirtyCardToOopClosure;
 
-// An oop closure that is circumscribed by a filtering memory region.
-class SpaceMemRegionOopsIterClosure: public ExtendedOopClosure {
- private:
-  ExtendedOopClosure* _cl;
-  MemRegion   _mr;
- protected:
-  template <class T> void do_oop_work(T* p) {
-    if (_mr.contains(p)) {
-      _cl->do_oop(p);
-    }
-  }
- public:
-  SpaceMemRegionOopsIterClosure(ExtendedOopClosure* cl, MemRegion mr):
-    _cl(cl), _mr(mr) {}
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-  virtual bool do_metadata() {
-    // _cl is of type ExtendedOopClosure instead of OopClosure, so that we can check this.
-    assert(!_cl->do_metadata(), "I've checked all call paths, this shouldn't happen.");
-    return false;
-  }
-  virtual void do_klass(Klass* k)                         { ShouldNotReachHere(); }
-  virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }
-};
-
 // A Space describes a heap area. Class Space is an abstract
 // base class.
 //
@@ -145,6 +104,12 @@
 
   void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }
 
+  // Returns true if this object has been allocated since a
+  // generation's "save_marks" call.
+  virtual bool obj_allocated_since_save_marks(const oop obj) const {
+    return (HeapWord*)obj >= saved_mark_word();
+  }
+
   MemRegionClosure* preconsumptionDirtyCardClosure() const {
     return _preconsumptionDirtyCardClosure;
   }
@@ -152,9 +117,9 @@
     _preconsumptionDirtyCardClosure = cl;
   }
 
-  // Returns a subregion of the space containing all the objects in
+  // Returns a subregion of the space containing only the allocated objects in
   // the space.
-  virtual MemRegion used_region() const { return MemRegion(bottom(), end()); }
+  virtual MemRegion used_region() const = 0;
 
   // Returns a region that is guaranteed to contain (at least) all objects
   // allocated at the time of the last call to "save_marks".  If the space
@@ -164,7 +129,7 @@
   // saved mark.  Otherwise, the "obj_allocated_since_save_marks" method of
   // the space must distinguish between objects in the region allocated before
   // and after the call to save marks.
-  virtual MemRegion used_region_at_save_marks() const {
+  MemRegion used_region_at_save_marks() const {
     return MemRegion(bottom(), saved_mark_word());
   }
 
@@ -197,7 +162,9 @@
   // expensive operation. To prevent performance problems
   // on account of its inadvertent use in product jvm's,
   // we restrict its use to assertion checks only.
-  virtual bool is_in(const void* p) const = 0;
+  bool is_in(const void* p) const {
+    return used_region().contains(p);
+  }
 
   // Returns true iff the given reserved memory of the space contains the
   // given address.
@@ -221,11 +188,6 @@
   // applications of the closure are not included in the iteration.
   virtual void oop_iterate(ExtendedOopClosure* cl);
 
-  // Same as above, restricted to the intersection of a memory region and
-  // the space.  Fields in objects allocated by applications of the closure
-  // are not included in the iteration.
-  virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl) = 0;
-
   // Iterate over all objects in the space, calling "cl.do_object" on
   // each.  Objects allocated by applications of the closure are not
   // included in the iteration.
@@ -234,24 +196,6 @@
   // objects whose internal references point to objects in the space.
   virtual void safe_object_iterate(ObjectClosure* blk) = 0;
 
-  // Iterate over all objects that intersect with mr, calling "cl->do_object"
-  // on each.  There is an exception to this: if this closure has already
-  // been invoked on an object, it may skip such objects in some cases.  This is
-  // Most likely to happen in an "upwards" (ascending address) iteration of
-  // MemRegions.
-  virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
-
-  // Iterate over as many initialized objects in the space as possible,
-  // calling "cl.do_object_careful" on each. Return NULL if all objects
-  // in the space (at the start of the iteration) were iterated over.
-  // Return an address indicating the extent of the iteration in the
-  // event that the iteration had to return because of finding an
-  // uninitialized object in the space, or if the closure "cl"
-  // signalled early termination.
-  virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
-  virtual HeapWord* object_iterate_careful_m(MemRegion mr,
-                                             ObjectClosureCareful* cl);
-
   // Create and return a new dirty card to oop closure. Can be
   // overridden to return the appropriate type of closure
   // depending on the type of space in which the closure will
@@ -292,10 +236,6 @@
   // Allocation (return NULL if full).  Enforces mutual exclusion internally.
   virtual HeapWord* par_allocate(size_t word_size) = 0;
 
-  // Returns true if this object has been allocated since a
-  // generation's "save_marks" call.
-  virtual bool obj_allocated_since_save_marks(const oop obj) const = 0;
-
   // Mark-sweep-compact support: all spaces can update pointers to objects
   // moving as a part of compaction.
   virtual void adjust_pointers();
@@ -390,12 +330,11 @@
   Generation* gen;
   CompactibleSpace* space;
   HeapWord* threshold;
-  CompactPoint(Generation* _gen, CompactibleSpace* _space,
-               HeapWord* _threshold) :
-    gen(_gen), space(_space), threshold(_threshold) {}
+
+  CompactPoint(Generation* g = NULL) :
+    gen(g), space(NULL), threshold(0) {}
 };
 
-
 // A space that supports compaction operations.  This is usually, but not
 // necessarily, a space that is normally contiguous.  But, for example, a
 // free-list-based space whose normal collection is a mark-sweep without
@@ -427,7 +366,7 @@
 
   // Perform operations on the space needed after a compaction
   // has been performed.
-  virtual void reset_after_compaction() {}
+  virtual void reset_after_compaction() = 0;
 
   // Returns the next space (in the current generation) to be compacted in
   // the global compaction order.  Also is used to select the next
@@ -492,7 +431,7 @@
   HeapWord* _end_of_live;
 
   // Minimum size of a free block.
-  virtual size_t minimum_free_block_size() const = 0;
+  virtual size_t minimum_free_block_size() const { return 0; }
 
   // This function is invoked when an allocation of an object covering
   // "start" to "end" crosses the threshold; returns the next
@@ -512,272 +451,6 @@
                         size_t word_len);
 };
 
-#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) {            \
-  /* Compute the new addresses for the live objects and store it in the mark \
-   * Used by universe::mark_sweep_phase2()                                   \
-   */                                                                        \
-  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
-                                                                             \
-  /* We're sure to be here before any objects are compacted into this        \
-   * space, so this is a good time to initialize this:                       \
-   */                                                                        \
-  set_compaction_top(bottom());                                              \
-                                                                             \
-  if (cp->space == NULL) {                                                   \
-    assert(cp->gen != NULL, "need a generation");                            \
-    assert(cp->threshold == NULL, "just checking");                          \
-    assert(cp->gen->first_compaction_space() == this, "just checking");      \
-    cp->space = cp->gen->first_compaction_space();                           \
-    compact_top = cp->space->bottom();                                       \
-    cp->space->set_compaction_top(compact_top);                              \
-    cp->threshold = cp->space->initialize_threshold();                       \
-  } else {                                                                   \
-    compact_top = cp->space->compaction_top();                               \
-  }                                                                          \
-                                                                             \
-  /* We allow some amount of garbage towards the bottom of the space, so     \
-   * we don't start compacting before there is a significant gain to be made.\
-   * Occasionally, we want to ensure a full compaction, which is determined  \
-   * by the MarkSweepAlwaysCompactCount parameter.                           \
-   */                                                                        \
-  uint invocations = MarkSweep::total_invocations();                         \
-  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);       \
-                                                                             \
-  size_t allowed_deadspace = 0;                                              \
-  if (skip_dead) {                                                           \
-    const size_t ratio = allowed_dead_ratio();                               \
-    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize;           \
-  }                                                                          \
-                                                                             \
-  HeapWord* q = bottom();                                                    \
-  HeapWord* t = scan_limit();                                                \
-                                                                             \
-  HeapWord*  end_of_live= q;    /* One byte beyond the last byte of the last \
-                                   live object. */                           \
-  HeapWord*  first_dead = end();/* The first dead object. */                 \
-  LiveRange* liveRange  = NULL; /* The current live range, recorded in the   \
-                                   first header of preceding free area. */   \
-  _first_dead = first_dead;                                                  \
-                                                                             \
-  const intx interval = PrefetchScanIntervalInBytes;                         \
-                                                                             \
-  while (q < t) {                                                            \
-    assert(!block_is_obj(q) ||                                               \
-           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||   \
-           oop(q)->mark()->has_bias_pattern(),                               \
-           "these are the only valid states during a mark sweep");           \
-    if (block_is_obj(q) && oop(q)->is_gc_marked()) {                         \
-      /* prefetch beyond q */                                                \
-      Prefetch::write(q, interval);                                          \
-      size_t size = block_size(q);                                           \
-      compact_top = cp->space->forward(oop(q), size, cp, compact_top);       \
-      q += size;                                                             \
-      end_of_live = q;                                                       \
-    } else {                                                                 \
-      /* run over all the contiguous dead objects */                         \
-      HeapWord* end = q;                                                     \
-      do {                                                                   \
-        /* prefetch beyond end */                                            \
-        Prefetch::write(end, interval);                                      \
-        end += block_size(end);                                              \
-      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
-                                                                             \
-      /* see if we might want to pretend this object is alive so that        \
-       * we don't have to compact quite as often.                            \
-       */                                                                    \
-      if (allowed_deadspace > 0 && q == compact_top) {                       \
-        size_t sz = pointer_delta(end, q);                                   \
-        if (insert_deadspace(allowed_deadspace, q, sz)) {                    \
-          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);     \
-          q = end;                                                           \
-          end_of_live = end;                                                 \
-          continue;                                                          \
-        }                                                                    \
-      }                                                                      \
-                                                                             \
-      /* otherwise, it really is a free region. */                           \
-                                                                             \
-      /* for the previous LiveRange, record the end of the live objects. */  \
-      if (liveRange) {                                                       \
-        liveRange->set_end(q);                                               \
-      }                                                                      \
-                                                                             \
-      /* record the current LiveRange object.                                \
-       * liveRange->start() is overlaid on the mark word.                    \
-       */                                                                    \
-      liveRange = (LiveRange*)q;                                             \
-      liveRange->set_start(end);                                             \
-      liveRange->set_end(end);                                               \
-                                                                             \
-      /* see if this is the first dead region. */                            \
-      if (q < first_dead) {                                                  \
-        first_dead = q;                                                      \
-      }                                                                      \
-                                                                             \
-      /* move on to the next object */                                       \
-      q = end;                                                               \
-    }                                                                        \
-  }                                                                          \
-                                                                             \
-  assert(q == t, "just checking");                                           \
-  if (liveRange != NULL) {                                                   \
-    liveRange->set_end(q);                                                   \
-  }                                                                          \
-  _end_of_live = end_of_live;                                                \
-  if (end_of_live < first_dead) {                                            \
-    first_dead = end_of_live;                                                \
-  }                                                                          \
-  _first_dead = first_dead;                                                  \
-                                                                             \
-  /* save the compaction_top of the compaction space. */                     \
-  cp->space->set_compaction_top(compact_top);                                \
-}
-
-#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) {                             \
-  /* adjust all the interior pointers to point at the new locations of objects  \
-   * Used by MarkSweep::mark_sweep_phase3() */                                  \
-                                                                                \
-  HeapWord* q = bottom();                                                       \
-  HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */   \
-                                                                                \
-  assert(_first_dead <= _end_of_live, "Stands to reason, no?");                 \
-                                                                                \
-  if (q < t && _first_dead > q &&                                               \
-      !oop(q)->is_gc_marked()) {                                                \
-    /* we have a chunk of the space which hasn't moved and we've                \
-     * reinitialized the mark word during the previous pass, so we can't        \
-     * use is_gc_marked for the traversal. */                                   \
-    HeapWord* end = _first_dead;                                                \
-                                                                                \
-    while (q < end) {                                                           \
-      /* I originally tried to conjoin "block_start(q) == q" to the             \
-       * assertion below, but that doesn't work, because you can't              \
-       * accurately traverse previous objects to get to the current one         \
-       * after their pointers have been                                         \
-       * updated, until the actual compaction is done.  dld, 4/00 */            \
-      assert(block_is_obj(q),                                                   \
-             "should be at block boundaries, and should be looking at objs");   \
-                                                                                \
-      /* point all the oops to the new location */                              \
-      size_t size = oop(q)->adjust_pointers();                                  \
-      size = adjust_obj_size(size);                                             \
-                                                                                \
-      q += size;                                                                \
-    }                                                                           \
-                                                                                \
-    if (_first_dead == t) {                                                     \
-      q = t;                                                                    \
-    } else {                                                                    \
-      /* $$$ This is funky.  Using this to read the previously written          \
-       * LiveRange.  See also use below. */                                     \
-      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();                \
-    }                                                                           \
-  }                                                                             \
-                                                                                \
-  const intx interval = PrefetchScanIntervalInBytes;                            \
-                                                                                \
-  debug_only(HeapWord* prev_q = NULL);                                          \
-  while (q < t) {                                                               \
-    /* prefetch beyond q */                                                     \
-    Prefetch::write(q, interval);                                               \
-    if (oop(q)->is_gc_marked()) {                                               \
-      /* q is alive */                                                          \
-      /* point all the oops to the new location */                              \
-      size_t size = oop(q)->adjust_pointers();                                  \
-      size = adjust_obj_size(size);                                             \
-      debug_only(prev_q = q);                                                   \
-      q += size;                                                                \
-    } else {                                                                    \
-      /* q is not a live object, so its mark should point at the next           \
-       * live object */                                                         \
-      debug_only(prev_q = q);                                                   \
-      q = (HeapWord*) oop(q)->mark()->decode_pointer();                         \
-      assert(q > prev_q, "we should be moving forward through memory");         \
-    }                                                                           \
-  }                                                                             \
-                                                                                \
-  assert(q == t, "just checking");                                              \
-}
-
-#define SCAN_AND_COMPACT(obj_size) {                                            \
-  /* Copy all live objects to their new location                                \
-   * Used by MarkSweep::mark_sweep_phase4() */                                  \
-                                                                                \
-  HeapWord*       q = bottom();                                                 \
-  HeapWord* const t = _end_of_live;                                             \
-  debug_only(HeapWord* prev_q = NULL);                                          \
-                                                                                \
-  if (q < t && _first_dead > q &&                                               \
-      !oop(q)->is_gc_marked()) {                                                \
-    debug_only(                                                                 \
-    /* we have a chunk of the space which hasn't moved and we've reinitialized  \
-     * the mark word during the previous pass, so we can't use is_gc_marked for \
-     * the traversal. */                                                        \
-    HeapWord* const end = _first_dead;                                          \
-                                                                                \
-    while (q < end) {                                                           \
-      size_t size = obj_size(q);                                                \
-      assert(!oop(q)->is_gc_marked(),                                           \
-             "should be unmarked (special dense prefix handling)");             \
-      debug_only(prev_q = q);                                                   \
-      q += size;                                                                \
-    }                                                                           \
-    )  /* debug_only */                                                         \
-                                                                                \
-    if (_first_dead == t) {                                                     \
-      q = t;                                                                    \
-    } else {                                                                    \
-      /* $$$ Funky */                                                           \
-      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();               \
-    }                                                                           \
-  }                                                                             \
-                                                                                \
-  const intx scan_interval = PrefetchScanIntervalInBytes;                       \
-  const intx copy_interval = PrefetchCopyIntervalInBytes;                       \
-  while (q < t) {                                                               \
-    if (!oop(q)->is_gc_marked()) {                                              \
-      /* mark is pointer to next marked oop */                                  \
-      debug_only(prev_q = q);                                                   \
-      q = (HeapWord*) oop(q)->mark()->decode_pointer();                         \
-      assert(q > prev_q, "we should be moving forward through memory");         \
-    } else {                                                                    \
-      /* prefetch beyond q */                                                   \
-      Prefetch::read(q, scan_interval);                                         \
-                                                                                \
-      /* size and destination */                                                \
-      size_t size = obj_size(q);                                                \
-      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();                \
-                                                                                \
-      /* prefetch beyond compaction_top */                                      \
-      Prefetch::write(compaction_top, copy_interval);                           \
-                                                                                \
-      /* copy object and reinit its mark */                                     \
-      assert(q != compaction_top, "everything in this pass should be moving");  \
-      Copy::aligned_conjoint_words(q, compaction_top, size);                    \
-      oop(compaction_top)->init_mark();                                         \
-      assert(oop(compaction_top)->klass() != NULL, "should have a class");      \
-                                                                                \
-      debug_only(prev_q = q);                                                   \
-      q += size;                                                                \
-    }                                                                           \
-  }                                                                             \
-                                                                                \
-  /* Let's remember if we were empty before we did the compaction. */           \
-  bool was_empty = used_region().is_empty();                                    \
-  /* Reset space after compaction is complete */                                \
-  reset_after_compaction();                                                     \
-  /* We do this clear, below, since it has overloaded meanings for some */      \
-  /* space subtypes.  For example, OffsetTableContigSpace's that were   */      \
-  /* compacted into will have had their offset table thresholds updated */      \
-  /* continuously, but those that weren't need to have their thresholds */      \
-  /* re-initialized.  Also mangles unused area for debugging.           */      \
-  if (used_region().is_empty()) {                                               \
-    if (!was_empty) clear(SpaceDecorator::Mangle);                              \
-  } else {                                                                      \
-    if (ZapUnusedHeapArea) mangle_unused_area();                                \
-  }                                                                             \
-}
-
 class GenSpaceMangler;
 
 // A space in which the free area is contiguous.  It therefore supports
@@ -808,7 +481,7 @@
   HeapWord* top() const            { return _top;    }
   void set_top(HeapWord* value)    { _top = value; }
 
-  virtual void set_saved_mark()    { _saved_mark_word = top();    }
+  void set_saved_mark()            { _saved_mark_word = top();    }
   void reset_saved_mark()          { _saved_mark_word = bottom(); }
 
   WaterMark bottom_mark()     { return WaterMark(this, bottom()); }
@@ -843,36 +516,31 @@
   size_t used() const            { return byte_size(bottom(), top()); }
   size_t free() const            { return byte_size(top(),    end()); }
 
-  // Override from space.
-  bool is_in(const void* p) const;
-
   virtual bool is_free_block(const HeapWord* p) const;
 
   // In a contiguous space we have a more obvious bound on what parts
   // contain objects.
   MemRegion used_region() const { return MemRegion(bottom(), top()); }
 
-  MemRegion used_region_at_save_marks() const {
-    return MemRegion(bottom(), saved_mark_word());
-  }
-
   // Allocation (return NULL if full)
   virtual HeapWord* allocate(size_t word_size);
   virtual HeapWord* par_allocate(size_t word_size);
-
-  virtual bool obj_allocated_since_save_marks(const oop obj) const {
-    return (HeapWord*)obj >= saved_mark_word();
-  }
+  HeapWord* allocate_aligned(size_t word_size);
 
   // Iteration
   void oop_iterate(ExtendedOopClosure* cl);
-  void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
   void object_iterate(ObjectClosure* blk);
   // For contiguous spaces this method will iterate safely over objects
   // in the space (i.e., between bottom and top) when at a safepoint.
   void safe_object_iterate(ObjectClosure* blk);
-  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
-  // iterates on objects up to the safe limit
+
+  // Iterate over as many initialized objects in the space as possible,
+  // calling "cl.do_object_careful" on each. Return NULL if all objects
+  // in the space (at the start of the iteration) were iterated over.
+  // Return an address indicating the extent of the iteration in the
+  // event that the iteration had to return because of finding an
+  // uninitialized object in the space, or if the closure "cl"
+  // signaled early termination.
   HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
   HeapWord* concurrent_iteration_safe_limit() {
     assert(_concurrent_iteration_safe_limit <= top(),
@@ -903,7 +571,6 @@
     // set new iteration safe limit
     set_concurrent_iteration_safe_limit(compaction_top());
   }
-  virtual size_t minimum_free_block_size() const { return 0; }
 
   // Override.
   DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
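Several of the header changes above revolve around the same bookkeeping: used_region() now covers only the allocated part of the space, is_in() is defined as containment in that region, and obj_allocated_since_save_marks() compares an address against the mark recorded by set_saved_mark(). A toy contiguous space (illustration only, not HotSpot types) showing those three queries together:

// Toy contiguous space, illustration only (not HotSpot types).
#include <cassert>
#include <cstddef>

struct ToyContiguousSpace {
  char* _bottom;
  char* _end;
  char* _top;              // next free address; [bottom, top) holds objects
  char* _saved_mark_word;  // value of top at the last set_saved_mark()

  ToyContiguousSpace(char* bottom, std::size_t byte_size)
    : _bottom(bottom), _end(bottom + byte_size), _top(bottom), _saved_mark_word(bottom) {}

  // used_region() covers only the allocated part; is_in() is containment in it.
  bool is_in(const void* p) const {
    const char* cp = static_cast<const char*>(p);
    return cp >= _bottom && cp < _top;
  }

  void set_saved_mark() { _saved_mark_word = _top; }

  // True if the object was allocated after the last set_saved_mark() call.
  bool obj_allocated_since_save_marks(const void* obj) const {
    return static_cast<const char*>(obj) >= _saved_mark_word;
  }

  // Simple bump-pointer allocation, NULL when the space is full.
  char* allocate(std::size_t size) {
    if (static_cast<std::size_t>(_end - _top) < size) return NULL;
    char* obj = _top;
    _top += size;
    return obj;
  }
};

int main() {
  static char backing[1024];
  ToyContiguousSpace s(backing, sizeof(backing));
  char* a = s.allocate(64);
  s.set_saved_mark();
  char* b = s.allocate(64);
  assert(s.is_in(a) && s.is_in(b));
  assert(!s.obj_allocated_since_save_marks(a));
  assert(s.obj_allocated_since_save_marks(b));
  return 0;
}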
--- a/src/share/vm/memory/space.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/space.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -28,12 +28,279 @@
 #include "gc_interface/collectedHeap.hpp"
 #include "memory/space.hpp"
 #include "memory/universe.hpp"
+#include "runtime/prefetch.inline.hpp"
 #include "runtime/safepoint.hpp"
 
 inline HeapWord* Space::block_start(const void* p) {
   return block_start_const(p);
 }
 
+#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) {            \
+  /* Compute the new addresses for the live objects and store it in the mark \
+   * Used by universe::mark_sweep_phase2()                                   \
+   */                                                                        \
+  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
+                                                                             \
+  /* We're sure to be here before any objects are compacted into this        \
+   * space, so this is a good time to initialize this:                       \
+   */                                                                        \
+  set_compaction_top(bottom());                                              \
+                                                                             \
+  if (cp->space == NULL) {                                                   \
+    assert(cp->gen != NULL, "need a generation");                            \
+    assert(cp->threshold == NULL, "just checking");                          \
+    assert(cp->gen->first_compaction_space() == this, "just checking");      \
+    cp->space = cp->gen->first_compaction_space();                           \
+    compact_top = cp->space->bottom();                                       \
+    cp->space->set_compaction_top(compact_top);                              \
+    cp->threshold = cp->space->initialize_threshold();                       \
+  } else {                                                                   \
+    compact_top = cp->space->compaction_top();                               \
+  }                                                                          \
+                                                                             \
+  /* We allow some amount of garbage towards the bottom of the space, so     \
+   * we don't start compacting before there is a significant gain to be made.\
+   * Occasionally, we want to ensure a full compaction, which is determined  \
+   * by the MarkSweepAlwaysCompactCount parameter.                           \
+   */                                                                        \
+  uint invocations = MarkSweep::total_invocations();                         \
+  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);       \
+                                                                             \
+  size_t allowed_deadspace = 0;                                              \
+  if (skip_dead) {                                                           \
+    const size_t ratio = allowed_dead_ratio();                               \
+    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize;           \
+  }                                                                          \
+                                                                             \
+  HeapWord* q = bottom();                                                    \
+  HeapWord* t = scan_limit();                                                \
+                                                                             \
+  HeapWord*  end_of_live= q;    /* One byte beyond the last byte of the last \
+                                   live object. */                           \
+  HeapWord*  first_dead = end();/* The first dead object. */                 \
+  LiveRange* liveRange  = NULL; /* The current live range, recorded in the   \
+                                   first header of preceding free area. */   \
+  _first_dead = first_dead;                                                  \
+                                                                             \
+  const intx interval = PrefetchScanIntervalInBytes;                         \
+                                                                             \
+  while (q < t) {                                                            \
+    assert(!block_is_obj(q) ||                                               \
+           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||   \
+           oop(q)->mark()->has_bias_pattern(),                               \
+           "these are the only valid states during a mark sweep");           \
+    if (block_is_obj(q) && oop(q)->is_gc_marked()) {                         \
+      /* prefetch beyond q */                                                \
+      Prefetch::write(q, interval);                                          \
+      size_t size = block_size(q);                                           \
+      compact_top = cp->space->forward(oop(q), size, cp, compact_top);       \
+      q += size;                                                             \
+      end_of_live = q;                                                       \
+    } else {                                                                 \
+      /* run over all the contiguous dead objects */                         \
+      HeapWord* end = q;                                                     \
+      do {                                                                   \
+        /* prefetch beyond end */                                            \
+        Prefetch::write(end, interval);                                      \
+        end += block_size(end);                                              \
+      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
+                                                                             \
+      /* see if we might want to pretend this object is alive so that        \
+       * we don't have to compact quite as often.                            \
+       */                                                                    \
+      if (allowed_deadspace > 0 && q == compact_top) {                       \
+        size_t sz = pointer_delta(end, q);                                   \
+        if (insert_deadspace(allowed_deadspace, q, sz)) {                    \
+          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);     \
+          q = end;                                                           \
+          end_of_live = end;                                                 \
+          continue;                                                          \
+        }                                                                    \
+      }                                                                      \
+                                                                             \
+      /* otherwise, it really is a free region. */                           \
+                                                                             \
+      /* for the previous LiveRange, record the end of the live objects. */  \
+      if (liveRange) {                                                       \
+        liveRange->set_end(q);                                               \
+      }                                                                      \
+                                                                             \
+      /* record the current LiveRange object.                                \
+       * liveRange->start() is overlaid on the mark word.                    \
+       */                                                                    \
+      liveRange = (LiveRange*)q;                                             \
+      liveRange->set_start(end);                                             \
+      liveRange->set_end(end);                                               \
+                                                                             \
+      /* see if this is the first dead region. */                            \
+      if (q < first_dead) {                                                  \
+        first_dead = q;                                                      \
+      }                                                                      \
+                                                                             \
+      /* move on to the next object */                                       \
+      q = end;                                                               \
+    }                                                                        \
+  }                                                                          \
+                                                                             \
+  assert(q == t, "just checking");                                           \
+  if (liveRange != NULL) {                                                   \
+    liveRange->set_end(q);                                                   \
+  }                                                                          \
+  _end_of_live = end_of_live;                                                \
+  if (end_of_live < first_dead) {                                            \
+    first_dead = end_of_live;                                                \
+  }                                                                          \
+  _first_dead = first_dead;                                                  \
+                                                                             \
+  /* save the compaction_top of the compaction space. */                     \
+  cp->space->set_compaction_top(compact_top);                                \
+}
+
+#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) {                             \
+  /* adjust all the interior pointers to point at the new locations of objects  \
+   * Used by MarkSweep::mark_sweep_phase3() */                                  \
+                                                                                \
+  HeapWord* q = bottom();                                                       \
+  HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */   \
+                                                                                \
+  assert(_first_dead <= _end_of_live, "Stands to reason, no?");                 \
+                                                                                \
+  if (q < t && _first_dead > q &&                                               \
+      !oop(q)->is_gc_marked()) {                                                \
+    /* we have a chunk of the space which hasn't moved and we've                \
+     * reinitialized the mark word during the previous pass, so we can't        \
+     * use is_gc_marked for the traversal. */                                   \
+    HeapWord* end = _first_dead;                                                \
+                                                                                \
+    while (q < end) {                                                           \
+      /* I originally tried to conjoin "block_start(q) == q" to the             \
+       * assertion below, but that doesn't work, because you can't              \
+       * accurately traverse previous objects to get to the current one         \
+       * after their pointers have been                                         \
+       * updated, until the actual compaction is done.  dld, 4/00 */            \
+      assert(block_is_obj(q),                                                   \
+             "should be at block boundaries, and should be looking at objs");   \
+                                                                                \
+      /* point all the oops to the new location */                              \
+      size_t size = oop(q)->adjust_pointers();                                  \
+      size = adjust_obj_size(size);                                             \
+                                                                                \
+      q += size;                                                                \
+    }                                                                           \
+                                                                                \
+    if (_first_dead == t) {                                                     \
+      q = t;                                                                    \
+    } else {                                                                    \
+      /* $$$ This is funky.  Using this to read the previously written          \
+       * LiveRange.  See also use below. */                                     \
+      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();                \
+    }                                                                           \
+  }                                                                             \
+                                                                                \
+  const intx interval = PrefetchScanIntervalInBytes;                            \
+                                                                                \
+  debug_only(HeapWord* prev_q = NULL);                                          \
+  while (q < t) {                                                               \
+    /* prefetch beyond q */                                                     \
+    Prefetch::write(q, interval);                                               \
+    if (oop(q)->is_gc_marked()) {                                               \
+      /* q is alive */                                                          \
+      /* point all the oops to the new location */                              \
+      size_t size = oop(q)->adjust_pointers();                                  \
+      size = adjust_obj_size(size);                                             \
+      debug_only(prev_q = q);                                                   \
+      q += size;                                                                \
+    } else {                                                                    \
+      /* q is not a live object, so its mark should point at the next           \
+       * live object */                                                         \
+      debug_only(prev_q = q);                                                   \
+      q = (HeapWord*) oop(q)->mark()->decode_pointer();                         \
+      assert(q > prev_q, "we should be moving forward through memory");         \
+    }                                                                           \
+  }                                                                             \
+                                                                                \
+  assert(q == t, "just checking");                                              \
+}
+
+#define SCAN_AND_COMPACT(obj_size) {                                            \
+  /* Copy all live objects to their new location                                \
+   * Used by MarkSweep::mark_sweep_phase4() */                                  \
+                                                                                \
+  HeapWord*       q = bottom();                                                 \
+  HeapWord* const t = _end_of_live;                                             \
+  debug_only(HeapWord* prev_q = NULL);                                          \
+                                                                                \
+  if (q < t && _first_dead > q &&                                               \
+      !oop(q)->is_gc_marked()) {                                                \
+    debug_only(                                                                 \
+    /* we have a chunk of the space which hasn't moved and we've reinitialized  \
+     * the mark word during the previous pass, so we can't use is_gc_marked for \
+     * the traversal. */                                                        \
+    HeapWord* const end = _first_dead;                                          \
+                                                                                \
+    while (q < end) {                                                           \
+      size_t size = obj_size(q);                                                \
+      assert(!oop(q)->is_gc_marked(),                                           \
+             "should be unmarked (special dense prefix handling)");             \
+      debug_only(prev_q = q);                                                   \
+      q += size;                                                                \
+    }                                                                           \
+    )  /* debug_only */                                                         \
+                                                                                \
+    if (_first_dead == t) {                                                     \
+      q = t;                                                                    \
+    } else {                                                                    \
+      /* $$$ Funky */                                                           \
+      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();               \
+    }                                                                           \
+  }                                                                             \
+                                                                                \
+  const intx scan_interval = PrefetchScanIntervalInBytes;                       \
+  const intx copy_interval = PrefetchCopyIntervalInBytes;                       \
+  while (q < t) {                                                               \
+    if (!oop(q)->is_gc_marked()) {                                              \
+      /* mark is pointer to next marked oop */                                  \
+      debug_only(prev_q = q);                                                   \
+      q = (HeapWord*) oop(q)->mark()->decode_pointer();                         \
+      assert(q > prev_q, "we should be moving forward through memory");         \
+    } else {                                                                    \
+      /* prefetch beyond q */                                                   \
+      Prefetch::read(q, scan_interval);                                         \
+                                                                                \
+      /* size and destination */                                                \
+      size_t size = obj_size(q);                                                \
+      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();                \
+                                                                                \
+      /* prefetch beyond compaction_top */                                      \
+      Prefetch::write(compaction_top, copy_interval);                           \
+                                                                                \
+      /* copy object and reinit its mark */                                     \
+      assert(q != compaction_top, "everything in this pass should be moving");  \
+      Copy::aligned_conjoint_words(q, compaction_top, size);                    \
+      oop(compaction_top)->init_mark();                                         \
+      assert(oop(compaction_top)->klass() != NULL, "should have a class");      \
+                                                                                \
+      debug_only(prev_q = q);                                                   \
+      q += size;                                                                \
+    }                                                                           \
+  }                                                                             \
+                                                                                \
+  /* Let's remember if we were empty before we did the compaction. */           \
+  bool was_empty = used_region().is_empty();                                    \
+  /* Reset space after compaction is complete */                                \
+  reset_after_compaction();                                                     \
+  /* We do this clear, below, since it has overloaded meanings for some */      \
+  /* space subtypes.  For example, OffsetTableContigSpace's that were   */      \
+  /* compacted into will have had their offset table thresholds updated */      \
+  /* continuously, but those that weren't need to have their thresholds */      \
+  /* re-initialized.  Also mangles unused area for debugging.           */      \
+  if (used_region().is_empty()) {                                               \
+    if (!was_empty) clear(SpaceDecorator::Mangle);                              \
+  } else {                                                                      \
+    if (ZapUnusedHeapArea) mangle_unused_area();                                \
+  }                                                                             \
+}
+
 inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
   HeapWord* res = ContiguousSpace::allocate(size);
   if (res != NULL) {
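
The three SCAN_AND_* macros above are the forward, adjust, and compact passes of a sliding mark-compact collection, with dead regions reused as LiveRange headers so later passes can skip straight to the next live object. As a rough, non-HotSpot illustration of the same three-pass shape (toy types and a flat object array; the LiveRange/mark-word encoding, dead-space insertion, and prefetching of the real macros are omitted), consider:

// Toy illustration of the same three passes (forward, adjust, compact) on a
// flat array of fixed-size "objects"; all names here are made up.
#include <cstddef>
#include <cstdio>
#include <vector>

struct Obj {
  bool   live;      // set by the marking phase, assumed to have run already
  size_t forwardee; // destination slot, filled in by the forwarding pass
  int    payload;
};

int main() {
  std::vector<Obj> heap = {{true,0,1},{false,0,2},{true,0,3},{false,0,4},{true,0,5}};

  // Pass 1 (forward): assign each live object its post-compaction slot.
  size_t compact_top = 0;
  for (Obj& o : heap)
    if (o.live) o.forwardee = compact_top++;

  // Pass 2 (adjust): a real collector rewrites every interior pointer here,
  // using the forwardee computed above; these toy objects hold no references.

  // Pass 3 (compact): slide live objects left into their destination slots.
  for (size_t q = 0; q < heap.size(); ++q)
    if (heap[q].live) heap[heap[q].forwardee] = heap[q];
  heap.resize(compact_top);

  for (const Obj& o : heap) std::printf("%d ", o.payload);  // prints: 1 3 5
  std::printf("\n");
  return 0;
}

The real SCAN_AND_FORWARD additionally allows a bounded amount of dead space to be treated as live (insert_deadspace) so a mostly-live prefix of the space does not have to move at all.
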
--- a/src/share/vm/memory/threadLocalAllocBuffer.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/threadLocalAllocBuffer.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -239,22 +239,19 @@
 }
 
 size_t ThreadLocalAllocBuffer::initial_desired_size() {
-  size_t init_sz;
+  size_t init_sz = 0;
 
   if (TLABSize > 0) {
-    init_sz = MIN2(TLABSize / HeapWordSize, max_size());
-  } else if (global_stats() == NULL) {
-    // Startup issue - main thread initialized before heap initialized.
-    init_sz = min_size();
-  } else {
+    init_sz = TLABSize / HeapWordSize;
+  } else if (global_stats() != NULL) {
     // Initial size is a function of the average number of allocating threads.
     unsigned nof_threads = global_stats()->allocating_threads_avg();
 
     init_sz  = (Universe::heap()->tlab_capacity(myThread()) / HeapWordSize) /
                       (nof_threads * target_refills());
     init_sz = align_object_size(init_sz);
-    init_sz = MIN2(MAX2(init_sz, min_size()), max_size());
   }
+  init_sz = MIN2(MAX2(init_sz, min_size()), max_size());
   return init_sz;
 }
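
The reworked ThreadLocalAllocBuffer::initial_desired_size() now applies the MIN2/MAX2 clamp on every path, so an explicit TLABSize and the startup case (no global_stats() yet, init_sz stays 0) are bounded by min_size()/max_size() just like the heuristic value. A small sketch of that control flow, with illustrative parameters instead of the real fields and flags:

// Sketch of the reworked sizing logic: whichever branch produces init_sz
// (explicit flag, heuristic, or neither), the value is clamped once at the end.
#include <algorithm>
#include <cstddef>

size_t initial_desired_size(size_t tlab_size_words,   // stand-in for TLABSize / HeapWordSize
                            size_t heuristic_words,   // stand-in for the stats-based estimate
                            size_t min_words, size_t max_words) {
  size_t init_sz = 0;
  if (tlab_size_words > 0) {
    init_sz = tlab_size_words;        // explicit size requested on the command line
  } else if (heuristic_words > 0) {
    init_sz = heuristic_words;        // derived from the average number of allocating threads
  }
  // Single clamp on every path, including the startup case where init_sz is still 0.
  return std::min(std::max(init_sz, min_words), max_words);
}
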
 
--- a/src/share/vm/memory/threadLocalAllocBuffer.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/threadLocalAllocBuffer.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -105,7 +105,7 @@
     // do nothing.  tlabs must be inited by initialize() calls
   }
 
-  static const size_t min_size()                 { return align_object_size(MinTLABSize / HeapWordSize); }
+  static const size_t min_size()                 { return align_object_size(MinTLABSize / HeapWordSize) + alignment_reserve(); }
   static const size_t max_size()                 { assert(_max_size != 0, "max_size not set up"); return _max_size; }
   static void set_max_size(size_t max_size)      { _max_size = max_size; }
 
--- a/src/share/vm/memory/universe.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/universe.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,6 +26,9 @@
 #include "classfile/classLoader.hpp"
 #include "classfile/classLoaderData.hpp"
 #include "classfile/javaClasses.hpp"
+#if INCLUDE_CDS
+#include "classfile/sharedClassUtil.hpp"
+#endif
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
@@ -34,6 +37,7 @@
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "interpreter/interpreter.hpp"
 #include "memory/cardTableModRefBS.hpp"
+#include "memory/filemap.hpp"
 #include "memory/gcLocker.inline.hpp"
 #include "memory/genCollectedHeap.hpp"
 #include "memory/genRemSet.hpp"
@@ -74,7 +78,7 @@
 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
-#include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1CollectorPolicy_ext.hpp"
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #endif // INCLUDE_ALL_GCS
 
@@ -115,6 +119,7 @@
 oop Universe::_out_of_memory_error_class_metaspace    = NULL;
 oop Universe::_out_of_memory_error_array_size         = NULL;
 oop Universe::_out_of_memory_error_gc_overhead_limit  = NULL;
+oop Universe::_out_of_memory_error_realloc_objects    = NULL;
 objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL;
 volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0;
 bool Universe::_verify_in_progress                    = false;
@@ -122,6 +127,8 @@
 oop Universe::_arithmetic_exception_instance          = NULL;
 oop Universe::_virtual_machine_error_instance         = NULL;
 oop Universe::_vm_exception                           = NULL;
+oop Universe::_allocation_context_notification_obj    = NULL;
+
 Method* Universe::_throw_illegal_access_error         = NULL;
 Array<int>* Universe::_the_empty_int_array            = NULL;
 Array<u2>* Universe::_the_empty_short_array           = NULL;
@@ -184,6 +191,7 @@
   f->do_oop((oop*)&_out_of_memory_error_class_metaspace);
   f->do_oop((oop*)&_out_of_memory_error_array_size);
   f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
+  f->do_oop((oop*)&_out_of_memory_error_realloc_objects);
     f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
   f->do_oop((oop*)&_null_ptr_exception_instance);
   f->do_oop((oop*)&_arithmetic_exception_instance);
@@ -191,6 +199,7 @@
   f->do_oop((oop*)&_main_thread_group);
   f->do_oop((oop*)&_system_thread_group);
   f->do_oop((oop*)&_vm_exception);
+  f->do_oop((oop*)&_allocation_context_notification_obj);
   debug_only(f->do_oop((oop*)&_fullgc_alot_dummy_array);)
 }
 
@@ -238,8 +247,9 @@
 void initialize_basic_type_klass(Klass* k, TRAPS) {
   Klass* ok = SystemDictionary::Object_klass();
   if (UseSharedSpaces) {
+    ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
     assert(k->super() == ok, "u3");
-    k->restore_unshareable_info(CHECK);
+    k->restore_unshareable_info(loader_data, Handle(), CHECK);
   } else {
     k->initialize_supers(ok, CHECK);
   }
@@ -566,7 +576,8 @@
           (throwable() != Universe::_out_of_memory_error_metaspace)  &&
           (throwable() != Universe::_out_of_memory_error_class_metaspace)  &&
           (throwable() != Universe::_out_of_memory_error_array_size) &&
-          (throwable() != Universe::_out_of_memory_error_gc_overhead_limit));
+          (throwable() != Universe::_out_of_memory_error_gc_overhead_limit) &&
+          (throwable() != Universe::_out_of_memory_error_realloc_objects));
 }
 
 
@@ -665,6 +676,10 @@
     SymbolTable::create_table();
     StringTable::create_table();
     ClassLoader::create_package_info_table();
+
+    if (DumpSharedSpaces) {
+      MetaspaceShared::prepare_for_dumping();
+    }
   }
 
   return JNI_OK;
@@ -786,7 +801,7 @@
 
   } else if (UseG1GC) {
 #if INCLUDE_ALL_GCS
-    G1CollectorPolicy* g1p = new G1CollectorPolicy();
+    G1CollectorPolicyExt* g1p = new G1CollectorPolicyExt();
     g1p->initialize_all();
     G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
     Universe::_collectedHeap = g1h;
@@ -1032,6 +1047,7 @@
     Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false);
     Universe::_out_of_memory_error_gc_overhead_limit =
       k_h->allocate_instance(CHECK_false);
+    Universe::_out_of_memory_error_realloc_objects = k_h->allocate_instance(CHECK_false);
 
     // Setup preallocated NullPointerException
     // (this is currently used for a cheap & dirty solution in compiler exception handling)
@@ -1071,6 +1087,9 @@
     msg = java_lang_String::create_from_str("GC overhead limit exceeded", CHECK_false);
     java_lang_Throwable::set_message(Universe::_out_of_memory_error_gc_overhead_limit, msg());
 
+    msg = java_lang_String::create_from_str("Java heap space: failed reallocation of scalar replaced objects", CHECK_false);
+    java_lang_Throwable::set_message(Universe::_out_of_memory_error_realloc_objects, msg());
+
     msg = java_lang_String::create_from_str("/ by zero", CHECK_false);
     java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg());
 
@@ -1166,6 +1185,11 @@
   MemoryService::add_metaspace_memory_pools();
 
   MemoryService::set_universe_heap(Universe::_collectedHeap);
+#if INCLUDE_CDS
+  if (UseSharedSpaces) {
+    SharedClassUtil::initialize(CHECK_false);
+  }
+#endif
   return true;
 }
 
--- a/src/share/vm/memory/universe.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/universe.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -157,6 +157,7 @@
   static oop          _out_of_memory_error_class_metaspace;
   static oop          _out_of_memory_error_array_size;
   static oop          _out_of_memory_error_gc_overhead_limit;
+  static oop          _out_of_memory_error_realloc_objects;
 
   static Array<int>*       _the_empty_int_array;    // Canonicalized int array
   static Array<u2>*        _the_empty_short_array;  // Canonicalized short array
@@ -178,6 +179,8 @@
   // the vm thread.
   static oop          _vm_exception;
 
+  static oop          _allocation_context_notification_obj;
+
   // The particular choice of collected heap.
   static CollectedHeap* _collectedHeap;
 
@@ -307,6 +310,10 @@
   static oop          arithmetic_exception_instance() { return _arithmetic_exception_instance; }
   static oop          virtual_machine_error_instance() { return _virtual_machine_error_instance; }
   static oop          vm_exception()                  { return _vm_exception; }
+
+  static inline oop   allocation_context_notification_obj();
+  static inline void  set_allocation_context_notification_obj(oop obj);
+
   static Method*      throw_illegal_access_error()    { return _throw_illegal_access_error; }
 
   static Array<int>*       the_empty_int_array()    { return _the_empty_int_array; }
@@ -322,6 +329,7 @@
   static oop out_of_memory_error_class_metaspace()    { return gen_out_of_memory_error(_out_of_memory_error_class_metaspace);   }
   static oop out_of_memory_error_array_size()         { return gen_out_of_memory_error(_out_of_memory_error_array_size); }
   static oop out_of_memory_error_gc_overhead_limit()  { return gen_out_of_memory_error(_out_of_memory_error_gc_overhead_limit);  }
+  static oop out_of_memory_error_realloc_objects()    { return gen_out_of_memory_error(_out_of_memory_error_realloc_objects);  }
 
   // Accessors needed for fast allocation
   static Klass** boolArrayKlassObj_addr()           { return &_boolArrayKlassObj;   }
--- a/src/share/vm/memory/universe.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/memory/universe.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -41,4 +41,12 @@
   return type == T_DOUBLE || type == T_LONG;
 }
 
+inline oop Universe::allocation_context_notification_obj() {
+  return _allocation_context_notification_obj;
+}
+
+inline void Universe::set_allocation_context_notification_obj(oop obj) {
+  _allocation_context_notification_obj = obj;
+}
+
 #endif // SHARE_VM_MEMORY_UNIVERSE_INLINE_HPP
--- a/src/share/vm/oops/arrayKlass.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/arrayKlass.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -100,7 +100,7 @@
   ResourceMark rm(THREAD);
   k->initialize_supers(super_klass(), CHECK);
   k->vtable()->initialize_vtable(false, CHECK);
-  java_lang_Class::create_mirror(k, Handle(NULL), CHECK);
+  java_lang_Class::create_mirror(k, Handle(THREAD, k->class_loader()), Handle(NULL), CHECK);
 }
 
 GrowableArray<Klass*>* ArrayKlass::compute_secondary_supers(int num_extra_slots) {
@@ -193,8 +193,9 @@
   set_component_mirror(NULL);
 }
 
-void ArrayKlass::restore_unshareable_info(TRAPS) {
-  Klass::restore_unshareable_info(CHECK);
+void ArrayKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
+  assert(loader_data == ClassLoaderData::the_null_class_loader_data(), "array classes belong to null loader");
+  Klass::restore_unshareable_info(loader_data, protection_domain, CHECK);
   // Klass recreates the component mirror also
 }
 
--- a/src/share/vm/oops/arrayKlass.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/arrayKlass.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -141,7 +141,7 @@
 
   // CDS support - remove and restore oops from metadata. Oops are not shared.
   virtual void remove_unshareable_info();
-  virtual void restore_unshareable_info(TRAPS);
+  virtual void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS);
 
   // Printing
   void print_on(outputStream* st) const;
--- a/src/share/vm/oops/constantPool.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/constantPool.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1817,11 +1817,22 @@
 
 void ConstantPool::set_on_stack(const bool value) {
   if (value) {
-    _flags |= _on_stack;
+    int old_flags = *const_cast<volatile int *>(&_flags);
+    while ((old_flags & _on_stack) == 0) {
+      int new_flags = old_flags | _on_stack;
+      int result = Atomic::cmpxchg(new_flags, &_flags, old_flags);
+
+      if (result == old_flags) {
+        // Succeeded.
+        MetadataOnStackMark::record(this, Thread::current());
+        return;
+      }
+      old_flags = result;
+    }
   } else {
+    // Clearing is done by a single thread.
     _flags &= ~_on_stack;
   }
-  if (value) MetadataOnStackMark::record(this);
 }
 
 // JSR 292 support for patching constant pool oops after the class is linked and
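
ConstantPool::set_on_stack() now sets the _on_stack bit with a compare-and-swap retry loop so that exactly one thread wins and records the pool with MetadataOnStackMark; clearing remains single-threaded. A minimal sketch of the same set-a-bit-once pattern using std::atomic (HotSpot itself uses Atomic::cmpxchg on the plain int _flags field):

// Set a bit exactly once; the returning-true caller performs the one-time action.
#include <atomic>

bool set_flag_once(std::atomic<int>& flags, int bit) {
  int old_flags = flags.load();
  while ((old_flags & bit) == 0) {
    // On failure, compare_exchange_weak reloads the current value into old_flags.
    if (flags.compare_exchange_weak(old_flags, old_flags | bit)) {
      return true;   // this thread set the bit; do the one-time action here
    }
  }
  return false;      // another thread had already set the bit
}
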
--- a/src/share/vm/oops/cpCache.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/cpCache.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -33,6 +33,7 @@
 #include "prims/jvmtiRedefineClassesTrace.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/handles.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
 # include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
--- a/src/share/vm/oops/cpCache.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/cpCache.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -27,6 +27,7 @@
 
 #include "interpreter/bytecodes.hpp"
 #include "memory/allocation.hpp"
+#include "runtime/orderAccess.hpp"
 #include "utilities/array.hpp"
 
 class PSPromotionManager;
--- a/src/share/vm/oops/instanceClassLoaderKlass.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/instanceClassLoaderKlass.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -28,6 +28,7 @@
 #include "gc_implementation/shared/markSweep.inline.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "memory/genOopClosures.inline.hpp"
+#include "memory/iterator.inline.hpp"
 #include "memory/oopFactory.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceClassLoaderKlass.hpp"
@@ -44,12 +45,6 @@
 #include "oops/oop.pcgc.inline.hpp"
 #endif // INCLUDE_ALL_GCS
 
-#define if_do_metadata_checked(closure, nv_suffix)                    \
-  /* Make sure the non-virtual and the virtual versions match. */     \
-  assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \
-      "Inconsistency in do_metadata");                                \
-  if (closure->do_metadata##nv_suffix())
-
 // Macro to define InstanceClassLoaderKlass::oop_oop_iterate for virtual/nonvirtual for
 // all closures.  Macros calling macros above for each oop size.
 // Since ClassLoader objects have only a pointer to the loader_data, they are not
--- a/src/share/vm/oops/instanceKlass.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/instanceKlass.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -35,6 +35,7 @@
 #include "jvmtifiles/jvmti.h"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/heapInspection.hpp"
+#include "memory/iterator.inline.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/oopFactory.hpp"
 #include "oops/fieldStreams.hpp"
@@ -54,6 +55,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/thread.inline.hpp"
 #include "services/classLoadingService.hpp"
 #include "services/threadService.hpp"
@@ -68,7 +70,7 @@
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
 #include "gc_implementation/parNew/parOopClosures.inline.hpp"
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
@@ -291,6 +293,7 @@
   set_static_oop_field_count(0);
   set_nonstatic_field_size(0);
   set_is_marked_dependent(false);
+  set_has_unloaded_dependent(false);
   set_init_state(InstanceKlass::allocated);
   set_init_thread(NULL);
   set_reference_type(rt);
@@ -504,6 +507,8 @@
 oop InstanceKlass::init_lock() const {
   // return the init lock from the mirror
   oop lock = java_lang_Class::init_lock(java_mirror());
+  // Prevent reordering with any access of initialization state
+  OrderAccess::loadload();
   assert((oop)lock != NULL || !is_not_initialized(), // initialized or in_error state
          "only fully initialized state can have a null lock");
   return lock;
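
The added OrderAccess::loadload() keeps the load of the init-lock oop from being reordered after later reads of the klass's initialization state. Expressed with standard C++ atomics, the same constraint is roughly an acquire load on the lock (a sketch with made-up field names, not the HotSpot types):

// The lock oop must be observed before any later read of the state it guards.
#include <atomic>

struct ToyKlass {
  std::atomic<void*> init_lock{nullptr};
  std::atomic<int>   init_state{0};
};

void* read_lock_before_state(ToyKlass& k, int& state_out) {
  void* lock = k.init_lock.load(std::memory_order_acquire); // later loads may not move above this
  state_out  = k.init_state.load(std::memory_order_relaxed);
  return lock;
}
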
@@ -778,6 +783,41 @@
   }
 }
 
+// Eagerly initialize superinterfaces that declare default methods (i.e., concrete instance methods, of any access)
+void InstanceKlass::initialize_super_interfaces(instanceKlassHandle this_oop, TRAPS) {
+  if (this_oop->has_default_methods()) {
+    for (int i = 0; i < this_oop->local_interfaces()->length(); ++i) {
+      Klass* iface = this_oop->local_interfaces()->at(i);
+      InstanceKlass* ik = InstanceKlass::cast(iface);
+      if (ik->should_be_initialized()) {
+        if (ik->has_default_methods()) {
+          ik->initialize_super_interfaces(ik, THREAD);
+        }
+        // Only initialize() interfaces that "declare" concrete methods.
+        // has_default_methods drives searching superinterfaces since it
+        // means has_default_methods in its superinterface hierarchy
+        if (!HAS_PENDING_EXCEPTION && ik->declares_default_methods()) {
+          ik->initialize(THREAD);
+        }
+        if (HAS_PENDING_EXCEPTION) {
+          Handle e(THREAD, PENDING_EXCEPTION);
+          CLEAR_PENDING_EXCEPTION;
+          {
+            EXCEPTION_MARK;
+            // Locks object, set state, and notify all waiting threads
+            this_oop->set_initialization_state_and_notify(
+                initialization_error, THREAD);
+
+            // ignore any exception thrown, superclass initialization error is
+            // thrown below
+            CLEAR_PENDING_EXCEPTION;
+          }
+          THROW_OOP(e());
+        }
+      }
+    }
+  }
+}
 
 void InstanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
   // Make sure klass is linked (verified) before initialization
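
The new initialize_super_interfaces() recurses through superinterfaces whenever has_default_methods() is set, but only calls initialize() on interfaces that directly declare default methods. A toy sketch of that traversal rule (illustrative types, no exception handling):

// Recurse wherever default methods exist somewhere in the hierarchy,
// but only initialize interfaces that directly declare them.
#include <vector>

struct Iface {
  bool has_default_methods;       // this interface or one of its supers declares them
  bool declares_default_methods;  // declared directly on this interface
  bool initialized = false;
  std::vector<Iface*> supers;
};

void initialize_super_interfaces(Iface& k) {
  if (!k.has_default_methods) return;
  for (Iface* i : k.supers) {
    if (i->initialized) continue;
    if (i->has_default_methods)      initialize_super_interfaces(*i); // keep recursing upwards
    if (i->declares_default_methods) i->initialized = true;           // only declarers get initialized
  }
}
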
@@ -857,33 +897,11 @@
     }
   }
 
+  // Recursively initialize any superinterfaces that declare default methods
+  // Only need to recurse if has_default_methods is set, which covers both
+  // declaring and inheriting default methods.
   if (this_oop->has_default_methods()) {
-    // Step 7.5: initialize any interfaces which have default methods
-    for (int i = 0; i < this_oop->local_interfaces()->length(); ++i) {
-      Klass* iface = this_oop->local_interfaces()->at(i);
-      InstanceKlass* ik = InstanceKlass::cast(iface);
-      if (ik->has_default_methods() && ik->should_be_initialized()) {
-        ik->initialize(THREAD);
-
-        if (HAS_PENDING_EXCEPTION) {
-          Handle e(THREAD, PENDING_EXCEPTION);
-          CLEAR_PENDING_EXCEPTION;
-          {
-            EXCEPTION_MARK;
-            // Locks object, set state, and notify all waiting threads
-            this_oop->set_initialization_state_and_notify(
-                initialization_error, THREAD);
-
-            // ignore any exception thrown, superclass initialization error is
-            // thrown below
-            CLEAR_PENDING_EXCEPTION;
-          }
-          DTRACE_CLASSINIT_PROBE_WAIT(
-              super__failed, InstanceKlass::cast(this_oop()), -1, wait);
-          THROW_OOP(e());
-        }
-      }
-    }
+    this_oop->initialize_super_interfaces(this_oop, CHECK);
   }
 
   // Step 8
@@ -1439,32 +1457,41 @@
 }
 
 Method* InstanceKlass::find_method_impl(Symbol* name, Symbol* signature, bool skipping_overpass) const {
-  return InstanceKlass::find_method_impl(methods(), name, signature, skipping_overpass);
+  return InstanceKlass::find_method_impl(methods(), name, signature, skipping_overpass, false);
 }
 
 // find_instance_method looks up the name/signature in the local methods array
 // and skips over static methods
 Method* InstanceKlass::find_instance_method(
     Array<Method*>* methods, Symbol* name, Symbol* signature) {
-  Method* meth = InstanceKlass::find_method(methods, name, signature);
-  if (meth != NULL && meth->is_static()) {
-      meth = NULL;
-  }
+  Method* meth = InstanceKlass::find_method_impl(methods, name, signature, false, true);
   return meth;
 }
 
+// find_instance_method looks up the name/signature in the local methods array
+// and skips over static methods
+Method* InstanceKlass::find_instance_method(Symbol* name, Symbol* signature) {
+    return InstanceKlass::find_instance_method(methods(), name, signature);
+}
+
 // find_method looks up the name/signature in the local methods array
 Method* InstanceKlass::find_method(
     Array<Method*>* methods, Symbol* name, Symbol* signature) {
-  return InstanceKlass::find_method_impl(methods, name, signature, false);
+  return InstanceKlass::find_method_impl(methods, name, signature, false, false);
 }
 
 Method* InstanceKlass::find_method_impl(
-    Array<Method*>* methods, Symbol* name, Symbol* signature, bool skipping_overpass) {
-  int hit = find_method_index(methods, name, signature, skipping_overpass);
+    Array<Method*>* methods, Symbol* name, Symbol* signature, bool skipping_overpass, bool skipping_static) {
+  int hit = find_method_index(methods, name, signature, skipping_overpass, skipping_static);
   return hit >= 0 ? methods->at(hit): NULL;
 }
 
+bool InstanceKlass::method_matches(Method* m, Symbol* signature, bool skipping_overpass, bool skipping_static) {
+    return (m->signature() == signature) &&
+            (!skipping_overpass || !m->is_overpass()) &&
+            (!skipping_static || !m->is_static());
+}
+
 // Used directly for default_methods to find the index into the
 // default_vtable_indices, and indirectly by find_method
 // find_method_index looks in the local methods array to return the index
@@ -1473,13 +1500,14 @@
 // is important during method resolution to prefer a static method, for example,
 // over an overpass method.
 int InstanceKlass::find_method_index(
-    Array<Method*>* methods, Symbol* name, Symbol* signature, bool skipping_overpass) {
+    Array<Method*>* methods, Symbol* name, Symbol* signature, bool skipping_overpass, bool skipping_static) {
   int hit = binary_search(methods, name);
   if (hit != -1) {
     Method* m = methods->at(hit);
+
     // Do linear search to find matching signature.  First, quick check
     // for common case, ignoring overpasses if requested.
-    if ((m->signature() == signature) && (!skipping_overpass || !m->is_overpass())) return hit;
+    if (method_matches(m, signature, skipping_overpass, skipping_static)) return hit;
 
     // search downwards through overloaded methods
     int i;
@@ -1487,18 +1515,18 @@
         Method* m = methods->at(i);
         assert(m->is_method(), "must be method");
         if (m->name() != name) break;
-        if ((m->signature() == signature) && (!skipping_overpass || !m->is_overpass())) return i;
+        if (method_matches(m, signature, skipping_overpass, skipping_static)) return i;
     }
     // search upwards
     for (i = hit + 1; i < methods->length(); ++i) {
         Method* m = methods->at(i);
         assert(m->is_method(), "must be method");
         if (m->name() != name) break;
-        if ((m->signature() == signature) && (!skipping_overpass || !m->is_overpass())) return i;
+        if (method_matches(m, signature, skipping_overpass, skipping_static)) return i;
     }
     // not found
 #ifdef ASSERT
-    int index = skipping_overpass ? -1 : linear_search(methods, name, signature);
+    int index = skipping_overpass || skipping_static ? -1 : linear_search(methods, name, signature);
     assert(index == -1, err_msg("binary search should have found entry %d", index));
 #endif
   }
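
find_method_index() keeps its shape (binary search on the name-sorted methods array, then a linear scan of same-name neighbours) but now funnels the signature/overpass/static checks through method_matches(). A self-contained sketch of that lookup shape with toy types:

// Binary-search by name, then scan neighbours until the filters accept a candidate.
#include <string>
#include <vector>

struct M { std::string name, sig; bool is_static, is_overpass; };

static bool matches(const M& m, const std::string& sig,
                    bool skip_overpass, bool skip_static) {
  return m.sig == sig && (!skip_overpass || !m.is_overpass)
                      && (!skip_static   || !m.is_static);
}

// methods must be sorted by name; returns an index or -1.
int find_method_index(const std::vector<M>& methods, const std::string& name,
                      const std::string& sig, bool skip_overpass, bool skip_static) {
  int lo = 0, hi = (int)methods.size() - 1, hit = -1;
  while (lo <= hi) {                                  // binary search on the name only
    int mid = lo + (hi - lo) / 2;
    if      (methods[mid].name < name) lo = mid + 1;
    else if (name < methods[mid].name) hi = mid - 1;
    else { hit = mid; break; }
  }
  if (hit < 0) return -1;
  for (int i = hit; i >= 0 && methods[i].name == name; --i)                      // scan downwards
    if (matches(methods[i], sig, skip_overpass, skip_static)) return i;
  for (int i = hit + 1; i < (int)methods.size() && methods[i].name == name; ++i) // scan upwards
    if (matches(methods[i], sig, skip_overpass, skip_static)) return i;
  return -1;
}
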
@@ -1821,6 +1849,9 @@
   return id;
 }
 
+int nmethodBucket::decrement() {
+  return Atomic::add(-1, (volatile int *)&_count);
+}
 
 //
 // Walk the list of dependent nmethods searching for nmethods which
@@ -1835,7 +1866,7 @@
     nmethod* nm = b->get_nmethod();
     // since dependencies aren't removed until an nmethod becomes a zombie,
     // the dependency list may contain nmethods which aren't alive.
-    if (nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
+    if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
       if (TraceDependencies) {
         ResourceMark rm;
         tty->print_cr("Marked for deoptimization");
@@ -1852,6 +1883,43 @@
   return found;
 }
 
+void InstanceKlass::clean_dependent_nmethods() {
+  assert_locked_or_safepoint(CodeCache_lock);
+
+  if (has_unloaded_dependent()) {
+    nmethodBucket* b = _dependencies;
+    nmethodBucket* last = NULL;
+    while (b != NULL) {
+      assert(b->count() >= 0, err_msg("bucket count: %d", b->count()));
+
+      nmethodBucket* next = b->next();
+
+      if (b->count() == 0) {
+        if (last == NULL) {
+          _dependencies = next;
+        } else {
+          last->set_next(next);
+        }
+        delete b;
+        // last stays the same.
+      } else {
+        last = b;
+      }
+
+      b = next;
+    }
+    set_has_unloaded_dependent(false);
+  }
+#ifdef ASSERT
+  else {
+    // Verification
+    for (nmethodBucket* b = _dependencies; b != NULL; b = b->next()) {
+      assert(b->count() >= 0, err_msg("bucket count: %d", b->count()));
+      assert(b->count() != 0, "empty buckets need to be cleaned");
+    }
+  }
+#endif
+}
 
 //
 // Add an nmethodBucket to the list of dependencies for this nmethod.
@@ -1886,13 +1954,10 @@
   nmethodBucket* last = NULL;
   while (b != NULL) {
     if (nm == b->get_nmethod()) {
-      if (b->decrement() == 0) {
-        if (last == NULL) {
-          _dependencies = b->next();
-        } else {
-          last->set_next(b->next());
-        }
-        delete b;
+      int val = b->decrement();
+      guarantee(val >= 0, err_msg("Underflow: %d", val));
+      if (val == 0) {
+        set_has_unloaded_dependent(true);
       }
       return;
     }
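
remove_dependent_nmethod() no longer unlinks a bucket as soon as its count reaches zero; it only decrements atomically and flags the klass, and clean_dependent_nmethods() later removes all zero-count buckets in one pass over the singly linked list. A sketch of that deferred-unlink pattern with illustrative types (only the per-bucket count is atomic; the flag and the sweep are assumed to run under the equivalent of CodeCache_lock, as in the real code):

// Decrement now, unlink later in a single sweep.
#include <atomic>

struct Bucket {
  std::atomic<int> count{1};
  Bucket*          next = nullptr;
};

struct Owner {
  Bucket* head = nullptr;
  bool    has_dead = false;

  void release(Bucket* b) {                    // "remove dependent": decrement, never unlink
    if (b->count.fetch_sub(1) == 1) {          // fetch_sub returns the previous value
      has_dead = true;                         // remember that a later sweep is needed
    }
  }

  void sweep() {                               // "clean dependents": one pass over the list
    if (!has_dead) return;
    Bucket** link = &head;
    while (Bucket* b = *link) {
      if (b->count.load() == 0) {
        *link = b->next;                       // unlink and delete the dead bucket
        delete b;
      } else {
        link = &b->next;
      }
    }
    has_dead = false;
  }
};
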
@@ -1931,6 +1996,10 @@
   nmethodBucket* b = _dependencies;
   while (b != NULL) {
     if (nm == b->get_nmethod()) {
+#ifdef ASSERT
+      int count = b->count();
+      assert(count >= 0, err_msg("count shouldn't be negative: %d", count));
+#endif
       return true;
     }
     b = b->next();
@@ -2135,12 +2204,6 @@
 // closure's do_metadata() method dictates whether the given closure should be
 // applied to the klass ptr in the object header.
 
-#define if_do_metadata_checked(closure, nv_suffix)                    \
-  /* Make sure the non-virtual and the virtual versions match. */     \
-  assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \
-      "Inconsistency in do_metadata");                                \
-  if (closure->do_metadata##nv_suffix())
-
 #define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)        \
                                                                              \
 int InstanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
@@ -2164,10 +2227,9 @@
 int InstanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj,                \
                                               OopClosureType* closure) {        \
   SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
-  /* header */                                                                  \
-  if_do_metadata_checked(closure, nv_suffix) {                                  \
-    closure->do_klass##nv_suffix(obj->klass());                                 \
-  }                                                                             \
+                                                                                \
+  assert_should_ignore_metadata(closure, nv_suffix);                            \
+                                                                                \
   /* instance variables */                                                      \
   InstanceKlass_OOP_MAP_REVERSE_ITERATE(                                        \
     obj,                                                                        \
@@ -2236,7 +2298,7 @@
 #endif // INCLUDE_ALL_GCS
 
 void InstanceKlass::clean_implementors_list(BoolObjectClosure* is_alive) {
-  assert(is_loader_alive(is_alive), "this klass should be live");
+  assert(class_loader_data()->is_alive(is_alive), "this klass should be live");
   if (is_interface()) {
     if (ClassUnloading) {
       Klass* impl = implementor();
@@ -2288,12 +2350,14 @@
   array_klasses_do(remove_unshareable_in_class);
 }
 
-void restore_unshareable_in_class(Klass* k, TRAPS) {
-  k->restore_unshareable_info(CHECK);
+static void restore_unshareable_in_class(Klass* k, TRAPS) {
+  // Array classes have null protection domain.
+  // --> see ArrayKlass::complete_create_array_klass()
+  k->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK);
 }
 
-void InstanceKlass::restore_unshareable_info(TRAPS) {
-  Klass::restore_unshareable_info(CHECK);
+void InstanceKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
+  Klass::restore_unshareable_info(loader_data, protection_domain, CHECK);
   instanceKlassHandle ik(THREAD, this);
 
   Array<Method*>* methods = ik->methods();
@@ -2319,6 +2383,38 @@
   ik->array_klasses_do(restore_unshareable_in_class, CHECK);
 }
 
+// returns true IFF is_in_error_state() has been changed as a result of this call.
+bool InstanceKlass::check_sharing_error_state() {
+  assert(DumpSharedSpaces, "should only be called during dumping");
+  bool old_state = is_in_error_state();
+
+  if (!is_in_error_state()) {
+    bool bad = false;
+    for (InstanceKlass* sup = java_super(); sup; sup = sup->java_super()) {
+      if (sup->is_in_error_state()) {
+        bad = true;
+        break;
+      }
+    }
+    if (!bad) {
+      Array<Klass*>* interfaces = transitive_interfaces();
+      for (int i = 0; i < interfaces->length(); i++) {
+        Klass* iface = interfaces->at(i);
+        if (InstanceKlass::cast(iface)->is_in_error_state()) {
+          bad = true;
+          break;
+        }
+      }
+    }
+
+    if (bad) {
+      set_in_error_state();
+    }
+  }
+
+  return (old_state != is_in_error_state());
+}
+
 static void clear_all_breakpoints(Method* m) {
   m->clear_all_breakpoints();
 }
@@ -2808,6 +2904,22 @@
   OsrList_lock->unlock();
 }
 
+int InstanceKlass::mark_osr_nmethods(const Method* m) {
+  // This is a short non-blocking critical region, so the no safepoint check is ok.
+  MutexLockerEx ml(OsrList_lock, Mutex::_no_safepoint_check_flag);
+  nmethod* osr = osr_nmethods_head();
+  int found = 0;
+  while (osr != NULL) {
+    assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
+    if (osr->method() == m) {
+      osr->mark_for_deoptimization();
+      found++;
+    }
+    osr = osr->osr_link();
+  }
+  return found;
+}
+
 nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_level, bool match_level) const {
   // This is a short non-blocking critical region, so the no safepoint check is ok.
   OsrList_lock->lock_without_safepoint_check();
@@ -2849,28 +2961,27 @@
   return NULL;
 }
 
-void InstanceKlass::add_member_name(int index, Handle mem_name) {
+bool InstanceKlass::add_member_name(Handle mem_name) {
   jweak mem_name_wref = JNIHandles::make_weak_global(mem_name);
   MutexLocker ml(MemberNameTable_lock);
-  assert(0 <= index && index < idnum_allocated_count(), "index is out of bounds");
   DEBUG_ONLY(No_Safepoint_Verifier nsv);
 
+  // Check whether the method was redefined while we were acquiring MemberNameTable_lock;
+  // if so, return false.  We cannot cache obsolete methods: they will crash when the
+  // function is called!
+  Method* method = (Method*)java_lang_invoke_MemberName::vmtarget(mem_name());
+  if (method->is_obsolete()) {
+    return false;
+  } else if (method->is_old()) {
+    // Replace method with redefined version
+    java_lang_invoke_MemberName::set_vmtarget(mem_name(), method_with_idnum(method->method_idnum()));
+  }
+
   if (_member_names == NULL) {
     _member_names = new (ResourceObj::C_HEAP, mtClass) MemberNameTable(idnum_allocated_count());
   }
-  _member_names->add_member_name(index, mem_name_wref);
-}
-
-oop InstanceKlass::get_member_name(int index) {
-  MutexLocker ml(MemberNameTable_lock);
-  assert(0 <= index && index < idnum_allocated_count(), "index is out of bounds");
-  DEBUG_ONLY(No_Safepoint_Verifier nsv);
-
-  if (_member_names == NULL) {
-    return NULL;
-  }
-  oop mem_name =_member_names->get_member_name(index);
-  return mem_name;
+  _member_names->add_member_name(mem_name_wref);
+  return true;
 }
 
 // -----------------------------------------------------------------------------------------------------
@@ -3045,8 +3156,7 @@
         offset          <= (juint) value->length() &&
         offset + length <= (juint) value->length()) {
       st->print(BULLET"string: ");
-      Handle h_obj(obj);
-      java_lang_String::print(h_obj, st);
+      java_lang_String::print(obj, st);
       st->cr();
       if (!WizardMode)  return;  // that is enough
     }
--- a/src/share/vm/oops/instanceKlass.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/instanceKlass.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -226,14 +226,16 @@
   // _is_marked_dependent can be set concurrently, thus cannot be part of the
   // _misc_flags.
   bool            _is_marked_dependent;  // used for marking during flushing and deoptimization
+  bool            _has_unloaded_dependent;
 
   enum {
-    _misc_rewritten            = 1 << 0, // methods rewritten.
-    _misc_has_nonstatic_fields = 1 << 1, // for sizing with UseCompressedOops
-    _misc_should_verify_class  = 1 << 2, // allow caching of preverification
-    _misc_is_anonymous         = 1 << 3, // has embedded _host_klass field
-    _misc_is_contended         = 1 << 4, // marked with contended annotation
-    _misc_has_default_methods  = 1 << 5  // class/superclass/implemented interfaces has default methods
+    _misc_rewritten                = 1 << 0, // methods rewritten.
+    _misc_has_nonstatic_fields     = 1 << 1, // for sizing with UseCompressedOops
+    _misc_should_verify_class      = 1 << 2, // allow caching of preverification
+    _misc_is_anonymous             = 1 << 3, // has embedded _host_klass field
+    _misc_is_contended             = 1 << 4, // marked with contended annotation
+    _misc_has_default_methods      = 1 << 5, // class/superclass/implemented interfaces has default methods
+    _misc_declares_default_methods = 1 << 6  // directly declares default methods (any access)
   };
   u2              _misc_flags;
   u2              _minor_version;        // minor version number of class file
@@ -473,6 +475,9 @@
   bool is_marked_dependent() const         { return _is_marked_dependent; }
   void set_is_marked_dependent(bool value) { _is_marked_dependent = value; }
 
+  bool has_unloaded_dependent() const         { return _has_unloaded_dependent; }
+  void set_has_unloaded_dependent(bool value) { _has_unloaded_dependent = value; }
+
   // initialization (virtuals from Klass)
   bool should_be_initialized() const;  // means that initialize should be called
   void initialize(TRAPS);
@@ -515,10 +520,16 @@
   // find a local method (returns NULL if not found)
   Method* find_method(Symbol* name, Symbol* signature) const;
   static Method* find_method(Array<Method*>* methods, Symbol* name, Symbol* signature);
+
+  // find a local method, but skip static methods
+  Method* find_instance_method(Symbol* name, Symbol* signature);
   static Method* find_instance_method(Array<Method*>* methods, Symbol* name, Symbol* signature);
 
+  // true if method matches signature and conforms to skipping_X conditions.
+  static bool method_matches(Method* m, Symbol* signature, bool skipping_overpass, bool skipping_static);
+
   // find a local method index in default_methods (returns -1 if not found)
-  static int find_method_index(Array<Method*>* methods, Symbol* name, Symbol* signature, bool skipping_overpass);
+  static int find_method_index(Array<Method*>* methods, Symbol* name, Symbol* signature, bool skipping_overpass, bool skipping_static);
 
   // lookup operation (returns NULL if not found)
   Method* uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const;
@@ -676,6 +687,17 @@
     }
   }
 
+  bool declares_default_methods() const {
+    return (_misc_flags & _misc_declares_default_methods) != 0;
+  }
+  void set_declares_default_methods(bool b) {
+    if (b) {
+      _misc_flags |= _misc_declares_default_methods;
+    } else {
+      _misc_flags &= ~_misc_declares_default_methods;
+    }
+  }
+
   // for adding methods, ConstMethod::UNSET_IDNUM means no more ids available
   inline u2 next_method_idnum();
   void set_initial_method_idnum(u2 value)             { _idnum_allocated_count = value; }
@@ -767,6 +789,7 @@
   void set_osr_nmethods_head(nmethod* h)     { _osr_nmethods_head = h; };
   void add_osr_nmethod(nmethod* n);
   void remove_osr_nmethod(nmethod* n);
+  int mark_osr_nmethods(const Method* m);
   nmethod* lookup_osr_nmethod(const Method* m, int bci, int level, bool match_level) const;
 
   // Breakpoint support (see methods on Method* for details)
@@ -946,6 +969,7 @@
 
   void clean_implementors_list(BoolObjectClosure* is_alive);
   void clean_method_data(BoolObjectClosure* is_alive);
+  void clean_dependent_nmethods();
 
   // Explicit metaspace deallocation of fields
   // For RedefineClasses and class file parsing errors, we need to deallocate
@@ -999,6 +1023,13 @@
 
   u2 idnum_allocated_count() const      { return _idnum_allocated_count; }
 
+public:
+  void set_in_error_state() {
+    assert(DumpSharedSpaces, "only call this when dumping archive");
+    _init_state = initialization_error;
+  }
+  bool check_sharing_error_state();
+
 private:
   // initialization state
 #ifdef ASSERT
@@ -1034,6 +1065,7 @@
   static bool link_class_impl                           (instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS);
   static bool verify_code                               (instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS);
   static void initialize_impl                           (instanceKlassHandle this_oop, TRAPS);
+  static void initialize_super_interfaces               (instanceKlassHandle this_oop, TRAPS);
   static void eager_initialize_impl                     (instanceKlassHandle this_oop);
   static void set_initialization_state_and_notify_impl  (instanceKlassHandle this_oop, ClassState state, TRAPS);
   static void call_class_initializer_impl               (instanceKlassHandle this_oop, TRAPS);
@@ -1050,14 +1082,14 @@
 
   // find a local method (returns NULL if not found)
   Method* find_method_impl(Symbol* name, Symbol* signature, bool skipping_overpass) const;
-  static Method* find_method_impl(Array<Method*>* methods, Symbol* name, Symbol* signature, bool skipping_overpass);
+  static Method* find_method_impl(Array<Method*>* methods, Symbol* name, Symbol* signature, bool skipping_overpass, bool skipping_static);
 
   // Free CHeap allocated fields.
   void release_C_heap_structures();
 public:
   // CDS support - remove and restore oops from metadata. Oops are not shared.
   virtual void remove_unshareable_info();
-  virtual void restore_unshareable_info(TRAPS);
+  virtual void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS);
 
   // jvm support
   jint compute_modifier_flags(TRAPS) const;
@@ -1065,8 +1097,7 @@
   // JSR-292 support
   MemberNameTable* member_names() { return _member_names; }
   void set_member_names(MemberNameTable* member_names) { _member_names = member_names; }
-  void add_member_name(int index, Handle member_name);
-  oop  get_member_name(int index);
+  bool add_member_name(Handle member_name);
 
 public:
   // JVMTI support
@@ -1234,7 +1265,7 @@
   }
   int count()                             { return _count; }
   int increment()                         { _count += 1; return _count; }
-  int decrement()                         { _count -= 1; assert(_count >= 0, "don't underflow"); return _count; }
+  int decrement();
   nmethodBucket* next()                   { return _next; }
   void set_next(nmethodBucket* b)         { _next = b; }
   nmethod* get_nmethod()                  { return _nmethod; }
--- a/src/share/vm/oops/instanceMirrorKlass.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/instanceMirrorKlass.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -28,6 +28,7 @@
 #include "gc_implementation/shared/markSweep.inline.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "memory/genOopClosures.inline.hpp"
+#include "memory/iterator.inline.hpp"
 #include "memory/oopFactory.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceMirrorKlass.hpp"
@@ -41,7 +42,7 @@
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
 #include "gc_implementation/parNew/parOopClosures.inline.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
@@ -241,12 +242,6 @@
   return oop_size(obj);                                                               \
 
 
-#define if_do_metadata_checked(closure, nv_suffix)                    \
-  /* Make sure the non-virtual and the virtual versions match. */     \
-  assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \
-      "Inconsistency in do_metadata");                                \
-  if (closure->do_metadata##nv_suffix())
-
 // Macro to define InstanceMirrorKlass::oop_oop_iterate for virtual/nonvirtual for
 // all closures.  Macros calling macros above for each oop size.
 
--- a/src/share/vm/oops/instanceRefKlass.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/instanceRefKlass.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -38,7 +38,7 @@
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
 #include "gc_implementation/parNew/parOopClosures.inline.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
--- a/src/share/vm/oops/klass.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/klass.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -36,11 +36,13 @@
 #include "oops/instanceKlass.hpp"
 #include "oops/klass.inline.hpp"
 #include "oops/oop.inline2.hpp"
-#include "runtime/atomic.hpp"
+#include "runtime/atomic.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "trace/traceMacros.hpp"
 #include "utilities/stack.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
@@ -167,7 +169,12 @@
   _primary_supers[0] = k;
   set_super_check_offset(in_bytes(primary_supers_offset()));
 
-  set_java_mirror(NULL);
+  // The constructor is used from init_self_patching_vtbl_list,
+  // which doesn't zero out the memory before calling the constructor.
+  // Need to set the field explicitly to not hit an assert that the field
+  // should be NULL before setting it.
+  _java_mirror = NULL;
+
   set_modifier_flags(0);
   set_layout_helper(Klass::_lh_neutral_value);
   set_name(NULL);
@@ -186,6 +193,7 @@
   // The klass doesn't have any references at this point.
   clear_modified_oops();
   clear_accumulated_modified_oops();
+  _shared_class_path_index = -1;
 }
 
 jint Klass::array_layout_helper(BasicType etype) {
@@ -399,7 +407,7 @@
   return mirror_alive;
 }
 
-void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive) {
+void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive, bool clean_alive_klasses) {
   if (!ClassUnloading) {
     return;
   }
@@ -444,7 +452,7 @@
     }
 
     // Clean the implementors list and method data.
-    if (current->oop_is_instance()) {
+    if (clean_alive_klasses && current->oop_is_instance()) {
       InstanceKlass* ik = InstanceKlass::cast(current);
       ik->clean_implementors_list(is_alive);
       ik->clean_method_data(is_alive);
@@ -456,12 +464,18 @@
   record_modified_oops();
 }
 
-void Klass::klass_update_barrier_set_pre(void* p, oop v) {
-  // This barrier used by G1, where it's used remember the old oop values,
-  // so that we don't forget any objects that were live at the snapshot at
-  // the beginning. This function is only used when we write oops into
-  // Klasses. Since the Klasses are used as roots in G1, we don't have to
-  // do anything here.
+// This barrier is used by G1 to remember the old oop values, so
+// that we don't forget any objects that were live at the snapshot at
+// the beginning. This function is only used when we write oops into Klasses.
+void Klass::klass_update_barrier_set_pre(oop* p, oop v) {
+#if INCLUDE_ALL_GCS
+  if (UseG1GC) {
+    oop obj = *p;
+    if (obj != NULL) {
+      G1SATBCardTableModRefBS::enqueue(obj);
+    }
+  }
+#endif
 }
 
 void Klass::klass_oop_store(oop* p, oop v) {
@@ -472,7 +486,7 @@
   if (always_do_update_barrier) {
     klass_oop_store((volatile oop*)p, v);
   } else {
-    klass_update_barrier_set_pre((void*)p, v);
+    klass_update_barrier_set_pre(p, v);
     *p = v;
     klass_update_barrier_set(v);
   }
@@ -482,7 +496,7 @@
   assert(!Universe::heap()->is_in_reserved((void*)p), "Should store pointer into metadata");
   assert(v == NULL || Universe::heap()->is_in_reserved((void*)v), "Should store pointer to an object");
 
-  klass_update_barrier_set_pre((void*)p, v);
+  klass_update_barrier_set_pre((oop*)p, v); // Cast away volatile.
   OrderAccess::release_store_ptr(p, v);
   klass_update_barrier_set(v);
 }
@@ -504,27 +518,25 @@
   set_class_loader_data(NULL);
 }
 
-void Klass::restore_unshareable_info(TRAPS) {
+void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
   TRACE_INIT_ID(this);
   // If an exception happened during CDS restore, some of these fields may already be
   // set.  We leave the class on the CLD list, even if incomplete so that we don't
   // modify the CLD list outside a safepoint.
   if (class_loader_data() == NULL) {
-    ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
-    // Restore class_loader_data to the null class loader data
+    // Restore class_loader_data
     set_class_loader_data(loader_data);
 
-    // Add to null class loader list first before creating the mirror
+    // Add to class loader list first before creating the mirror
     // (same order as class file parsing)
     loader_data->add_class(this);
   }
 
-  // Recreate the class mirror.  The protection_domain is always null for
-  // boot loader, for now.
+  // Recreate the class mirror.
   // Only recreate it if not present.  A previous attempt to restore may have
   // gotten an OOM later but keep the mirror if it was created.
   if (java_mirror() == NULL) {
-    java_lang_Class::create_mirror(this, Handle(NULL), CHECK);
+    java_lang_Class::create_mirror(this, class_loader(), protection_domain, CHECK);
   }
 }
 
@@ -707,3 +719,21 @@
 }
 
 #endif
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+class TestKlass {
+ public:
+  static void test_oop_is_instanceClassLoader() {
+    assert(SystemDictionary::ClassLoader_klass()->oop_is_instanceClassLoader(), "assert");
+    assert(!SystemDictionary::String_klass()->oop_is_instanceClassLoader(), "assert");
+  }
+};
+
+void TestKlass_test() {
+  TestKlass::test_oop_is_instanceClassLoader();
+}
+
+#endif
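
Note on the klass.cpp hunk above: the rewritten Klass::klass_update_barrier_set_pre() gives G1 a real SATB pre-write barrier for oop stores into Klasses, where the old version intentionally did nothing. Before the slot is overwritten, the previous value is enqueued (via G1SATBCardTableModRefBS::enqueue) so the snapshot-at-the-beginning invariant still sees objects that were live when marking started. A minimal stand-alone sketch of that pattern, using a plain vector as a stand-in for G1's SATB mark queue; the names below are illustrative, not HotSpot API:

    #include <cstdio>
    #include <vector>

    struct Obj { int id; };

    // Stand-in for G1's SATB mark queue; G1SATBCardTableModRefBS::enqueue()
    // plays this role in the real barrier.
    static std::vector<Obj*> satb_queue;

    static void pre_write_barrier(Obj** slot) {
      Obj* old = *slot;
      if (old != nullptr) {
        satb_queue.push_back(old);   // remember the value that was live at the snapshot
      }
    }

    static void oop_store(Obj** slot, Obj* value) {
      pre_write_barrier(slot);       // corresponds to klass_update_barrier_set_pre(p, v)
      *slot = value;                 // *p = v
      // a post-barrier (card marking) would follow here
    }

    int main() {
      Obj a{1}, b{2};
      Obj* field = &a;
      oop_store(&field, &b);
      std::printf("queued %zu old value(s)\n", satb_queue.size());  // prints 1
      return 0;
    }
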
--- a/src/share/vm/oops/klass.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/klass.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -32,7 +32,6 @@
 #include "oops/klassPS.hpp"
 #include "oops/metadata.hpp"
 #include "oops/oop.hpp"
-#include "runtime/orderAccess.hpp"
 #include "trace/traceMacros.hpp"
 #include "utilities/accessFlags.hpp"
 #include "utilities/macros.hpp"
@@ -177,6 +176,16 @@
   jbyte _modified_oops;             // Card Table Equivalent (YC/CMS support)
   jbyte _accumulated_modified_oops; // Mod Union Equivalent (CMS support)
 
+private:
+  // This is an index into FileMapHeader::_classpath_entry_table[], to
+  // associate this class with the JAR file where it's loaded from during
+  // dump time. If a class is not loaded from the shared archive, this field is
+  // -1.
+  jshort _shared_class_path_index;
+
+  friend class SharedClassUtil;
+protected:
+
   // Constructor
   Klass();
 
@@ -283,6 +292,15 @@
   void clear_accumulated_modified_oops() { _accumulated_modified_oops = 0; }
   bool has_accumulated_modified_oops()   { return _accumulated_modified_oops == 1; }
 
+  int shared_classpath_index() const   {
+    return _shared_class_path_index;
+  };
+
+  void set_shared_classpath_index(int index) {
+    _shared_class_path_index = index;
+  };
+
+
  protected:                                // internal accessors
   Klass* subklass_oop() const            { return _subklass; }
   Klass* next_sibling_oop() const        { return _next_sibling; }
@@ -455,7 +473,7 @@
  public:
   // CDS support - remove and restore oops from metadata. Oops are not shared.
   virtual void remove_unshareable_info();
-  virtual void restore_unshareable_info(TRAPS);
+  virtual void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS);
 
  protected:
   // computes the subtype relationship
@@ -502,6 +520,7 @@
   virtual bool oop_is_objArray_slow()       const { return false; }
   virtual bool oop_is_typeArray_slow()      const { return false; }
  public:
+  virtual bool oop_is_instanceClassLoader() const { return false; }
   virtual bool oop_is_instanceMirror()      const { return false; }
   virtual bool oop_is_instanceRef()         const { return false; }
 
@@ -585,36 +604,9 @@
   // The is_alive closure passed in depends on the Garbage Collector used.
   bool is_loader_alive(BoolObjectClosure* is_alive);
 
-  static void clean_weak_klass_links(BoolObjectClosure* is_alive);
-
-  // Prefetch within oop iterators.  This is a macro because we
-  // can't guarantee that the compiler will inline it.  In 64-bit
-  // it generally doesn't.  Signature is
-  //
-  // static void prefetch_beyond(oop* const start,
-  //                             oop* const end,
-  //                             const intx foffset,
-  //                             const Prefetch::style pstyle);
-#define prefetch_beyond(start, end, foffset, pstyle) {   \
-    const intx foffset_ = (foffset);                     \
-    const Prefetch::style pstyle_ = (pstyle);            \
-    assert(foffset_ > 0, "prefetch beyond, not behind"); \
-    if (pstyle_ != Prefetch::do_none) {                  \
-      oop* ref = (start);                                \
-      if (ref < (end)) {                                 \
-        switch (pstyle_) {                               \
-        case Prefetch::do_read:                          \
-          Prefetch::read(*ref, foffset_);                \
-          break;                                         \
-        case Prefetch::do_write:                         \
-          Prefetch::write(*ref, foffset_);               \
-          break;                                         \
-        default:                                         \
-          ShouldNotReachHere();                          \
-          break;                                         \
-        }                                                \
-      }                                                  \
-    }                                                    \
+  static void clean_weak_klass_links(BoolObjectClosure* is_alive, bool clean_alive_klasses = true);
+  static void clean_subklass_tree(BoolObjectClosure* is_alive) {
+    clean_weak_klass_links(is_alive, false /* clean_alive_klasses */);
   }
 
   // iterators
@@ -722,7 +714,7 @@
  private:
   // barriers used by klass_oop_store
   void klass_update_barrier_set(oop v);
-  void klass_update_barrier_set_pre(void* p, oop v);
+  void klass_update_barrier_set_pre(oop* p, oop v);
 };
 
 #endif // SHARE_VM_OOPS_KLASS_HPP
--- a/src/share/vm/oops/method.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/method.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -49,6 +49,7 @@
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/relocator.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
@@ -92,7 +93,7 @@
   set_hidden(false);
   set_dont_inline(false);
   set_method_data(NULL);
-  set_method_counters(NULL);
+  clear_method_counters();
   set_vtable_index(Method::garbage_vtable_index);
 
   // Fix and bury in Method*
@@ -116,7 +117,7 @@
   MetadataFactory::free_metadata(loader_data, method_data());
   set_method_data(NULL);
   MetadataFactory::free_metadata(loader_data, method_counters());
-  set_method_counters(NULL);
+  clear_method_counters();
   // The nmethod will be gone when we get here.
   if (code() != NULL) _code = NULL;
 }
@@ -387,9 +388,7 @@
   methodHandle mh(m);
   ClassLoaderData* loader_data = mh->method_holder()->class_loader_data();
   MethodCounters* counters = MethodCounters::allocate(loader_data, CHECK_NULL);
-  if (mh->method_counters() == NULL) {
-    mh->set_method_counters(counters);
-  } else {
+  if (!mh->init_method_counters(counters)) {
     MetadataFactory::free_metadata(loader_data, counters);
   }
   return mh->method_counters();
@@ -559,6 +558,15 @@
   return true;
 }
 
+bool Method::is_constant_getter() const {
+  int last_index = code_size() - 1;
+  // Check if the first 1-3 bytecodes are a constant push
+  // and the last bytecode is a return.
+  return (2 <= code_size() && code_size() <= 4 &&
+          Bytecodes::is_const(java_code_at(0)) &&
+          Bytecodes::length_for(java_code_at(0)) == last_index &&
+          Bytecodes::is_return(java_code_at(last_index)));
+}
 
 bool Method::is_initializer() const {
   return name() == vmSymbols::object_initializer_name() || is_static_initializer();
@@ -730,8 +738,8 @@
   }
   if ((TraceDeoptimization || LogCompilation) && (xtty != NULL)) {
     ttyLocker ttyl;
-    xtty->begin_elem("make_not_%scompilable thread='" UINTX_FORMAT "'",
-                     is_osr ? "osr_" : "", os::current_thread_id());
+    xtty->begin_elem("make_not_compilable thread='" UINTX_FORMAT "' osr='%d' level='%d'",
+                     os::current_thread_id(), is_osr, comp_level);
     if (reason != NULL) {
       xtty->print(" reason=\'%s\'", reason);
     }
@@ -851,7 +859,7 @@
   assert(!DumpSharedSpaces || _method_data == NULL, "unexpected method data?");
 
   set_method_data(NULL);
-  set_method_counters(NULL);
+  clear_method_counters();
 }
 
 // Called when the method_holder is getting linked. Setup entrypoints so the method
@@ -1635,34 +1643,34 @@
 }
 
 int Method::highest_comp_level() const {
-  const MethodData* mdo = method_data();
-  if (mdo != NULL) {
-    return mdo->highest_comp_level();
+  const MethodCounters* mcs = method_counters();
+  if (mcs != NULL) {
+    return mcs->highest_comp_level();
   } else {
     return CompLevel_none;
   }
 }
 
 int Method::highest_osr_comp_level() const {
-  const MethodData* mdo = method_data();
-  if (mdo != NULL) {
-    return mdo->highest_osr_comp_level();
+  const MethodCounters* mcs = method_counters();
+  if (mcs != NULL) {
+    return mcs->highest_osr_comp_level();
   } else {
     return CompLevel_none;
   }
 }
 
 void Method::set_highest_comp_level(int level) {
-  MethodData* mdo = method_data();
-  if (mdo != NULL) {
-    mdo->set_highest_comp_level(level);
+  MethodCounters* mcs = method_counters();
+  if (mcs != NULL) {
+    mcs->set_highest_comp_level(level);
   }
 }
 
 void Method::set_highest_osr_comp_level(int level) {
-  MethodData* mdo = method_data();
-  if (mdo != NULL) {
-    mdo->set_highest_osr_comp_level(level);
+  MethodCounters* mcs = method_counters();
+  if (mcs != NULL) {
+    mcs->set_highest_osr_comp_level(level);
   }
 }
 
@@ -1864,9 +1872,12 @@
 void Method::set_on_stack(const bool value) {
   // Set both the method itself and its constant pool.  The constant pool
   // on stack means some method referring to it is also on the stack.
-  _access_flags.set_on_stack(value);
   constants()->set_on_stack(value);
-  if (value) MetadataOnStackMark::record(this);
+
+  bool succeeded = _access_flags.set_on_stack(value);
+  if (value && succeeded) {
+    MetadataOnStackMark::record(this, Thread::current());
+  }
 }
 
 // Called when the class loader is unloaded to make all methods weak.
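
Note on the new Method::is_constant_getter() above: it recognizes methods whose entire body is one constant-push bytecode followed immediately by a return. Assuming the usual javac output (shown only for illustration), static int three() { return 3; } compiles to iconst_3; ireturn, so code_size() is 2, the push is 1 byte (equal to last_index), and the last bytecode is a return; a larger push such as sipush 1000; ireturn (3 + 1 bytes) also fits the 2..4-byte window. A self-contained sketch of the same shape test over a raw bytecode array, with toy opcode helpers standing in for HotSpot's Bytecodes class:

    #include <cstdint>
    #include <cstdio>

    // Opcode values from the JVM spec, used only for this illustration.
    enum : uint8_t { ICONST_3 = 0x06, SIPUSH = 0x11, IRETURN = 0xAC };

    static bool is_const_push(uint8_t op) { return op == ICONST_3 || op == SIPUSH; }
    static int  push_length(uint8_t op)   { return op == SIPUSH ? 3 : 1; }
    static bool is_return(uint8_t op)     { return op == IRETURN; }

    // Same shape test as the new Method::is_constant_getter().
    static bool looks_like_constant_getter(const uint8_t* code, int code_size) {
      int last_index = code_size - 1;
      return 2 <= code_size && code_size <= 4 &&
             is_const_push(code[0]) &&
             push_length(code[0]) == last_index &&
             is_return(code[last_index]);
    }

    int main() {
      const uint8_t three[]    = { ICONST_3, IRETURN };            // return 3;
      const uint8_t thousand[] = { SIPUSH, 0x03, 0xE8, IRETURN };  // return 1000;
      std::printf("%d %d\n",
                  looks_like_constant_getter(three, 2),
                  looks_like_constant_getter(thousand, 4));        // prints "1 1"
      return 0;
    }
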
--- a/src/share/vm/oops/method.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/method.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -237,10 +237,11 @@
   // Tracking number of breakpoints, for fullspeed debugging.
   // Only mutated by VM thread.
   u2   number_of_breakpoints()             const {
-    if (method_counters() == NULL) {
+    MethodCounters* mcs = method_counters();
+    if (mcs == NULL) {
       return 0;
     } else {
-      return method_counters()->number_of_breakpoints();
+      return mcs->number_of_breakpoints();
     }
   }
   void incr_number_of_breakpoints(TRAPS)         {
@@ -257,8 +258,9 @@
   }
   // Initialization only
   void clear_number_of_breakpoints()             {
-    if (method_counters() != NULL) {
-      method_counters()->clear_number_of_breakpoints();
+    MethodCounters* mcs = method_counters();
+    if (mcs != NULL) {
+      mcs->clear_number_of_breakpoints();
     }
   }
 
@@ -305,10 +307,11 @@
   }
 
   int  interpreter_throwout_count() const        {
-    if (method_counters() == NULL) {
+    MethodCounters* mcs = method_counters();
+    if (mcs == NULL) {
       return 0;
     } else {
-      return method_counters()->interpreter_throwout_count();
+      return mcs->interpreter_throwout_count();
     }
   }
 
@@ -366,11 +369,13 @@
     return _method_counters;
   }
 
-  void set_method_counters(MethodCounters* counters) {
-    // The store into method must be released. On platforms without
-    // total store order (TSO) the reference may become visible before
-    // the initialization of data otherwise.
-    OrderAccess::release_store_ptr((volatile void *)&_method_counters, counters);
+  void clear_method_counters() {
+    _method_counters = NULL;
+  }
+
+  bool init_method_counters(MethodCounters* counters) {
+    // Try to install a pointer to MethodCounters, return true on success.
+    return Atomic::cmpxchg_ptr(counters, (volatile void*)&_method_counters, NULL) == NULL;
   }
 
 #ifdef TIERED
@@ -383,26 +388,28 @@
       return method_counters()->interpreter_invocation_count();
     }
   }
-  void set_prev_event_count(int count, TRAPS)    {
-    MethodCounters* mcs = get_method_counters(CHECK);
+  void set_prev_event_count(int count) {
+    MethodCounters* mcs = method_counters();
     if (mcs != NULL) {
       mcs->set_interpreter_invocation_count(count);
     }
   }
   jlong prev_time() const                        {
-    return method_counters() == NULL ? 0 : method_counters()->prev_time();
+    MethodCounters* mcs = method_counters();
+    return mcs == NULL ? 0 : mcs->prev_time();
   }
-  void set_prev_time(jlong time, TRAPS)          {
-    MethodCounters* mcs = get_method_counters(CHECK);
+  void set_prev_time(jlong time) {
+    MethodCounters* mcs = method_counters();
     if (mcs != NULL) {
       mcs->set_prev_time(time);
     }
   }
   float rate() const                             {
-    return method_counters() == NULL ? 0 : method_counters()->rate();
+    MethodCounters* mcs = method_counters();
+    return mcs == NULL ? 0 : mcs->rate();
   }
-  void set_rate(float rate, TRAPS) {
-    MethodCounters* mcs = get_method_counters(CHECK);
+  void set_rate(float rate) {
+    MethodCounters* mcs = method_counters();
     if (mcs != NULL) {
       mcs->set_rate(rate);
     }
@@ -420,9 +427,12 @@
   static MethodCounters* build_method_counters(Method* m, TRAPS);
 
   int interpreter_invocation_count() {
-    if (TieredCompilation) return invocation_count();
-    else return (method_counters() == NULL) ? 0 :
-                 method_counters()->interpreter_invocation_count();
+    if (TieredCompilation) {
+      return invocation_count();
+    } else {
+      MethodCounters* mcs = method_counters();
+      return (mcs == NULL) ? 0 : mcs->interpreter_invocation_count();
+    }
   }
   int increment_interpreter_invocation_count(TRAPS) {
     if (TieredCompilation) ShouldNotReachHere();
@@ -613,6 +623,9 @@
   // returns true if the method is an accessor function (setter/getter).
   bool is_accessor() const;
 
+  // returns true if the method does nothing but return a constant of primitive type
+  bool is_constant_getter() const;
+
   // returns true if the method is an initializer (<init> or <clinit>).
   bool is_initializer() const;
 
@@ -812,6 +825,10 @@
    return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
   }
 
+  int mark_osr_nmethods() {
+    return method_holder()->mark_osr_nmethods(this);
+  }
+
   nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
     return method_holder()->lookup_osr_nmethod(this, bci, level, match_level);
   }
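
Note on the method.hpp hunk above: replacing set_method_counters() (a release store) with init_method_counters() (a compare-and-swap against NULL) makes the counters installation race-safe. Several threads may reach build_method_counters() concurrently; exactly one CAS wins, and, as the method.cpp change shows, the losers free their freshly allocated MethodCounters. A minimal stand-alone sketch of that publish-once pattern with std::atomic; the names are illustrative, not HotSpot API:

    #include <atomic>
    #include <cstdio>

    struct Counters { int invocations = 0; };

    static std::atomic<Counters*> g_counters{nullptr};

    // compare_exchange_strong plays the role of Atomic::cmpxchg_ptr(counters, &_method_counters, NULL):
    // it succeeds only if no other thread has installed a pointer yet.
    static bool try_install(Counters* c) {
      Counters* expected = nullptr;
      return g_counters.compare_exchange_strong(expected, c);
    }

    int main() {
      Counters* mine = new Counters();
      if (!try_install(mine)) {
        delete mine;             // somebody else won the race; drop our copy
      }
      std::printf("installed=%p\n", (void*)g_counters.load());
      return 0;                  // the installed object is intentionally leaked in this toy
    }
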
--- a/src/share/vm/oops/methodCounters.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/methodCounters.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -34,4 +34,40 @@
   backedge_counter()->reset();
   set_interpreter_throwout_count(0);
   set_interpreter_invocation_count(0);
+#ifdef TIERED
+  set_prev_time(0);
+  set_rate(0);
+  set_highest_comp_level(0);
+  set_highest_osr_comp_level(0);
+#endif
 }
+
+
+int MethodCounters::highest_comp_level() const {
+#ifdef TIERED
+  return _highest_comp_level;
+#else
+  return CompLevel_none;
+#endif
+}
+
+void MethodCounters::set_highest_comp_level(int level) {
+#ifdef TIERED
+  _highest_comp_level = level;
+#endif
+}
+
+int MethodCounters::highest_osr_comp_level() const {
+#ifdef TIERED
+  return _highest_osr_comp_level;
+#else
+  return CompLevel_none;
+#endif
+}
+
+void MethodCounters::set_highest_osr_comp_level(int level) {
+#ifdef TIERED
+  _highest_osr_comp_level = level;
+#endif
+}
+
--- a/src/share/vm/oops/methodCounters.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/methodCounters.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -39,6 +39,8 @@
 
 #ifdef TIERED
   float             _rate;                        // Events (invocation and backedge counter increments) per millisecond
+  u1                _highest_comp_level;          // Highest compile level this method has ever seen.
+  u1                _highest_osr_comp_level;      // Same for OSR level
   jlong             _prev_time;                   // Previous time the rate was acquired
 #endif
 
@@ -47,6 +49,8 @@
                      _number_of_breakpoints(0)
 #ifdef TIERED
                    , _rate(0),
+                     _highest_comp_level(0),
+                     _highest_osr_comp_level(0),
                      _prev_time(0)
 #endif
   {
@@ -100,6 +104,11 @@
   void set_rate(float rate)                      { _rate = rate; }
 #endif
 
+  int highest_comp_level() const;
+  void set_highest_comp_level(int level);
+  int highest_osr_comp_level() const;
+  void set_highest_osr_comp_level(int level);
+
   // invocation counter
   InvocationCounter* invocation_counter() { return &_invocation_counter; }
   InvocationCounter* backedge_counter()   { return &_backedge_counter; }
--- a/src/share/vm/oops/methodData.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/methodData.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -34,6 +34,7 @@
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/handles.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
@@ -1233,9 +1234,8 @@
   _backedge_counter_start = 0;
   _num_loops = 0;
   _num_blocks = 0;
-  _highest_comp_level = 0;
-  _highest_osr_comp_level = 0;
-  _would_profile = true;
+  _would_profile = unknown;
+
 #ifdef GRAAL
   _graal_node_count = 0;
 #endif
--- a/src/share/vm/oops/methodData.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/methodData.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -542,6 +542,7 @@
     // null_seen:
     //  saw a null operand (cast/aastore/instanceof)
       null_seen_flag              = DataLayout::first_flag + 0
+
 #ifdef GRAAL
     // bytecode threw any exception
     , exception_seen_flag         = null_seen_flag + 1
@@ -568,6 +569,7 @@
   // Consulting it allows the compiler to avoid setting up null_check traps.
   bool null_seen()     { return flag_at(null_seen_flag); }
   void set_null_seen()    { set_flag_at(null_seen_flag); }
+
 #ifdef GRAAL
   // true if an exception was thrown at the specific BCI
   bool exception_seen() { return flag_at(exception_seen_flag); }
@@ -2163,7 +2165,7 @@
 
   // Whole-method sticky bits and flags
   enum {
-    _trap_hist_limit    = 19 GRAAL_ONLY(+5),   // decoupled from Deoptimization::Reason_LIMIT
+    _trap_hist_limit    = 20 GRAAL_ONLY(+5),   // decoupled from Deoptimization::Reason_LIMIT
     _trap_hist_mask     = max_jubyte,
     _extra_data_count   = 4     // extra DataLayout headers, for trap history
   }; // Public flag values
@@ -2173,7 +2175,7 @@
   uint _nof_overflow_traps;         // trap count, excluding _trap_hist
   union {
     intptr_t _align;
-    u1 _array[GRAAL_ONLY(2*)_trap_hist_limit];
+    u1 _array[_trap_hist_limit];
   } _trap_hist;
 
   // Support for interprocedural escape analysis, from Thomas Kotzmann.
@@ -2204,12 +2206,9 @@
   // time with C1. It is used to determine if method is trivial.
   short             _num_loops;
   short             _num_blocks;
-  // Highest compile level this method has ever seen.
-  u1                _highest_comp_level;
-  // Same for OSR level
-  u1                _highest_osr_comp_level;
   // Does this method contain anything worth profiling?
-  bool              _would_profile;
+  enum WouldProfile {unknown, no_profile, profile};
+  WouldProfile      _would_profile;
 
 #ifdef GRAAL
   // Support for HotSpotMethodData.setCompiledGraphSize(int)
@@ -2383,13 +2382,8 @@
   }
 #endif
 
-  void set_would_profile(bool p)              { _would_profile = p;    }
-  bool would_profile() const                  { return _would_profile; }
-
-  int highest_comp_level() const              { return _highest_comp_level;      }
-  void set_highest_comp_level(int level)      { _highest_comp_level = level;     }
-  int highest_osr_comp_level() const          { return _highest_osr_comp_level;  }
-  void set_highest_osr_comp_level(int level)  { _highest_osr_comp_level = level; }
+  void set_would_profile(bool p)              { _would_profile = p ? profile : no_profile; }
+  bool would_profile() const                  { return _would_profile != no_profile; }
 
   int num_loops() const                       { return _num_loops;  }
   void set_num_loops(int n)                   { _num_loops = n;     }
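
Note on the methodData.hpp hunk above: _would_profile changes from a bool (defaulting to true) to a three-valued enum initialized to unknown. Because would_profile() now returns _would_profile != no_profile, the "unknown" state still answers true until a compiler actually records a decision, preserving the old default behavior. A tiny illustrative model of that tri-state (not HotSpot code):

    #include <cstdio>

    enum WouldProfile { unknown, no_profile, profile };

    struct Data {
      WouldProfile wp = unknown;                                      // new default
      void set_would_profile(bool p) { wp = p ? profile : no_profile; }
      bool would_profile() const     { return wp != no_profile; }     // unknown counts as "yes"
    };

    int main() {
      Data d;
      std::printf("%d", d.would_profile());     // 1: no decision recorded yet
      d.set_would_profile(false);
      std::printf(" %d\n", d.would_profile());  // 0: compiler said "not worth profiling"
      return 0;
    }
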
--- a/src/share/vm/oops/objArrayKlass.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/objArrayKlass.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -29,6 +29,7 @@
 #include "gc_implementation/shared/markSweep.inline.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "memory/genOopClosures.inline.hpp"
+#include "memory/iterator.inline.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.inline.hpp"
@@ -42,6 +43,7 @@
 #include "oops/symbol.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
@@ -49,7 +51,7 @@
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionManager.inline.hpp"
 #include "gc_implementation/parNew/parOopClosures.inline.hpp"
 #include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
@@ -475,12 +477,6 @@
 }
 #endif // INCLUDE_ALL_GCS
 
-#define if_do_metadata_checked(closure, nv_suffix)                    \
-  /* Make sure the non-virtual and the virtual versions match. */     \
-  assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \
-      "Inconsistency in do_metadata");                                \
-  if (closure->do_metadata##nv_suffix())
-
 #define ObjArrayKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)           \
                                                                                 \
 int ObjArrayKlass::oop_oop_iterate##nv_suffix(oop obj,                          \
--- a/src/share/vm/oops/objArrayOop.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/objArrayOop.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -45,9 +45,10 @@
 private:
   // Give size of objArrayOop in HeapWords minus the header
   static int array_size(int length) {
-    const int OopsPerHeapWord = HeapWordSize/heapOopSize;
+    const uint OopsPerHeapWord = HeapWordSize/heapOopSize;
     assert(OopsPerHeapWord >= 1 && (HeapWordSize % heapOopSize == 0),
            "Else the following (new) computation would be in error");
+    uint res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord;
 #ifdef ASSERT
     // The old code is left in for sanity-checking; it'll
     // go away pretty soon. XXX
@@ -55,16 +56,15 @@
     // oop->length() * HeapWordsPerOop;
     // With narrowOops, HeapWordsPerOop is 1/2 or equal 0 as an integer.
     // The oop elements are aligned up to wordSize
-    const int HeapWordsPerOop = heapOopSize/HeapWordSize;
-    int old_res;
+    const uint HeapWordsPerOop = heapOopSize/HeapWordSize;
+    uint old_res;
     if (HeapWordsPerOop > 0) {
       old_res = length * HeapWordsPerOop;
     } else {
-      old_res = align_size_up(length, OopsPerHeapWord)/OopsPerHeapWord;
+      old_res = align_size_up((uint)length, OopsPerHeapWord)/OopsPerHeapWord;
     }
+    assert(res == old_res, "Inconsistency between old and new.");
 #endif  // ASSERT
-    int res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord;
-    assert(res == old_res, "Inconsistency between old and new.");
     return res;
   }
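
Note on the objArrayOop.hpp hunk above: array_size() now does the whole computation in uint and keeps the old cross-check entirely inside the #ifdef ASSERT block; the result is still "length oops rounded up to whole heap words". As a worked example (values assumed for a 64-bit VM with compressed oops, i.e. HeapWordSize = 8 and heapOopSize = 4, so OopsPerHeapWord = 2): length 5 gives (5 + 2 - 1) / 2 = 3 heap words. A tiny sketch of the same rounding, with the constants hard-coded rather than read from HotSpot:

    #include <cstdio>

    int main() {
      const unsigned HeapWordSize = 8, heapOopSize = 4;                // assumed values
      const unsigned OopsPerHeapWord = HeapWordSize / heapOopSize;     // 2
      for (unsigned length : {0u, 1u, 5u, 6u}) {
        unsigned words = (length + OopsPerHeapWord - 1) / OopsPerHeapWord;
        std::printf("length=%u -> %u heap words\n", length, words);    // 0, 1, 3, 3
      }
      return 0;
    }
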
 
--- a/src/share/vm/oops/oop.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/oop.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -109,12 +109,13 @@
   int size_given_klass(Klass* klass);
 
   // type test operations (inlined in oop.inline.h)
-  bool is_instance()           const;
-  bool is_instanceMirror()     const;
-  bool is_instanceRef()        const;
-  bool is_array()              const;
-  bool is_objArray()           const;
-  bool is_typeArray()          const;
+  bool is_instance()            const;
+  bool is_instanceMirror()      const;
+  bool is_instanceClassLoader() const;
+  bool is_instanceRef()         const;
+  bool is_array()               const;
+  bool is_objArray()            const;
+  bool is_typeArray()           const;
 
  private:
   // field addresses in oop
--- a/src/share/vm/oops/oop.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/oop.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -38,7 +38,8 @@
 #include "oops/klass.inline.hpp"
 #include "oops/markOop.inline.hpp"
 #include "oops/oop.hpp"
-#include "runtime/atomic.hpp"
+#include "runtime/atomic.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 #include "utilities/macros.hpp"
 #ifdef TARGET_ARCH_x86
@@ -147,12 +148,13 @@
 
 inline bool oopDesc::is_a(Klass* k)        const { return klass()->is_subtype_of(k); }
 
-inline bool oopDesc::is_instance()           const { return klass()->oop_is_instance(); }
-inline bool oopDesc::is_instanceMirror()     const { return klass()->oop_is_instanceMirror(); }
-inline bool oopDesc::is_instanceRef()        const { return klass()->oop_is_instanceRef(); }
-inline bool oopDesc::is_array()              const { return klass()->oop_is_array(); }
-inline bool oopDesc::is_objArray()           const { return klass()->oop_is_objArray(); }
-inline bool oopDesc::is_typeArray()          const { return klass()->oop_is_typeArray(); }
+inline bool oopDesc::is_instance()            const { return klass()->oop_is_instance(); }
+inline bool oopDesc::is_instanceClassLoader() const { return klass()->oop_is_instanceClassLoader(); }
+inline bool oopDesc::is_instanceMirror()      const { return klass()->oop_is_instanceMirror(); }
+inline bool oopDesc::is_instanceRef()         const { return klass()->oop_is_instanceRef(); }
+inline bool oopDesc::is_array()               const { return klass()->oop_is_array(); }
+inline bool oopDesc::is_objArray()            const { return klass()->oop_is_objArray(); }
+inline bool oopDesc::is_typeArray()           const { return klass()->oop_is_typeArray(); }
 
 inline void*     oopDesc::field_base(int offset)        const { return (void*)&((char*)this)[offset]; }
 
--- a/src/share/vm/oops/oop.pcgc.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/oop.pcgc.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -54,8 +54,6 @@
   klass()->oop_follow_contents(cm, this);
 }
 
-// Used by parallel old GC.
-
 inline oop oopDesc::forward_to_atomic(oop p) {
   assert(ParNewGeneration::is_legal_forward_ptr(p),
          "illegal forwarding pointer value.");
--- a/src/share/vm/oops/typeArrayKlass.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/typeArrayKlass.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -39,6 +39,7 @@
 #include "oops/typeArrayKlass.hpp"
 #include "oops/typeArrayOop.hpp"
 #include "runtime/handles.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "utilities/macros.hpp"
 
 bool TypeArrayKlass::compute_is_subtype_of(Klass* k) {
--- a/src/share/vm/oops/typeArrayOop.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/oops/typeArrayOop.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -27,39 +27,7 @@
 
 #include "oops/arrayOop.hpp"
 #include "oops/typeArrayKlass.hpp"
-#ifdef TARGET_OS_ARCH_linux_x86
-# include "orderAccess_linux_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_sparc
-# include "orderAccess_linux_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_zero
-# include "orderAccess_linux_zero.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_x86
-# include "orderAccess_solaris_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_sparc
-# include "orderAccess_solaris_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_windows_x86
-# include "orderAccess_windows_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_arm
-# include "orderAccess_linux_arm.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_ppc
-# include "orderAccess_linux_ppc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_aix_ppc
-# include "orderAccess_aix_ppc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_x86
-# include "orderAccess_bsd_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_zero
-# include "orderAccess_bsd_zero.inline.hpp"
-#endif
+#include "runtime/orderAccess.inline.hpp"
 
 // A typeArrayOop is an array containing basic types (non oop elements).
 // It is used for arrays of {characters, singles, doubles, bytes, shorts, integers, longs}
@@ -182,7 +150,7 @@
     DEBUG_ONLY(BasicType etype = Klass::layout_helper_element_type(lh));
     assert(length <= arrayOopDesc::max_array_length(etype), "no overflow");
 
-    julong size_in_bytes = length;
+    julong size_in_bytes = (juint)length;
     size_in_bytes <<= element_shift;
     size_in_bytes += instance_header_size;
     julong size_in_words = ((size_in_bytes + (HeapWordSize-1)) >> LogHeapWordSize);
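
Note on the typeArrayOop.hpp hunk above: besides swapping the per-platform orderAccess includes for runtime/orderAccess.inline.hpp, the only functional change is the (juint) cast, which keeps the widening of length into the julong byte count unsigned. As a worked example of the surrounding computation (with an assumed 64-bit layout, HeapWordSize = 8 / LogHeapWordSize = 3, element_shift = 1 for 2-byte elements, and an assumed 16-byte header; none of these values are read from HotSpot):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int      length = 10, element_shift = 1;   // e.g. a 10-element char array
      const uint64_t instance_header_size = 16;        // assumed header size
      uint64_t size_in_bytes = (uint32_t)length;       // the new (juint)-style cast
      size_in_bytes <<= element_shift;                 // 20 bytes of payload
      size_in_bytes += instance_header_size;           // 36 bytes total
      uint64_t size_in_words = (size_in_bytes + 7) >> 3;
      std::printf("%llu heap words\n", (unsigned long long)size_in_words);  // prints 5
      return 0;
    }
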
--- a/src/share/vm/opto/bytecodeInfo.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/bytecodeInfo.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -107,7 +107,7 @@
                                int caller_bci, ciCallProfile& profile,
                                WarmCallInfo* wci_result) {
   // Allows targeted inlining
-  if(callee_method->should_inline()) {
+  if (callee_method->should_inline()) {
     *wci_result = *(WarmCallInfo::always_hot());
     if (C->print_inlining() && Verbose) {
       CompileTask::print_inline_indent(inline_level());
@@ -118,6 +118,12 @@
     return true;
   }
 
+  if (callee_method->force_inline()) {
+      set_msg("force inline by annotation");
+      _forced_inline = true;
+      return true;
+  }
+
 #ifndef PRODUCT
   int inline_depth = inline_level()+1;
   if (ciReplay::should_inline(C->replay_inline_data(), callee_method, caller_bci, inline_depth)) {
@@ -244,6 +250,11 @@
   }
 #endif
 
+  if (callee_method->force_inline()) {
+    set_msg("force inline by annotation");
+    return false;
+  }
+
   // Now perform checks which are heuristic
 
   if (is_unboxing_method(callee_method, C)) {
@@ -251,12 +262,10 @@
     return false;
   }
 
-  if (!callee_method->force_inline()) {
-    if (callee_method->has_compiled_code() &&
-        callee_method->instructions_size() > InlineSmallCode) {
-      set_msg("already compiled into a big method");
-      return true;
-    }
+  if (callee_method->has_compiled_code() &&
+      callee_method->instructions_size() > InlineSmallCode) {
+    set_msg("already compiled into a big method");
+    return true;
   }
 
   // don't inline exception code unless the top method belongs to an
@@ -349,7 +358,7 @@
       // Escape Analysis stress testing when running Xcomp or CTW:
       // inline constructors even if they are not reached.
     } else if (forced_inline()) {
-      // Inlining was forced by CompilerOracle or ciReplay
+      // Inlining was forced by CompilerOracle, ciReplay or annotation
     } else if (profile.count() == 0) {
       // don't inline unreached call sites
        set_msg("call site not reached");
--- a/src/share/vm/opto/c2_globals.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/c2_globals.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -473,6 +473,9 @@
   product(bool, DoEscapeAnalysis, true,                                     \
           "Perform escape analysis")                                        \
                                                                             \
+  product(double, EscapeAnalysisTimeout, 20. DEBUG_ONLY(+40.),              \
+          "Abort EA when it reaches time limit (in sec)")                   \
+                                                                            \
   develop(bool, ExitEscapeAnalysisOnTimeout, true,                          \
           "Exit or throw assert in EA when it reaches time limit")          \
                                                                             \
@@ -644,7 +647,7 @@
   develop(bool, AlwaysIncrementalInline, false,                             \
           "do all inlining incrementally")                                  \
                                                                             \
-  product(intx, LiveNodeCountInliningCutoff, 20000,                         \
+  product(intx, LiveNodeCountInliningCutoff, 40000,                         \
           "max number of live nodes in a method")                           \
                                                                             \
   diagnostic(bool, OptimizeExpensiveOps, true,                              \
@@ -653,8 +656,8 @@
   product(bool, UseMathExactIntrinsics, true,                               \
           "Enables intrinsification of various java.lang.Math functions")   \
                                                                             \
-  experimental(bool, ReplaceInParentMaps, false,                            \
-          "Propagate type improvements in callers of inlinee if possible")  \
+  product(bool, UseMultiplyToLenIntrinsic, false,                           \
+          "Enables intrinsification of BigInteger.multiplyToLen()")         \
                                                                             \
   product(bool, UseTypeSpeculation, true,                                   \
           "Speculatively propagate types from profiles")                    \
--- a/src/share/vm/opto/callGenerator.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/callGenerator.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -63,12 +63,12 @@
   }
 
   virtual bool      is_parse() const           { return true; }
-  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
+  virtual JVMState* generate(JVMState* jvms);
   int is_osr() { return _is_osr; }
 
 };
 
-JVMState* ParseGenerator::generate(JVMState* jvms, Parse* parent_parser) {
+JVMState* ParseGenerator::generate(JVMState* jvms) {
   Compile* C = Compile::current();
 
   if (is_osr()) {
@@ -80,7 +80,7 @@
     return NULL;  // bailing out of the compile; do not try to parse
   }
 
-  Parse parser(jvms, method(), _expected_uses, parent_parser);
+  Parse parser(jvms, method(), _expected_uses);
   // Grab signature for matching/allocation
 #ifdef ASSERT
   if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
@@ -119,12 +119,12 @@
       _separate_io_proj(separate_io_proj)
   {
   }
-  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
+  virtual JVMState* generate(JVMState* jvms);
 
   CallStaticJavaNode* call_node() const { return _call_node; }
 };
 
-JVMState* DirectCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
+JVMState* DirectCallGenerator::generate(JVMState* jvms) {
   GraphKit kit(jvms);
   bool is_static = method()->is_static();
   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
@@ -171,10 +171,10 @@
            vtable_index >= 0, "either invalid or usable");
   }
   virtual bool      is_virtual() const          { return true; }
-  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
+  virtual JVMState* generate(JVMState* jvms);
 };
 
-JVMState* VirtualCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
+JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
   GraphKit kit(jvms);
   Node* receiver = kit.argument(0);
 
@@ -276,7 +276,7 @@
   // Convert the CallStaticJava into an inline
   virtual void do_late_inline();
 
-  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
+  virtual JVMState* generate(JVMState* jvms) {
     Compile *C = Compile::current();
     C->print_inlining_skip(this);
 
@@ -290,7 +290,7 @@
     // that the late inlining logic can distinguish between fall
     // through and exceptional uses of the memory and io projections
     // as is done for allocations and macro expansion.
-    return DirectCallGenerator::generate(jvms, parent_parser);
+    return DirectCallGenerator::generate(jvms);
   }
 
   virtual void print_inlining_late(const char* msg) {
@@ -389,7 +389,7 @@
   }
 
   // Now perform the inlining using the synthesized JVMState
-  JVMState* new_jvms = _inline_cg->generate(jvms, NULL);
+  JVMState* new_jvms = _inline_cg->generate(jvms);
   if (new_jvms == NULL)  return;  // no change
   if (C->failing())      return;
 
@@ -407,7 +407,7 @@
   C->env()->notice_inlined_method(_inline_cg->method());
   C->set_inlining_progress(true);
 
-  kit.replace_call(call, result);
+  kit.replace_call(call, result, true);
 }
 
 
@@ -429,8 +429,8 @@
 
   virtual bool is_mh_late_inline() const { return true; }
 
-  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
-    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser);
+  virtual JVMState* generate(JVMState* jvms) {
+    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);
     if (_input_not_const) {
       // inlining won't be possible so no need to enqueue right now.
       call_node()->set_generator(this);
@@ -477,13 +477,13 @@
   LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
     LateInlineCallGenerator(method, inline_cg) {}
 
-  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
+  virtual JVMState* generate(JVMState* jvms) {
     Compile *C = Compile::current();
     C->print_inlining_skip(this);
 
     C->add_string_late_inline(this);
 
-    JVMState* new_jvms =  DirectCallGenerator::generate(jvms, parent_parser);
+    JVMState* new_jvms =  DirectCallGenerator::generate(jvms);
     return new_jvms;
   }
 
@@ -500,13 +500,13 @@
   LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
     LateInlineCallGenerator(method, inline_cg) {}
 
-  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
+  virtual JVMState* generate(JVMState* jvms) {
     Compile *C = Compile::current();
     C->print_inlining_skip(this);
 
     C->add_boxing_late_inline(this);
 
-    JVMState* new_jvms =  DirectCallGenerator::generate(jvms, parent_parser);
+    JVMState* new_jvms =  DirectCallGenerator::generate(jvms);
     return new_jvms;
   }
 };
@@ -542,7 +542,7 @@
   virtual bool      is_virtual() const          { return _is_virtual; }
   virtual bool      is_deferred() const         { return true; }
 
-  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
+  virtual JVMState* generate(JVMState* jvms);
 };
 
 
@@ -552,12 +552,12 @@
   return new WarmCallGenerator(ci, if_cold, if_hot);
 }
 
-JVMState* WarmCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
+JVMState* WarmCallGenerator::generate(JVMState* jvms) {
   Compile* C = Compile::current();
   if (C->log() != NULL) {
     C->log()->elem("warm_call bci='%d'", jvms->bci());
   }
-  jvms = _if_cold->generate(jvms, parent_parser);
+  jvms = _if_cold->generate(jvms);
   if (jvms != NULL) {
     Node* m = jvms->map()->control();
     if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
@@ -618,7 +618,7 @@
   virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
   virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }
 
-  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
+  virtual JVMState* generate(JVMState* jvms);
 };
 
 
@@ -630,7 +630,7 @@
 }
 
 
-JVMState* PredictedCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
+JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
   GraphKit kit(jvms);
   PhaseGVN& gvn = kit.gvn();
   // We need an explicit receiver null_check before checking its type.
@@ -648,6 +648,10 @@
     return kit.transfer_exceptions_into_jvms();
   }
 
+  // Make a copy of the replaced nodes in case we need to restore them
+  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
+  replaced_nodes.clone();
+
   Node* exact_receiver = receiver;  // will get updated in place...
   Node* slow_ctl = kit.type_check_receiver(receiver,
                                            _predicted_receiver, _hit_prob,
@@ -658,7 +662,7 @@
   { PreserveJVMState pjvms(&kit);
     kit.set_control(slow_ctl);
     if (!kit.stopped()) {
-      slow_jvms = _if_missed->generate(kit.sync_jvms(), parent_parser);
+      slow_jvms = _if_missed->generate(kit.sync_jvms());
       if (kit.failing())
         return NULL;  // might happen because of NodeCountInliningCutoff
       assert(slow_jvms != NULL, "must be");
@@ -679,12 +683,12 @@
   kit.replace_in_map(receiver, exact_receiver);
 
   // Make the hot call:
-  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms(), parent_parser);
+  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
   if (new_jvms == NULL) {
     // Inline failed, so make a direct call.
     assert(_if_hit->is_inline(), "must have been a failed inline");
     CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
-    new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
+    new_jvms = cg->generate(kit.sync_jvms());
   }
   kit.add_exception_states_from(new_jvms);
   kit.set_jvms(new_jvms);
@@ -701,6 +705,11 @@
     return kit.transfer_exceptions_into_jvms();
   }
 
+  // There are 2 branches and the replaced nodes are only valid on
+  // one: restore the replaced nodes to what they were before the
+  // branch.
+  kit.map()->set_replaced_nodes(replaced_nodes);
+
   // Finish the diamond.
   kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
   RegionNode* region = new (kit.C) RegionNode(3);
@@ -710,7 +719,15 @@
   Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
   iophi->set_req(2, slow_map->i_o());
   kit.set_i_o(gvn.transform(iophi));
+  // Merge memory
   kit.merge_memory(slow_map->merged_memory(), region, 2);
+  // Transform new memory Phis.
+  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
+    Node* phi = mms.memory();
+    if (phi->is_Phi() && phi->in(0) == region) {
+      mms.set_memory(gvn.transform(phi));
+    }
+  }
   uint tos = kit.jvms()->stkoff() + kit.sp();
   uint limit = slow_map->req();
   for (uint i = TypeFunc::Parms; i < limit; i++) {
@@ -845,7 +862,7 @@
                                             call_does_dispatch, vtable_index);  // out-parameters
           // We lack profiling at this call but type speculation may
           // provide us with a type
-          speculative_receiver_type = receiver_type->speculative_type();
+          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
         }
 
         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true);
@@ -864,15 +881,15 @@
 }
 
 
-//------------------------PredictedIntrinsicGenerator------------------------------
-// Internal class which handles all predicted Intrinsic calls.
-class PredictedIntrinsicGenerator : public CallGenerator {
+//------------------------PredicatedIntrinsicGenerator------------------------------
+// Internal class which handles all predicated Intrinsic calls.
+class PredicatedIntrinsicGenerator : public CallGenerator {
   CallGenerator* _intrinsic;
   CallGenerator* _cg;
 
 public:
-  PredictedIntrinsicGenerator(CallGenerator* intrinsic,
-                              CallGenerator* cg)
+  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
+                               CallGenerator* cg)
     : CallGenerator(cg->method())
   {
     _intrinsic = intrinsic;
@@ -883,107 +900,186 @@
   virtual bool      is_inlined()   const    { return true; }
   virtual bool      is_intrinsic() const    { return true; }
 
-  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
+  virtual JVMState* generate(JVMState* jvms);
 };
 
 
-CallGenerator* CallGenerator::for_predicted_intrinsic(CallGenerator* intrinsic,
-                                                      CallGenerator* cg) {
-  return new PredictedIntrinsicGenerator(intrinsic, cg);
+CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
+                                                       CallGenerator* cg) {
+  return new PredicatedIntrinsicGenerator(intrinsic, cg);
 }
 
 
-JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms, Parse* parent_parser) {
+JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
+  // The code we want to generate here is:
+  //    if (receiver == NULL)
+  //        uncommon_Trap
+  //    if (predicate(0))
+  //        do_intrinsic(0)
+  //    else
+  //    if (predicate(1))
+  //        do_intrinsic(1)
+  //    ...
+  //    else
+  //        do_java_comp
+
   GraphKit kit(jvms);
   PhaseGVN& gvn = kit.gvn();
 
   CompileLog* log = kit.C->log();
   if (log != NULL) {
-    log->elem("predicted_intrinsic bci='%d' method='%d'",
+    log->elem("predicated_intrinsic bci='%d' method='%d'",
               jvms->bci(), log->identify(method()));
   }
 
-  Node* slow_ctl = _intrinsic->generate_predicate(kit.sync_jvms());
-  if (kit.failing())
-    return NULL;  // might happen because of NodeCountInliningCutoff
-
-  SafePointNode* slow_map = NULL;
-  JVMState* slow_jvms;
-  if (slow_ctl != NULL) {
-    PreserveJVMState pjvms(&kit);
-    kit.set_control(slow_ctl);
-    if (!kit.stopped()) {
-      slow_jvms = _cg->generate(kit.sync_jvms(), parent_parser);
-      if (kit.failing())
-        return NULL;  // might happen because of NodeCountInliningCutoff
-      assert(slow_jvms != NULL, "must be");
-      kit.add_exception_states_from(slow_jvms);
-      kit.set_map(slow_jvms->map());
-      if (!kit.stopped())
-        slow_map = kit.stop();
+  if (!method()->is_static()) {
+    // We need an explicit receiver null_check before checking its type in predicate.
+    // We share a map with the caller, so his JVMS gets adjusted.
+    Node* receiver = kit.null_check_receiver_before_call(method());
+    if (kit.stopped()) {
+      return kit.transfer_exceptions_into_jvms();
     }
   }
 
-  if (kit.stopped()) {
-    // Predicate is always false.
-    kit.set_jvms(slow_jvms);
+  int n_predicates = _intrinsic->predicates_count();
+  assert(n_predicates > 0, "sanity");
+
+  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));
+
+  // Region for normal compilation code if intrinsic failed.
+  Node* slow_region = new (kit.C) RegionNode(1);
+
+  int results = 0;
+  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
+#ifdef ASSERT
+    JVMState* old_jvms = kit.jvms();
+    SafePointNode* old_map = kit.map();
+    Node* old_io  = old_map->i_o();
+    Node* old_mem = old_map->memory();
+    Node* old_exc = old_map->next_exception();
+#endif
+    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
+#ifdef ASSERT
+    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
+    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
+    SafePointNode* new_map = kit.map();
+    assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
+    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
+    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
+#endif
+    if (!kit.stopped()) {
+      PreserveJVMState pjvms(&kit);
+      // Generate intrinsic code:
+      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
+      if (new_jvms == NULL) {
+        // Intrinsic failed, use normal compilation path for this predicate.
+        slow_region->add_req(kit.control());
+      } else {
+        kit.add_exception_states_from(new_jvms);
+        kit.set_jvms(new_jvms);
+        if (!kit.stopped()) {
+          result_jvms[results++] = kit.jvms();
+        }
+      }
+    }
+    if (else_ctrl == NULL) {
+      else_ctrl = kit.C->top();
+    }
+    kit.set_control(else_ctrl);
+  }
+  if (!kit.stopped()) {
+    // Final 'else' after predicates.
+    slow_region->add_req(kit.control());
+  }
+  if (slow_region->req() > 1) {
+    PreserveJVMState pjvms(&kit);
+    // Generate normal compilation code:
+    kit.set_control(gvn.transform(slow_region));
+    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
+    if (kit.failing())
+      return NULL;  // might happen because of NodeCountInliningCutoff
+    assert(new_jvms != NULL, "must be");
+    kit.add_exception_states_from(new_jvms);
+    kit.set_jvms(new_jvms);
+    if (!kit.stopped()) {
+      result_jvms[results++] = kit.jvms();
+    }
+  }
+
+  if (results == 0) {
+    // All paths ended in uncommon traps.
+    (void) kit.stop();
     return kit.transfer_exceptions_into_jvms();
   }
 
-  // Generate intrinsic code:
-  JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms(), parent_parser);
-  if (new_jvms == NULL) {
-    // Intrinsic failed, so use slow code or make a direct call.
-    if (slow_map == NULL) {
-      CallGenerator* cg = CallGenerator::for_direct_call(method());
-      new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
-    } else {
-      kit.set_jvms(slow_jvms);
-      return kit.transfer_exceptions_into_jvms();
-    }
-  }
-  kit.add_exception_states_from(new_jvms);
-  kit.set_jvms(new_jvms);
-
-  // Need to merge slow and fast?
-  if (slow_map == NULL) {
-    // The fast path is the only path remaining.
+  if (results == 1) { // Only one path
+    kit.set_jvms(result_jvms[0]);
     return kit.transfer_exceptions_into_jvms();
   }
 
-  if (kit.stopped()) {
-    // Intrinsic method threw an exception, so it's just the slow path after all.
-    kit.set_jvms(slow_jvms);
-    return kit.transfer_exceptions_into_jvms();
+  // Merge all paths.
+  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
+  RegionNode* region = new (kit.C) RegionNode(results + 1);
+  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
+  for (int i = 0; i < results; i++) {
+    JVMState* jvms = result_jvms[i];
+    int path = i + 1;
+    SafePointNode* map = jvms->map();
+    region->init_req(path, map->control());
+    iophi->set_req(path, map->i_o());
+    if (i == 0) {
+      kit.set_jvms(jvms);
+    } else {
+      kit.merge_memory(map->merged_memory(), region, path);
+    }
+  }
+  kit.set_control(gvn.transform(region));
+  kit.set_i_o(gvn.transform(iophi));
+  // Transform new memory Phis.
+  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
+    Node* phi = mms.memory();
+    if (phi->is_Phi() && phi->in(0) == region) {
+      mms.set_memory(gvn.transform(phi));
+    }
   }
 
-  // Finish the diamond.
-  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
-  RegionNode* region = new (kit.C) RegionNode(3);
-  region->init_req(1, kit.control());
-  region->init_req(2, slow_map->control());
-  kit.set_control(gvn.transform(region));
-  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
-  iophi->set_req(2, slow_map->i_o());
-  kit.set_i_o(gvn.transform(iophi));
-  kit.merge_memory(slow_map->merged_memory(), region, 2);
+  // Merge debug info.
+  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
   uint tos = kit.jvms()->stkoff() + kit.sp();
-  uint limit = slow_map->req();
+  Node* map = kit.map();
+  uint limit = map->req();
   for (uint i = TypeFunc::Parms; i < limit; i++) {
     // Skip unused stack slots; fast forward to monoff();
     if (i == tos) {
       i = kit.jvms()->monoff();
       if( i >= limit ) break;
     }
-    Node* m = kit.map()->in(i);
-    Node* n = slow_map->in(i);
-    if (m != n) {
-      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
-      Node* phi = PhiNode::make(region, m, t);
-      phi->set_req(2, n);
-      kit.map()->set_req(i, gvn.transform(phi));
+    Node* n = map->in(i);
+    ins[0] = n;
+    const Type* t = gvn.type(n);
+    bool needs_phi = false;
+    for (int j = 1; j < results; j++) {
+      JVMState* jvms = result_jvms[j];
+      Node* jmap = jvms->map();
+      Node* m = NULL;
+      if (jmap->req() > i) {
+        m = jmap->in(i);
+        if (m != n) {
+          needs_phi = true;
+          t = t->meet_speculative(gvn.type(m));
+        }
+      }
+      ins[j] = m;
+    }
+    if (needs_phi) {
+      Node* phi = PhiNode::make(region, n, t);
+      for (int j = 1; j < results; j++) {
+        phi->set_req(j + 1, ins[j]);
+      }
+      map->set_req(i, gvn.transform(phi));
     }
   }
+
   return kit.transfer_exceptions_into_jvms();
 }
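
The merge above walks every locals/stack/monitor slot of the surviving maps and creates a Phi only when at least two paths disagree on the slot's value, taking the speculative type meet of the inputs when it does. A minimal standalone sketch of that "phi only where needed" bookkeeping, with strings standing in for ideal nodes (an illustration only, not HotSpot API):

    #include <cstdio>
    #include <string>
    #include <vector>

    // One "map" per surviving path: slot index -> symbolic value.
    typedef std::vector<std::string> SlotMap;

    // Merge several maps slot by slot (all paths assumed to have the same
    // number of slots here). A slot whose value differs between paths is
    // replaced by a synthetic "phi(...)"; identical slots are left alone.
    static SlotMap merge_paths(const std::vector<SlotMap>& paths) {
      SlotMap merged = paths[0];
      for (size_t i = 0; i < merged.size(); i++) {
        bool needs_phi = false;
        for (size_t j = 1; j < paths.size(); j++) {
          if (paths[j][i] != merged[i]) needs_phi = true;
        }
        if (!needs_phi) continue;          // all paths agree: no Phi needed
        std::string phi = "phi(" + merged[i];
        for (size_t j = 1; j < paths.size(); j++) {
          phi += "," + paths[j][i];
        }
        merged[i] = phi + ")";
      }
      return merged;
    }

    int main() {
      std::vector<SlotMap> paths;
      paths.push_back(SlotMap{"recv", "x", "0"});   // state after path 1
      paths.push_back(SlotMap{"recv", "y", "0"});   // state after path 2
      SlotMap m = merge_paths(paths);
      for (size_t i = 0; i < m.size(); i++) {
        std::printf("slot %zu: %s\n", i, m[i].c_str());
      }
      return 0;
    }

In the kit itself the first surviving path's map is reused in place, and a slot that is simply missing from a shorter map does not by itself force a Phi.
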
 
@@ -1006,7 +1102,7 @@
   virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
   virtual bool      is_trap() const             { return true; }
 
-  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
+  virtual JVMState* generate(JVMState* jvms);
 };
 
 
@@ -1018,7 +1114,7 @@
 }
 
 
-JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
+JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
   GraphKit kit(jvms);
   // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
   int nargs = method()->arg_size();
--- a/src/share/vm/opto/callGenerator.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/callGenerator.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -31,8 +31,6 @@
 #include "opto/type.hpp"
 #include "runtime/deoptimization.hpp"
 
-class Parse;
-
 //---------------------------CallGenerator-------------------------------------
 // The subclasses of this class handle generation of ideal nodes for
 // call sites and method entry points.
@@ -63,8 +61,9 @@
   virtual bool      is_virtual() const          { return false; }
   // is_deferred: The decision whether to inline or not is deferred.
   virtual bool      is_deferred() const         { return false; }
-  // is_predicted: Uses an explicit check against a predicted type.
-  virtual bool      is_predicted() const        { return false; }
+  // is_predicated: Uses an explicit check (predicate).
+  virtual bool      is_predicated() const       { return false; }
+  virtual int       predicates_count() const    { return 0; }
   // is_trap: Does not return to the caller.  (E.g., uncommon trap.)
   virtual bool      is_trap() const             { return false; }
   // does_virtual_dispatch: Should try inlining as normal method first.
@@ -111,7 +110,7 @@
   //
   // If the result is NULL, it means that this CallGenerator was unable
   // to handle the given call, and another CallGenerator should be consulted.
-  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) = 0;
+  virtual JVMState* generate(JVMState* jvms) = 0;
 
   // How to generate a call site that is inlined:
   static CallGenerator* for_inline(ciMethod* m, float expected_uses = -1);
@@ -157,9 +156,9 @@
   // Registry for intrinsics:
   static CallGenerator* for_intrinsic(ciMethod* m);
   static void register_intrinsic(ciMethod* m, CallGenerator* cg);
-  static CallGenerator* for_predicted_intrinsic(CallGenerator* intrinsic,
-                                                CallGenerator* cg);
-  virtual Node* generate_predicate(JVMState* jvms) { return NULL; };
+  static CallGenerator* for_predicated_intrinsic(CallGenerator* intrinsic,
+                                                 CallGenerator* cg);
+  virtual Node* generate_predicate(JVMState* jvms, int predicate) { return NULL; };
 
   virtual void print_inlining_late(const char* msg) { ShouldNotReachHere(); }
 
--- a/src/share/vm/opto/callnode.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/callnode.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -777,7 +777,7 @@
 }
 
 // Returns the unique CheckCastPP of a call
-// or 'this' if there are several CheckCastPP
+// or 'this' if there are several CheckCastPP or unexpected uses
 // or returns NULL if there is no one.
 Node *CallNode::result_cast() {
   Node *cast = NULL;
@@ -793,6 +793,13 @@
         return this;  // more than 1 CheckCastPP
       }
       cast = use;
+    } else if (!use->is_Initialize() &&
+               !use->is_AddP()) {
+      // Expected uses are restricted to a CheckCastPP, an Initialize
+      // node, and AddP nodes. If we encounter any other use (a Phi
+      // node can be seen in rare cases) return this to prevent
+      // incorrect optimizations.
+      return this;
     }
   }
   return cast;
@@ -1089,6 +1096,7 @@
 #ifndef PRODUCT
 void SafePointNode::dump_spec(outputStream *st) const {
   st->print(" SafePoint ");
+  _replaced_nodes.dump(st);
 }
 #endif
 
--- a/src/share/vm/opto/callnode.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/callnode.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -30,6 +30,7 @@
 #include "opto/multnode.hpp"
 #include "opto/opcodes.hpp"
 #include "opto/phaseX.hpp"
+#include "opto/replacednodes.hpp"
 #include "opto/type.hpp"
 
 // Portions of code courtesy of Clifford Click
@@ -335,6 +336,7 @@
   OopMap*         _oop_map;   // Array of OopMap info (8-bit char) for GC
   JVMState* const _jvms;      // Pointer to list of JVM State objects
   const TypePtr*  _adr_type;  // What type of memory does this node produce?
+  ReplacedNodes   _replaced_nodes; // During parsing: list of pairs of nodes from calls to GraphKit::replace_in_map()
 
   // Many calls take *all* of memory as input,
   // but some produce a limited subset of that memory as output.
@@ -426,6 +428,37 @@
   void               set_next_exception(SafePointNode* n);
   bool                   has_exceptions() const { return next_exception() != NULL; }
 
+  // Helper methods to operate on replaced nodes
+  ReplacedNodes replaced_nodes() const {
+    return _replaced_nodes;
+  }
+
+  void set_replaced_nodes(ReplacedNodes replaced_nodes) {
+    _replaced_nodes = replaced_nodes;
+  }
+
+  void clone_replaced_nodes() {
+    _replaced_nodes.clone();
+  }
+  void record_replaced_node(Node* initial, Node* improved) {
+    _replaced_nodes.record(initial, improved);
+  }
+  void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) {
+    _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx);
+  }
+  void delete_replaced_nodes() {
+    _replaced_nodes.reset();
+  }
+  void apply_replaced_nodes() {
+    _replaced_nodes.apply(this);
+  }
+  void merge_replaced_nodes_with(SafePointNode* sfpt) {
+    _replaced_nodes.merge_with(sfpt->_replaced_nodes);
+  }
+  bool has_replaced_nodes() const {
+    return !_replaced_nodes.is_empty();
+  }
+
   // Standard Node stuff
   virtual int            Opcode() const;
   virtual bool           pinned() const { return true; }
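
The helpers above expose the new _replaced_nodes list: during parsing a SafePointNode remembers which nodes GraphKit::replace_in_map() improved, so the improvements can be cloned across PreserveJVMState, merged at control merges, and re-applied to the caller's graph after late inlining. A rough standalone sketch of the record/merge idea, using an id-to-id table (a hypothetical stand-in, not the HotSpot ReplacedNodes class; merging keeps only the pairs both paths agree on):

    #include <cstdio>
    #include <map>

    // Records pairs (initial -> improved).
    struct ReplacedTable {
      std::map<int, int> pairs;

      void record(int initial, int improved) { pairs[initial] = improved; }

      // Keep only pairs present and identical in both tables, mirroring what
      // a control merge can safely assume about both incoming paths.
      void merge_with(const ReplacedTable& other) {
        for (std::map<int, int>::iterator it = pairs.begin(); it != pairs.end();) {
          std::map<int, int>::const_iterator o = other.pairs.find(it->first);
          if (o == other.pairs.end() || o->second != it->second) {
            pairs.erase(it++);    // improvement not valid on every path
          } else {
            ++it;
          }
        }
      }

      bool is_empty() const { return pairs.empty(); }
    };

    int main() {
      ReplacedTable a, b;
      a.record(5, 17); a.record(6, 20);
      b.record(5, 17);
      a.merge_with(b);            // only 5 -> 17 survives the merge
      std::printf("pairs after merge: %zu\n", a.pairs.size());
      return 0;
    }
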
--- a/src/share/vm/opto/coalesce.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/coalesce.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -281,9 +281,11 @@
             Block *pred = _phc._cfg.get_block_for_node(b->pred(j));
             Node *copy;
             assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
-            // Rematerialize constants instead of copying them
-            if( m->is_Mach() && m->as_Mach()->is_Con() &&
-                m->as_Mach()->rematerialize() ) {
+            // Rematerialize constants instead of copying them.
+            // We do this only for immediate constants, we avoid constant table loads
+            // because that will unsafely extend the live range of the constant table base.
+            if (m->is_Mach() && m->as_Mach()->is_Con() && !m->as_Mach()->is_MachConstant() &&
+                m->as_Mach()->rematerialize()) {
               copy = m->clone();
               // Insert the copy in the predecessor basic block
               pred->add_inst(copy);
@@ -317,8 +319,8 @@
             assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
             // At this point it is unsafe to extend live ranges (6550579).
             // Rematerialize only constants as we do for Phi above.
-            if(m->is_Mach() && m->as_Mach()->is_Con() &&
-               m->as_Mach()->rematerialize()) {
+            if (m->is_Mach() && m->as_Mach()->is_Con() && !m->as_Mach()->is_MachConstant() &&
+                m->as_Mach()->rematerialize()) {
               copy = m->clone();
               // Insert the copy in the basic block, just before us
               b->insert_node(copy, l++);
--- a/src/share/vm/opto/compile.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/compile.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -391,6 +391,11 @@
   uint next = 0;
   while (next < useful.size()) {
     Node *n = useful.at(next++);
+    if (n->is_SafePoint()) {
+      // We're done with a parsing phase. Replaced nodes are not valid
+      // beyond that point.
+      n->as_SafePoint()->delete_replaced_nodes();
+    }
     // Use raw traversal of out edges since this code removes out edges
     int max = n->outcnt();
     for (int j = 0; j < max; ++j) {
@@ -660,6 +665,10 @@
                   _printer(IdealGraphPrinter::printer()),
 #endif
                   _congraph(NULL),
+                  _comp_arena(mtCompiler),
+                  _node_arena(mtCompiler),
+                  _old_arena(mtCompiler),
+                  _Compile_types(mtCompiler),
                   _replay_inline_data(NULL),
                   _late_inlines(comp_arena(), 2, 0, NULL),
                   _string_late_inlines(comp_arena(), 2, 0, NULL),
@@ -670,8 +679,8 @@
                   _inlining_incrementally(false),
                   _print_inlining_list(NULL),
                   _print_inlining_idx(0),
-                  _preserve_jvm_state(0),
-                  _interpreter_frame_size(0) {
+                  _interpreter_frame_size(0),
+                  _max_node_limit(MaxNodeLimit) {
   C = this;
 
   CompileWrapper cw(this);
@@ -782,7 +791,7 @@
       return;
     }
     JVMState* jvms = build_start_state(start(), tf());
-    if ((jvms = cg->generate(jvms, NULL)) == NULL) {
+    if ((jvms = cg->generate(jvms)) == NULL) {
       record_method_not_compilable("method parse failed");
       return;
     }
@@ -968,6 +977,10 @@
     _in_dump_cnt(0),
     _printer(NULL),
 #endif
+    _comp_arena(mtCompiler),
+    _node_arena(mtCompiler),
+    _old_arena(mtCompiler),
+    _Compile_types(mtCompiler),
     _dead_node_list(comp_arena()),
     _dead_node_count(0),
     _congraph(NULL),
@@ -977,9 +990,9 @@
     _inlining_incrementally(false),
     _print_inlining_list(NULL),
     _print_inlining_idx(0),
-    _preserve_jvm_state(0),
     _allowed_reasons(0),
-    _interpreter_frame_size(0) {
+    _interpreter_frame_size(0),
+    _max_node_limit(MaxNodeLimit) {
   C = this;
 
 #ifndef PRODUCT
@@ -1089,6 +1102,7 @@
   set_do_count_invocations(false);
   set_do_method_data_update(false);
   set_rtm_state(NoRTM); // No RTM lock eliding by default
+  method_has_option_value("MaxNodeLimit", _max_node_limit);
 #if INCLUDE_RTM_OPT
   if (UseRTMLocking && has_method() && (method()->method_data_or_null() != NULL)) {
     int rtm_state = method()->method_data()->rtm_state();
@@ -1910,6 +1924,8 @@
     for_igvn()->clear();
     gvn->replace_with(&igvn);
 
+    _late_inlines_pos = _late_inlines.length();
+
     while (_boxing_late_inlines.length() > 0) {
       CallGenerator* cg = _boxing_late_inlines.pop();
       cg->do_late_inline();
@@ -1973,8 +1989,8 @@
     if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
       if (low_live_nodes < (uint)LiveNodeCountInliningCutoff * 8 / 10) {
         // PhaseIdealLoop is expensive so we only try it once we are
-        // out of loop and we only try it again if the previous helped
-        // got the number of nodes down significantly
+        // out of live nodes and we only try it again if the previous
+        // attempt helped to get the number of nodes down significantly
         PhaseIdealLoop ideal_loop( igvn, false, true );
         if (failing())  return;
         low_live_nodes = live_nodes();
@@ -2066,6 +2082,10 @@
     // Inline valueOf() methods now.
     inline_boxing_calls(igvn);
 
+    if (AlwaysIncrementalInline) {
+      inline_incrementally(igvn);
+    }
+
     print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2);
 
     if (failing())  return;
--- a/src/share/vm/opto/compile.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/compile.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -290,6 +290,7 @@
   int                   _freq_inline_size;      // Max hot method inline size for this compilation
   int                   _fixed_slots;           // count of frame slots not allocated by the register
                                                 // allocator i.e. locks, original deopt pc, etc.
+  uintx                 _max_node_limit;        // Max unique node count during a single compilation.
   // For deopt
   int                   _orig_pc_slot;
   int                   _orig_pc_slot_offset_in_bytes;
@@ -429,9 +430,6 @@
   // Remove the speculative part of types and clean up the graph
   void remove_speculative_types(PhaseIterGVN &igvn);
 
-  // Are we within a PreserveJVMState block?
-  int _preserve_jvm_state;
-
   void* _replay_inline_data; // Pointer to data loaded from file
 
  public:
@@ -597,10 +595,17 @@
   void          set_rtm_state(RTMState s)        { _rtm_state = s; }
   bool              use_rtm() const              { return (_rtm_state & NoRTM) == 0; }
   bool          profile_rtm() const              { return _rtm_state == ProfileRTM; }
+  uint              max_node_limit() const       { return (uint)_max_node_limit; }
+  void          set_max_node_limit(uint n)       { _max_node_limit = n; }
+
   // check the CompilerOracle for special behaviours for this compile
   bool          method_has_option(const char * option) {
     return method() != NULL && method()->has_option(option);
   }
+  template<typename T>
+  bool          method_has_option_value(const char * option, T& value) {
+    return method() != NULL && method()->has_option_value(option, value);
+  }
 #ifndef PRODUCT
   bool          trace_opto_output() const       { return _trace_opto_output; }
   bool              parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
@@ -722,7 +727,7 @@
     record_method_not_compilable(reason, true);
   }
   bool check_node_count(uint margin, const char* reason) {
-    if (live_nodes() + margin > (uint)MaxNodeLimit) {
+    if (live_nodes() + margin > max_node_limit()) {
       record_method_not_compilable(reason);
       return true;
     } else {
@@ -1196,21 +1201,6 @@
 
   // Auxiliary method for randomized fuzzing/stressing
   static bool randomized_select(int count);
-
-  // enter a PreserveJVMState block
-  void inc_preserve_jvm_state() {
-    _preserve_jvm_state++;
-  }
-
-  // exit a PreserveJVMState block
-  void dec_preserve_jvm_state() {
-    _preserve_jvm_state--;
-    assert(_preserve_jvm_state >= 0, "_preserve_jvm_state shouldn't be negative");
-  }
-
-  bool has_preserve_jvm_state() const {
-    return _preserve_jvm_state > 0;
-  }
 };
 
 #endif // SHARE_VM_OPTO_COMPILE_HPP
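
check_node_count() now compares the live node count plus a safety margin against the per-compilation max_node_limit instead of the global MaxNodeLimit, so a CompileCommand option (or the JSR292 bump in doCall.cpp below) can raise the budget for a single method. A toy sketch of the margin-based budget check (plain C++, not the Compile class):

    #include <cstdio>

    struct NodeBudget {
      unsigned live;    // current live node count
      unsigned limit;   // per-compilation node limit

      // Fail the compilation attempt if adding 'margin' more nodes would
      // push us over the per-compilation limit.
      bool check(unsigned margin, const char* reason) {
        if (live + margin > limit) {
          std::printf("not compilable: %s (live=%u, margin=%u, limit=%u)\n",
                      reason, live, margin, limit);
          return true;  // over budget
        }
        return false;
      }
    };

    int main() {
      NodeBudget b = { 79000, 80000 };
      b.check(2000, "out of nodes during inlining");  // trips the limit
      return 0;
    }
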
--- a/src/share/vm/opto/connode.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/connode.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -441,6 +441,102 @@
   return this;
 }
 
+uint CastIINode::size_of() const {
+  return sizeof(*this);
+}
+
+uint CastIINode::cmp(const Node &n) const {
+  return TypeNode::cmp(n) && ((CastIINode&)n)._carry_dependency == _carry_dependency;
+}
+
+Node *CastIINode::Identity(PhaseTransform *phase) {
+  if (_carry_dependency) {
+    return this;
+  }
+  return ConstraintCastNode::Identity(phase);
+}
+
+const Type *CastIINode::Value(PhaseTransform *phase) const {
+  const Type *res = ConstraintCastNode::Value(phase);
+
+  // Try to improve the type of the CastII if we recognize a CmpI/If
+  // pattern.
+  if (_carry_dependency) {
+    if (in(0) != NULL && in(0)->in(0) != NULL && in(0)->in(0)->is_If()) {
+      assert(in(0)->is_IfFalse() || in(0)->is_IfTrue(), "should be If proj");
+      Node* proj = in(0);
+      if (proj->in(0)->in(1)->is_Bool()) {
+        Node* b = proj->in(0)->in(1);
+        if (b->in(1)->Opcode() == Op_CmpI) {
+          Node* cmp = b->in(1);
+          if (cmp->in(1) == in(1) && phase->type(cmp->in(2))->isa_int()) {
+            const TypeInt* in2_t = phase->type(cmp->in(2))->is_int();
+            const Type* t = TypeInt::INT;
+            BoolTest test = b->as_Bool()->_test;
+            if (proj->is_IfFalse()) {
+              test = test.negate();
+            }
+            BoolTest::mask m = test._test;
+            jlong lo_long = min_jint;
+            jlong hi_long = max_jint;
+            if (m == BoolTest::le || m == BoolTest::lt) {
+              hi_long = in2_t->_hi;
+              if (m == BoolTest::lt) {
+                hi_long -= 1;
+              }
+            } else if (m == BoolTest::ge || m == BoolTest::gt) {
+              lo_long = in2_t->_lo;
+              if (m == BoolTest::gt) {
+                lo_long += 1;
+              }
+            } else if (m == BoolTest::eq) {
+              lo_long = in2_t->_lo;
+              hi_long = in2_t->_hi;
+            } else if (m == BoolTest::ne) {
+              // can't do any better
+            } else {
+              stringStream ss;
+              test.dump_on(&ss);
+              fatal(err_msg_res("unexpected comparison %s", ss.as_string()));
+            }
+            int lo_int = (int)lo_long;
+            int hi_int = (int)hi_long;
+
+            if (lo_long != (jlong)lo_int) {
+              lo_int = min_jint;
+            }
+            if (hi_long != (jlong)hi_int) {
+              hi_int = max_jint;
+            }
+
+            t = TypeInt::make(lo_int, hi_int, Type::WidenMax);
+
+            res = res->filter_speculative(t);
+
+            return res;
+          }
+        }
+      }
+    }
+  }
+  return res;
+}
+
+Node *CastIINode::Ideal_DU_postCCP(PhaseCCP *ccp) {
+  if (_carry_dependency) {
+    return NULL;
+  }
+  return ConstraintCastNode::Ideal_DU_postCCP(ccp);
+}
+
+#ifndef PRODUCT
+void CastIINode::dump_spec(outputStream *st) const {
+  TypeNode::dump_spec(st);
+  if (_carry_dependency) {
+    st->print(" carry dependency");
+  }
+}
+#endif
 
 //=============================================================================
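
CastIINode::Value above narrows an int range from the dominating CmpI/If pattern: the branch direction selects a BoolTest mask (negated when the control input is the IfFalse projection), the mask bounds the value against the comparison operand's range, and the result is clamped back from jlong to jint. A standalone sketch of that interval computation (plain C++ with a hypothetical Mask enum, not the HotSpot type system; the IfFalse negation is left to the caller):

    #include <climits>
    #include <cstdio>

    enum Mask { LT, LE, GT, GE, EQ, NE };   // stand-in for BoolTest::mask

    struct Range { int lo, hi; };

    // Narrow the range of 'value' given that "value <mask> c" is known to be
    // true, where c lies in [cmp_lo, cmp_hi].
    static Range narrow(Mask m, int cmp_lo, int cmp_hi) {
      long long lo = INT_MIN;
      long long hi = INT_MAX;
      switch (m) {
        case LT: hi = (long long)cmp_hi - 1; break;
        case LE: hi = cmp_hi;                break;
        case GT: lo = (long long)cmp_lo + 1; break;
        case GE: lo = cmp_lo;                break;
        case EQ: lo = cmp_lo; hi = cmp_hi;   break;
        case NE: break;                      // cannot do any better
      }
      Range r;
      // Mirror the jlong -> jint clamping: widen to min/max int on overflow.
      r.lo = (lo < INT_MIN || lo > INT_MAX) ? INT_MIN : (int)lo;
      r.hi = (hi < INT_MIN || hi > INT_MAX) ? INT_MAX : (int)hi;
      return r;
    }

    int main() {
      Range r = narrow(LT, 0, 10);  // value < c, c in [0,10]  =>  value <= 9
      std::printf("[%d, %d]\n", r.lo, r.hi);
      return 0;
    }
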
 
--- a/src/share/vm/opto/connode.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/connode.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -241,10 +241,25 @@
 //------------------------------CastIINode-------------------------------------
 // cast integer to integer (different range)
 class CastIINode: public ConstraintCastNode {
+  private:
+  // Can this node be removed post CCP or does it carry a required dependency?
+  const bool _carry_dependency;
+
+  protected:
+  virtual uint cmp( const Node &n ) const;
+  virtual uint size_of() const;
+
 public:
-  CastIINode (Node *n, const Type *t ): ConstraintCastNode(n,t) {}
+  CastIINode(Node *n, const Type *t, bool carry_dependency = false)
+    : ConstraintCastNode(n,t), _carry_dependency(carry_dependency) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegI; }
+  virtual Node *Identity( PhaseTransform *phase );
+  virtual const Type *Value( PhaseTransform *phase ) const;
+  virtual Node *Ideal_DU_postCCP( PhaseCCP * );
+#ifndef PRODUCT
+  virtual void dump_spec(outputStream *st) const;
+#endif
 };
 
 //------------------------------CastPPNode-------------------------------------
--- a/src/share/vm/opto/doCall.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/doCall.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -115,12 +115,12 @@
   if (allow_inline && allow_intrinsics) {
     CallGenerator* cg = find_intrinsic(callee, call_does_dispatch);
     if (cg != NULL) {
-      if (cg->is_predicted()) {
+      if (cg->is_predicated()) {
         // Code without intrinsic but, hopefully, inlined.
         CallGenerator* inline_cg = this->call_generator(callee,
               vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, speculative_receiver_type, false);
         if (inline_cg != NULL) {
-          cg = CallGenerator::for_predicted_intrinsic(cg, inline_cg);
+          cg = CallGenerator::for_predicated_intrinsic(cg, inline_cg);
         }
       }
 
@@ -410,6 +410,11 @@
   ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
   assert(declared_signature != NULL, "cannot be null");
 
+  // Bump max node limit for JSR292 users
+  if (bc() == Bytecodes::_invokedynamic || orig_callee->is_method_handle_intrinsic()) {
+    C->set_max_node_limit(3*MaxNodeLimit);
+  }
+
   // uncommon-trap when callee is unloaded, uninitialized or will not link
   // bailout when too many arguments for register representation
   if (!will_link || can_not_compile_call_site(orig_callee, klass)) {
@@ -523,7 +528,7 @@
   // because exceptions don't return to the call site.)
   profile_call(receiver);
 
-  JVMState* new_jvms = cg->generate(jvms, this);
+  JVMState* new_jvms = cg->generate(jvms);
   if (new_jvms == NULL) {
     // When inlining attempt fails (e.g., too many arguments),
     // it may contaminate the current compile state, making it
@@ -537,7 +542,7 @@
     // intrinsic was expecting to optimize. Should always be possible to
     // get a normal java call that may inline in that case
     cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type, /* allow_intrinsics= */ false);
-    if ((new_jvms = cg->generate(jvms, this)) == NULL) {
+    if ((new_jvms = cg->generate(jvms)) == NULL) {
       guarantee(failing(), "call failed to generate:  calls should work");
       return;
     }
@@ -791,7 +796,7 @@
   Node* ex_klass_node = NULL;
   if (has_ex_handler() && !ex_type->klass_is_exact()) {
     Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes());
-    ex_klass_node = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
+    ex_klass_node = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT));
 
     // Compute the exception klass a little more cleverly.
     // Obvious solution is to simple do a LoadKlass from the 'ex_node'.
@@ -799,11 +804,17 @@
     // each arm of the Phi.  If I know something clever about the exceptions
     // I'm loading the class from, I can replace the LoadKlass with the
     // klass constant for the exception oop.
-    if( ex_node->is_Phi() ) {
-      ex_klass_node = new (C) PhiNode( ex_node->in(0), TypeKlassPtr::OBJECT );
-      for( uint i = 1; i < ex_node->req(); i++ ) {
-        Node* p = basic_plus_adr( ex_node->in(i), ex_node->in(i), oopDesc::klass_offset_in_bytes() );
-        Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
+    if (ex_node->is_Phi()) {
+      ex_klass_node = new (C) PhiNode(ex_node->in(0), TypeKlassPtr::OBJECT);
+      for (uint i = 1; i < ex_node->req(); i++) {
+        Node* ex_in = ex_node->in(i);
+        if (ex_in == top() || ex_in == NULL) {
+          // This path was not taken.
+          ex_klass_node->init_req(i, top());
+          continue;
+        }
+        Node* p = basic_plus_adr(ex_in, ex_in, oopDesc::klass_offset_in_bytes());
+        Node* k = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT));
         ex_klass_node->init_req( i, k );
       }
       _gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT);
--- a/src/share/vm/opto/escape.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/escape.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,6 +37,8 @@
 
 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
   _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
+  _in_worklist(C->comp_arena()),
+  _next_pidx(0),
   _collecting(true),
   _verify(false),
   _compile(C),
@@ -124,13 +126,19 @@
   if (C->root() != NULL) {
     ideal_nodes.push(C->root());
   }
+  // Processed ideal nodes are unique on ideal_nodes list
+  // but several ideal nodes are mapped to the phantom_obj.
+  // To avoid duplicated entries on the following worklists
+  // add the phantom_obj only once to them.
+  ptnodes_worklist.append(phantom_obj);
+  java_objects_worklist.append(phantom_obj);
   for( uint next = 0; next < ideal_nodes.size(); ++next ) {
     Node* n = ideal_nodes.at(next);
     // Create PointsTo nodes and add them to Connection Graph. Called
     // only once per ideal node since ideal_nodes is Unique_Node list.
     add_node_to_connection_graph(n, &delayed_worklist);
     PointsToNode* ptn = ptnode_adr(n->_idx);
-    if (ptn != NULL) {
+    if (ptn != NULL && ptn != phantom_obj) {
       ptnodes_worklist.append(ptn);
       if (ptn->is_JavaObject()) {
         java_objects_worklist.append(ptn->as_JavaObject());
@@ -414,7 +422,7 @@
     }
     case Op_CreateEx: {
       // assume that all exception objects globally escape
-      add_java_object(n, PointsToNode::GlobalEscape);
+      map_ideal_node(n, phantom_obj);
       break;
     }
     case Op_LoadKlass:
@@ -938,7 +946,14 @@
                   strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
                   strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
                   strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
-                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0)
+                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
+                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
+                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
+                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
+                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
+                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
+                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
+                  strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0)
                   ))) {
             call->dump();
             fatal(err_msg_res("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name));
@@ -1058,13 +1073,8 @@
   // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
   // Set limit to 20 to catch situation when something did go wrong and
   // bailout Escape Analysis.
-  // Also limit build time to 30 sec (60 in debug VM).
+  // Also limit build time to 20 sec (60 in debug VM); see the EscapeAnalysisTimeout flag.
 #define CG_BUILD_ITER_LIMIT 20
-#ifdef ASSERT
-#define CG_BUILD_TIME_LIMIT 60.0
-#else
-#define CG_BUILD_TIME_LIMIT 30.0
-#endif
 
   // Propagate GlobalEscape and ArgEscape escape states and check that
   // we still have non-escaping objects. The method pushs on _worklist
@@ -1075,12 +1085,13 @@
   // Now propagate references to all JavaObject nodes.
   int java_objects_length = java_objects_worklist.length();
   elapsedTimer time;
+  bool timeout = false;
   int new_edges = 1;
   int iterations = 0;
   do {
     while ((new_edges > 0) &&
-          (iterations++   < CG_BUILD_ITER_LIMIT) &&
-          (time.seconds() < CG_BUILD_TIME_LIMIT)) {
+           (iterations++ < CG_BUILD_ITER_LIMIT)) {
+      double start_time = time.seconds();
       time.start();
       new_edges = 0;
       // Propagate references to phantom_object for nodes pushed on _worklist
@@ -1089,7 +1100,29 @@
       for (int next = 0; next < java_objects_length; ++next) {
         JavaObjectNode* ptn = java_objects_worklist.at(next);
         new_edges += add_java_object_edges(ptn, true);
+
+#define SAMPLE_SIZE 4
+        if ((next % SAMPLE_SIZE) == 0) {
+          // Every 4 iterations, calculate how much time it will take
+          // to complete graph construction.
+          time.stop();
+          // Poll for requests from the shutdown mechanism to quiesce the compiler
+          // because connection graph construction may take a long time.
+          CompileBroker::maybe_block();
+          double stop_time = time.seconds();
+          double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
+          double time_until_end = time_per_iter * (double)(java_objects_length - next);
+          if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
+            timeout = true;
+            break; // Timeout
+          }
+          start_time = stop_time;
+          time.start();
+        }
+#undef SAMPLE_SIZE
+
       }
+      if (timeout) break;
       if (new_edges > 0) {
         // Update escape states on each iteration if graph was updated.
         if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
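
The sampling above avoids reading the clock for every Java object: every SAMPLE_SIZE objects it measures the time spent since the previous sample, extrapolates how long the remaining objects will take, and bails out of connection graph construction if the projection exceeds the timeout. A minimal standalone sketch of that projection (illustrative only; the 20-second constant merely stands in for the EscapeAnalysisTimeout flag):

    #include <chrono>
    #include <cmath>
    #include <cstdio>

    int main() {
      const int    kSampleSize = 4;       // re-check the clock every 4 items
      const double kTimeoutSec = 20.0;    // stand-in for EscapeAnalysisTimeout
      const int    total_items = 1000000;

      using Clock = std::chrono::steady_clock;
      const Clock::time_point t0 = Clock::now();
      double sample_start = 0.0;          // seconds elapsed at the last sample
      volatile double sink = 0.0;

      for (int next = 0; next < total_items; next++) {
        sink = sink + std::sqrt((double)next);   // one unit of "work"
        if ((next % kSampleSize) == 0) {
          double now = std::chrono::duration<double>(Clock::now() - t0).count();
          double per_item  = (now - sample_start) / kSampleSize;
          double projected = now + per_item * (total_items - next);
          if (projected >= kTimeoutSec) {
            std::printf("bailing out at item %d: projected %.1fs >= %.1fs\n",
                        next, projected, kTimeoutSec);
            break;
          }
          sample_start = now;
        }
      }
      return 0;
    }
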
@@ -1097,9 +1130,12 @@
         }
       }
       time.stop();
+      if (time.seconds() >= EscapeAnalysisTimeout) {
+        timeout = true;
+        break;
+      }
     }
-    if ((iterations     < CG_BUILD_ITER_LIMIT) &&
-        (time.seconds() < CG_BUILD_TIME_LIMIT)) {
+    if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) {
       time.start();
       // Find fields which have unknown value.
       int fields_length = oop_fields_worklist.length();
@@ -1112,18 +1148,21 @@
         }
       }
       time.stop();
+      if (time.seconds() >= EscapeAnalysisTimeout) {
+        timeout = true;
+        break;
+      }
     } else {
       new_edges = 0; // Bailout
     }
   } while (new_edges > 0);
 
   // Bailout if passed limits.
-  if ((iterations     >= CG_BUILD_ITER_LIMIT) ||
-      (time.seconds() >= CG_BUILD_TIME_LIMIT)) {
+  if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) {
     Compile* C = _compile;
     if (C->log() != NULL) {
       C->log()->begin_elem("connectionGraph_bailout reason='reached ");
-      C->log()->text("%s", (iterations >= CG_BUILD_ITER_LIMIT) ? "iterations" : "time");
+      C->log()->text("%s", timeout ? "time" : "iterations");
       C->log()->end_elem(" limit'");
     }
     assert(ExitEscapeAnalysisOnTimeout, err_msg_res("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
@@ -1140,7 +1179,6 @@
 #endif
 
 #undef CG_BUILD_ITER_LIMIT
-#undef CG_BUILD_TIME_LIMIT
 
   // Find fields initialized by NULL for non-escaping Allocations.
   int non_escaped_length = non_escaped_worklist.length();
@@ -1264,8 +1302,8 @@
       }
     }
   }
-  while(_worklist.length() > 0) {
-    PointsToNode* use = _worklist.pop();
+  for (int l = 0; l < _worklist.length(); l++) {
+    PointsToNode* use = _worklist.at(l);
     if (PointsToNode::is_base_use(use)) {
       // Add reference from jobj to field and from field to jobj (field's base).
       use = PointsToNode::get_use_node(use)->as_Field();
@@ -1312,6 +1350,8 @@
       add_field_uses_to_worklist(use->as_Field());
     }
   }
+  _worklist.clear();
+  _in_worklist.Reset();
   return new_edges;
 }
 
@@ -1891,7 +1931,7 @@
     return;
   }
   Compile* C = _compile;
-  ptadr = new (C->comp_arena()) LocalVarNode(C, n, es);
+  ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
   _nodes.at_put(n->_idx, ptadr);
 }
 
@@ -1902,7 +1942,7 @@
     return;
   }
   Compile* C = _compile;
-  ptadr = new (C->comp_arena()) JavaObjectNode(C, n, es);
+  ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
   _nodes.at_put(n->_idx, ptadr);
 }
 
@@ -1918,7 +1958,7 @@
     es = PointsToNode::GlobalEscape;
   }
   Compile* C = _compile;
-  FieldNode* field = new (C->comp_arena()) FieldNode(C, n, es, offset, is_oop);
+  FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
   _nodes.at_put(n->_idx, field);
 }
 
@@ -1932,7 +1972,7 @@
     return;
   }
   Compile* C = _compile;
-  ptadr = new (C->comp_arena()) ArraycopyNode(C, n, es);
+  ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
   _nodes.at_put(n->_idx, ptadr);
   // Add edge from arraycopy node to source object.
   (void)add_edge(ptadr, src);
@@ -2372,7 +2412,7 @@
       }
     }
   }
-  if ((int) (C->live_nodes() + 2*NodeLimitFudgeFactor) > MaxNodeLimit) {
+  if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
     if (C->do_escape_analysis() == true && !C->failing()) {
       // Retry compilation without escape analysis.
       // If this is the first failure, the sentinel string will "stick"
@@ -2832,6 +2872,13 @@
           continue;
         }
       }
+
+      const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
+      if (t == NULL)
+        continue;  // not a TypeOopPtr
+      if (!t->klass_is_exact())
+        continue; // not a unique type
+
       if (alloc->is_Allocate()) {
         // Set the scalar_replaceable flag for allocation
         // so it could be eliminated.
@@ -2850,10 +2897,7 @@
       //   - not determined to be ineligible by escape analysis
       set_map(alloc, n);
       set_map(n, alloc);
-      const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
-      if (t == NULL)
-        continue;  // not a TypeOopPtr
-      const TypeOopPtr* tinst = t->cast_to_exactness(true)->is_oopptr()->cast_to_instance_id(ni);
+      const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
       igvn->hash_delete(n);
       igvn->set_type(n,  tinst);
       n->raise_bottom_type(tinst);
--- a/src/share/vm/opto/escape.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/escape.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -125,6 +125,8 @@
 class FieldNode;
 class ArraycopyNode;
 
+class ConnectionGraph;
+
 // ConnectionGraph nodes
 class PointsToNode : public ResourceObj {
   GrowableArray<PointsToNode*> _edges; // List of nodes this node points to
@@ -137,6 +139,7 @@
 
   Node* const        _node;  // Ideal node corresponding to this PointsTo node.
   const int           _idx;  // Cached ideal node's _idx
+  const uint         _pidx;  // Index of this node
 
 public:
   typedef enum {
@@ -165,17 +168,9 @@
   } NodeFlags;
 
 
-  PointsToNode(Compile *C, Node* n, EscapeState es, NodeType type):
-    _edges(C->comp_arena(), 2, 0, NULL),
-    _uses (C->comp_arena(), 2, 0, NULL),
-    _node(n),
-    _idx(n->_idx),
-    _type((u1)type),
-    _escape((u1)es),
-    _fields_escape((u1)es),
-    _flags(ScalarReplaceable) {
-    assert(n != NULL && es != UnknownEscape, "sanity");
-  }
+  inline PointsToNode(ConnectionGraph* CG, Node* n, EscapeState es, NodeType type);
+
+  uint        pidx()   const { return _pidx; }
 
   Node* ideal_node()   const { return _node; }
   int          idx()   const { return _idx; }
@@ -243,14 +238,14 @@
 
 class LocalVarNode: public PointsToNode {
 public:
-  LocalVarNode(Compile *C, Node* n, EscapeState es):
-    PointsToNode(C, n, es, LocalVar) {}
+  LocalVarNode(ConnectionGraph *CG, Node* n, EscapeState es):
+    PointsToNode(CG, n, es, LocalVar) {}
 };
 
 class JavaObjectNode: public PointsToNode {
 public:
-  JavaObjectNode(Compile *C, Node* n, EscapeState es):
-    PointsToNode(C, n, es, JavaObject) {
+  JavaObjectNode(ConnectionGraph *CG, Node* n, EscapeState es):
+    PointsToNode(CG, n, es, JavaObject) {
       if (es > NoEscape)
         set_scalar_replaceable(false);
     }
@@ -262,8 +257,8 @@
   const bool  _is_oop; // Field points to object
         bool  _has_unknown_base; // Has phantom_object base
 public:
-  FieldNode(Compile *C, Node* n, EscapeState es, int offs, bool is_oop):
-    PointsToNode(C, n, es, Field),
+  FieldNode(ConnectionGraph *CG, Node* n, EscapeState es, int offs, bool is_oop):
+    PointsToNode(CG, n, es, Field),
     _offset(offs), _is_oop(is_oop),
     _has_unknown_base(false) {}
 
@@ -284,8 +279,8 @@
 
 class ArraycopyNode: public PointsToNode {
 public:
-  ArraycopyNode(Compile *C, Node* n, EscapeState es):
-    PointsToNode(C, n, es, Arraycopy) {}
+  ArraycopyNode(ConnectionGraph *CG, Node* n, EscapeState es):
+    PointsToNode(CG, n, es, Arraycopy) {}
 };
 
 // Iterators for PointsTo node's edges:
@@ -323,11 +318,14 @@
 
 
 class ConnectionGraph: public ResourceObj {
+  friend class PointsToNode;
 private:
   GrowableArray<PointsToNode*>  _nodes; // Map from ideal nodes to
                                         // ConnectionGraph nodes.
 
   GrowableArray<PointsToNode*>  _worklist; // Nodes to be processed
+  VectorSet                  _in_worklist;
+  uint                         _next_pidx;
 
   bool            _collecting; // Indicates whether escape information
                                // is still being collected. If false,
@@ -353,6 +351,8 @@
   }
   uint nodes_size() const { return _nodes.length(); }
 
+  uint next_pidx() { return _next_pidx++; }
+
   // Add nodes to ConnectionGraph.
   void add_local_var(Node* n, PointsToNode::EscapeState es);
   void add_java_object(Node* n, PointsToNode::EscapeState es);
@@ -396,15 +396,26 @@
   int add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist);
 
   // Put node on worklist if it is (or was) not there.
-  void add_to_worklist(PointsToNode* pt) {
-    _worklist.push(pt);
-    return;
+  inline void add_to_worklist(PointsToNode* pt) {
+    PointsToNode* ptf = pt;
+    uint pidx_bias = 0;
+    if (PointsToNode::is_base_use(pt)) {
+      // Create a separate entry in _in_worklist for a marked base edge
+      // because _worklist may have an entry for a normal edge pointing
+      // to the same node. To separate them use _next_pidx as bias.
+      ptf = PointsToNode::get_use_node(pt)->as_Field();
+      pidx_bias = _next_pidx;
+    }
+    if (!_in_worklist.test_set(ptf->pidx() + pidx_bias)) {
+      _worklist.append(pt);
+    }
   }
 
   // Put on worklist all uses of this node.
-  void add_uses_to_worklist(PointsToNode* pt) {
-    for (UseIterator i(pt); i.has_next(); i.next())
-      _worklist.push(i.get());
+  inline void add_uses_to_worklist(PointsToNode* pt) {
+    for (UseIterator i(pt); i.has_next(); i.next()) {
+      add_to_worklist(i.get());
+    }
   }
 
   // Put on worklist all field's uses and related field nodes.
@@ -517,8 +528,8 @@
  }
   // Helper functions
   bool   is_oop_field(Node* n, int offset, bool* unsafe);
- static Node* get_addp_base(Node *addp);
- static Node* find_second_addp(Node* addp, Node* n);
+  static Node* get_addp_base(Node *addp);
+  static Node* find_second_addp(Node* addp, Node* n);
   // offset of a field reference
   int address_offset(Node* adr, PhaseTransform *phase);
 
@@ -587,4 +598,17 @@
 #endif
 };
 
+inline PointsToNode::PointsToNode(ConnectionGraph *CG, Node* n, EscapeState es, NodeType type):
+  _edges(CG->_compile->comp_arena(), 2, 0, NULL),
+  _uses (CG->_compile->comp_arena(), 2, 0, NULL),
+  _node(n),
+  _idx(n->_idx),
+  _pidx(CG->next_pidx()),
+  _type((u1)type),
+  _escape((u1)es),
+  _fields_escape((u1)es),
+  _flags(ScalarReplaceable) {
+  assert(n != NULL && es != UnknownEscape, "sanity");
+}
+
 #endif // SHARE_VM_OPTO_ESCAPE_HPP
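
add_to_worklist() above deduplicates worklist entries with a VectorSet keyed by the new per-node pidx; a marked "base use" of a field gets a biased index (pidx + _next_pidx) so it does not collide with a normal entry for the same node. A small standalone sketch of the dedup-by-biased-bit idea (std::vector<bool> standing in for VectorSet; the ~id tag is just an illustrative marker for base-use entries):

    #include <cstdio>
    #include <vector>

    struct Worklist {
      std::vector<int>  items;        // pushed entries (possibly tagged)
      std::vector<bool> in_worklist;  // one bit per (id [+ bias])
      int               next_id;      // total ids allocated so far

      explicit Worklist(int ids) : in_worklist(2 * ids, false), next_id(ids) {}

      // 'base_use' entries get a biased bit so they do not collide with a
      // normal entry for the same id already on the list.
      void add(int id, bool base_use) {
        int bit = id + (base_use ? next_id : 0);
        if (!in_worklist[bit]) {
          in_worklist[bit] = true;
          items.push_back(base_use ? ~id : id);  // tag analogous to base-use marking
        }
      }
    };

    int main() {
      Worklist wl(10);
      wl.add(3, false);
      wl.add(3, false);   // duplicate, ignored
      wl.add(3, true);    // same id as a base use, kept separately
      std::printf("worklist size: %zu\n", wl.items.size());  // 2
      return 0;
    }
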
--- a/src/share/vm/opto/graphKit.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/graphKit.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -428,6 +428,7 @@
       }
     }
   }
+  phi_map->merge_replaced_nodes_with(ex_map);
 }
 
 //--------------------------use_exception_state--------------------------------
@@ -641,7 +642,6 @@
   _map    = kit->map();   // preserve the map
   _sp     = kit->sp();
   kit->set_map(clone_map ? kit->clone_map() : NULL);
-  Compile::current()->inc_preserve_jvm_state();
 #ifdef ASSERT
   _bci    = kit->bci();
   Parse* parser = kit->is_Parse();
@@ -659,7 +659,6 @@
 #endif
   kit->set_map(_map);
   kit->set_sp(_sp);
-  Compile::current()->dec_preserve_jvm_state();
 }
 
 
@@ -1151,7 +1150,7 @@
   Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
   if (akls != NULL)  return akls;
   Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
-  return _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), k_adr, TypeInstPtr::KLASS) );
+  return _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
 }
 
 //-------------------------load_array_length-----------------------------------
@@ -1398,60 +1397,17 @@
   // on the map.  This includes locals, stack, and monitors
   // of the current (innermost) JVM state.
 
-  if (!ReplaceInParentMaps) {
-    return;
-  }
-
-  // PreserveJVMState doesn't do a deep copy so we can't modify
-  // parents
-  if (Compile::current()->has_preserve_jvm_state()) {
+  // don't let inconsistent types from profiling escape this
+  // method
+
+  const Type* told = _gvn.type(old);
+  const Type* tnew = _gvn.type(neww);
+
+  if (!tnew->higher_equal(told)) {
     return;
   }
 
-  Parse* parser = is_Parse();
-  bool progress = true;
-  Node* ctrl = map()->in(0);
-  // Follow the chain of parsers and see whether the update can be
-  // done in the map of callers. We can do the replace for a caller if
-  // the current control post dominates the control of a caller.
-  while (parser != NULL && parser->caller() != NULL && progress) {
-    progress = false;
-    Node* parent_map = parser->caller()->map();
-    assert(parser->exits().map()->jvms()->depth() == parser->caller()->depth(), "map mismatch");
-
-    Node* parent_ctrl = parent_map->in(0);
-
-    while (parent_ctrl->is_Region()) {
-      Node* n = parent_ctrl->as_Region()->is_copy();
-      if (n == NULL) {
-        break;
-      }
-      parent_ctrl = n;
-    }
-
-    for (;;) {
-      if (ctrl == parent_ctrl) {
-        // update the map of the exits which is the one that will be
-        // used when compilation resume after inlining
-        parser->exits().map()->replace_edge(old, neww);
-        progress = true;
-        break;
-      }
-      if (ctrl->is_Proj() && ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
-        ctrl = ctrl->in(0)->in(0);
-      } else if (ctrl->is_Region()) {
-        Node* n = ctrl->as_Region()->is_copy();
-        if (n == NULL) {
-          break;
-        }
-        ctrl = n;
-      } else {
-        break;
-      }
-    }
-
-    parser = parser->parent_parser();
-  }
+  map()->record_replaced_node(old, neww);
 }
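
replace_in_map() now refuses to record a replacement whose type is not at least as precise as the old one (tnew->higher_equal(told)), so a type derived from profiling cannot leave the map less constrained than it already was. A toy sketch of that check, using integer ranges in place of the compiler's type lattice (narrower range means more precise):

    #include <cstdio>

    struct IntRange { int lo, hi; };

    // "Higher or equal" in a simple range lattice: the new range is at least
    // as precise if it is contained in the old one.
    static bool higher_equal(IntRange tnew, IntRange told) {
      return tnew.lo >= told.lo && tnew.hi <= told.hi;
    }

    int main() {
      IntRange told = {  0, 100 };
      IntRange good = {  0,  10 };
      IntRange bad  = { -5, 200 };
      std::printf("record? %d\n", higher_equal(good, told)); // 1: narrower, record it
      std::printf("record? %d\n", higher_equal(bad,  told)); // 0: would lose precision, skip
      return 0;
    }
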
 
 
@@ -1855,12 +1811,16 @@
 
 
 // Replace the call with the current state of the kit.
-void GraphKit::replace_call(CallNode* call, Node* result) {
+void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes) {
   JVMState* ejvms = NULL;
   if (has_exceptions()) {
     ejvms = transfer_exceptions_into_jvms();
   }
 
+  ReplacedNodes replaced_nodes = map()->replaced_nodes();
+  ReplacedNodes replaced_nodes_exception;
+  Node* ex_ctl = top();
+
   SafePointNode* final_state = stop();
 
   // Find all the needed outputs of this call
@@ -1877,6 +1837,10 @@
     C->gvn_replace_by(callprojs.fallthrough_catchproj, final_ctl);
   }
   if (callprojs.fallthrough_memproj != NULL) {
+    if (final_mem->is_MergeMem()) {
+      // Parser's exits MergeMem was not transformed but may be optimized
+      final_mem = _gvn.transform(final_mem);
+    }
     C->gvn_replace_by(callprojs.fallthrough_memproj,   final_mem);
   }
   if (callprojs.fallthrough_ioproj != NULL) {
@@ -1908,10 +1872,13 @@
 
     // Load my combined exception state into the kit, with all phis transformed:
     SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
+    replaced_nodes_exception = ex_map->replaced_nodes();
 
     Node* ex_oop = ekit.use_exception_state(ex_map);
+
     if (callprojs.catchall_catchproj != NULL) {
       C->gvn_replace_by(callprojs.catchall_catchproj, ekit.control());
+      ex_ctl = ekit.control();
     }
     if (callprojs.catchall_memproj != NULL) {
       C->gvn_replace_by(callprojs.catchall_memproj,   ekit.reset_memory());
@@ -1944,6 +1911,13 @@
       _gvn.transform(wl.pop());
     }
   }
+
+  if (callprojs.fallthrough_catchproj != NULL && !final_ctl->is_top() && do_replaced_nodes) {
+    replaced_nodes.apply(C, final_ctl);
+  }
+  if (!ex_ctl->is_top() && do_replaced_nodes) {
+    replaced_nodes_exception.apply(C, ex_ctl);
+  }
 }
 
 
@@ -2435,23 +2409,24 @@
     Node* new_slice = mms.memory2();
     if (old_slice != new_slice) {
       PhiNode* phi;
-      if (new_slice->is_Phi() && new_slice->as_Phi()->region() == region) {
-        phi = new_slice->as_Phi();
-        #ifdef ASSERT
-        if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region)
-          old_slice = old_slice->in(new_path);
-        // Caller is responsible for ensuring that any pre-existing
-        // phis are already aware of old memory.
-        int old_path = (new_path > 1) ? 1 : 2;  // choose old_path != new_path
-        assert(phi->in(old_path) == old_slice, "pre-existing phis OK");
-        #endif
-        mms.set_memory(phi);
+      if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region) {
+        if (mms.is_empty()) {
+          // clone base memory Phi's inputs for this memory slice
+          assert(old_slice == mms.base_memory(), "sanity");
+          phi = PhiNode::make(region, NULL, Type::MEMORY, mms.adr_type(C));
+          _gvn.set_type(phi, Type::MEMORY);
+          for (uint i = 1; i < phi->req(); i++) {
+            phi->init_req(i, old_slice->in(i));
+          }
+        } else {
+          phi = old_slice->as_Phi(); // Phi was generated already
+        }
       } else {
         phi = PhiNode::make(region, old_slice, Type::MEMORY, mms.adr_type(C));
         _gvn.set_type(phi, Type::MEMORY);
-        phi->set_req(new_path, new_slice);
-        mms.set_memory(_gvn.transform(phi));  // assume it is complete
       }
+      phi->set_req(new_path, new_slice);
+      mms.set_memory(phi);
     }
   }
 }
@@ -2567,7 +2542,7 @@
   // cache which is mutable so can't use immutable memory.  Other
   // types load from the super-class display table which is immutable.
   Node *kmem = might_be_cache ? memory(p2) : immutable_memory();
-  Node *nkls = _gvn.transform( LoadKlassNode::make( _gvn, kmem, p2, _gvn.type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL ) );
+  Node* nkls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, kmem, p2, _gvn.type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL));
 
   // Compile speed common case: ARE a subtype and we canNOT fail
   if( superklass == nkls )
--- a/src/share/vm/opto/graphKit.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/graphKit.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -685,7 +685,7 @@
   // Replace the call with the current state of the kit.  Requires
   // that the call was generated with separate io_projs so that
   // exceptional control flow can be handled properly.
-  void replace_call(CallNode* call, Node* result);
+  void replace_call(CallNode* call, Node* result, bool do_replaced_nodes = false);
 
   // helper functions for statistics
   void increment_counter(address counter_addr);   // increment a debug counter
--- a/src/share/vm/opto/ifg.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/ifg.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -541,17 +541,37 @@
           if( !n->is_Proj() ||
               // Could also be a flags-projection of a dead ADD or such.
               (_lrg_map.live_range_id(def) && !liveout.member(_lrg_map.live_range_id(def)))) {
-            block->remove_node(j - 1);
-            if (lrgs(r)._def == n) {
-              lrgs(r)._def = 0;
+            bool remove = true;
+            if (n->is_MachProj()) {
+              // Don't remove KILL projections if their "defining" nodes have
+              // memory effects (have SCMemProj projection node) -
+              // they are not dead even when their result is not used.
+              // For example, compareAndSwapL (and other CAS) and EncodeISOArray nodes.
+              // The method add_input_to_liveout() keeps such nodes alive (put them on liveout list)
+              // when it sees SCMemProj node in a block. Unfortunately SCMemProj node could be placed
+              // in block in such order that KILL MachProj nodes are processed first.
+              uint cnt = def->outcnt();
+              for (uint i = 0; i < cnt; i++) {
+                Node* proj = def->raw_out(i);
+                if (proj->Opcode() == Op_SCMemProj) {
+                  remove = false;
+                  break;
+                }
+              }
             }
-            n->disconnect_inputs(NULL, C);
-            _cfg.unmap_node_from_block(n);
-            n->replace_by(C->top());
-            // Since yanking a Node from block, high pressure moves up one
-            hrp_index[0]--;
-            hrp_index[1]--;
-            continue;
+            if (remove) {
+              block->remove_node(j - 1);
+              if (lrgs(r)._def == n) {
+                lrgs(r)._def = 0;
+              }
+              n->disconnect_inputs(NULL, C);
+              _cfg.unmap_node_from_block(n);
+              n->replace_by(C->top());
+              // Since yanking a Node from block, high pressure moves up one
+              hrp_index[0]--;
+              hrp_index[1]--;
+              continue;
+            }
           }
 
           // Fat-projections kill many registers which cannot be used to
--- a/src/share/vm/opto/ifnode.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/ifnode.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -503,7 +503,7 @@
   jint  off = 0;
   if (l->is_top()) {
     return 0;
-  } else if (l->is_Add()) {
+  } else if (l->Opcode() == Op_AddI) {
     if ((off = l->in(1)->find_int_con(0)) != 0) {
       ind = l->in(2);
     } else if ((off = l->in(2)->find_int_con(0)) != 0) {
@@ -820,6 +820,11 @@
 
 static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff);
 
+struct RangeCheck {
+  Node* ctl;
+  jint off;
+};
+
 //------------------------------Ideal------------------------------------------
 // Return a node which is more "ideal" than the current node.  Strip out
 // control copies
@@ -861,83 +866,141 @@
   jint offset1;
   int flip1 = is_range_check(range1, index1, offset1);
   if( flip1 ) {
-    Node *first_prev_dom = NULL;
-
     // Try to remove extra range checks.  All 'up_one_dom' gives up at merges
     // so all checks we inspect post-dominate the top-most check we find.
     // If we are going to fail the current check and we reach the top check
     // then we are guaranteed to fail, so just start interpreting there.
-    // We 'expand' the top 2 range checks to include all post-dominating
+    // We 'expand' the top 3 range checks to include all post-dominating
     // checks.
 
-    // The top 2 range checks seen
-    Node *prev_chk1 = NULL;
-    Node *prev_chk2 = NULL;
+    // The top 3 range checks seen
+    const int NRC = 3;
+    RangeCheck prev_checks[NRC];
+    int nb_checks = 0;
+
     // Low and high offsets seen so far
     jint off_lo = offset1;
     jint off_hi = offset1;
 
-    // Scan for the top 2 checks and collect range of offsets
-    for( int dist = 0; dist < 999; dist++ ) { // Range-Check scan limit
-      if( dom->Opcode() == Op_If &&  // Not same opcode?
-          prev_dom->in(0) == dom ) { // One path of test does dominate?
-        if( dom == this ) return NULL; // dead loop
+    bool found_immediate_dominator = false;
+
+    // Scan for the top checks and collect range of offsets
+    for (int dist = 0; dist < 999; dist++) { // Range-Check scan limit
+      if (dom->Opcode() == Op_If &&  // Not same opcode?
+          prev_dom->in(0) == dom) { // One path of test does dominate?
+        if (dom == this) return NULL; // dead loop
         // See if this is a range check
         Node *index2, *range2;
         jint offset2;
         int flip2 = dom->as_If()->is_range_check(range2, index2, offset2);
         // See if this is a _matching_ range check, checking against
         // the same array bounds.
-        if( flip2 == flip1 && range2 == range1 && index2 == index1 &&
-            dom->outcnt() == 2 ) {
+        if (flip2 == flip1 && range2 == range1 && index2 == index1 &&
+            dom->outcnt() == 2) {
+          if (nb_checks == 0 && dom->in(1) == in(1)) {
+            // Found an immediately dominating test at the same offset.
+            // This kind of back-to-back test can be eliminated locally,
+            // and there is no need to search further for dominating tests.
+            assert(offset2 == offset1, "Same test but different offsets");
+            found_immediate_dominator = true;
+            break;
+          }
           // Gather expanded bounds
           off_lo = MIN2(off_lo,offset2);
           off_hi = MAX2(off_hi,offset2);
-          // Record top 2 range checks
-          prev_chk2 = prev_chk1;
-          prev_chk1 = prev_dom;
-          // If we match the test exactly, then the top test covers
-          // both our lower and upper bounds.
-          if( dom->in(1) == in(1) )
-            prev_chk2 = prev_chk1;
+          // Record top NRC range checks
+          prev_checks[nb_checks%NRC].ctl = prev_dom;
+          prev_checks[nb_checks%NRC].off = offset2;
+          nb_checks++;
         }
       }
       prev_dom = dom;
-      dom = up_one_dom( dom );
-      if( !dom ) break;
+      dom = up_one_dom(dom);
+      if (!dom) break;
     }
 
-
-    // Attempt to widen the dominating range check to cover some later
-    // ones.  Since range checks "fail" by uncommon-trapping to the
-    // interpreter, widening a check can make us speculative enter the
-    // interpreter.  If we see range-check deopt's, do not widen!
-    if (!phase->C->allow_range_check_smearing())  return NULL;
+    if (!found_immediate_dominator) {
+      // Attempt to widen the dominating range check to cover some later
+      // ones.  Since range checks "fail" by uncommon-trapping to the
+      // interpreter, widening a check can make us speculatively enter
+      // the interpreter.  If we see range-check deopt's, do not widen!
+      if (!phase->C->allow_range_check_smearing())  return NULL;
 
-    // Constant indices only need to check the upper bound.
-    // Non-constance indices must check both low and high.
-    if( index1 ) {
-      // Didn't find 2 prior covering checks, so cannot remove anything.
-      if( !prev_chk2 ) return NULL;
-      // 'Widen' the offsets of the 1st and 2nd covering check
-      adjust_check( prev_chk1, range1, index1, flip1, off_lo, igvn );
-      // Do not call adjust_check twice on the same projection
-      // as the first call may have transformed the BoolNode to a ConI
-      if( prev_chk1 != prev_chk2 ) {
-        adjust_check( prev_chk2, range1, index1, flip1, off_hi, igvn );
+      // Didn't find prior covering check, so cannot remove anything.
+      if (nb_checks == 0) {
+        return NULL;
       }
-      // Test is now covered by prior checks, dominate it out
-      prev_dom = prev_chk2;
-    } else {
-      // Didn't find prior covering check, so cannot remove anything.
-      if( !prev_chk1 ) return NULL;
-      // 'Widen' the offset of the 1st and only covering check
-      adjust_check( prev_chk1, range1, index1, flip1, off_hi, igvn );
-      // Test is now covered by prior checks, dominate it out
-      prev_dom = prev_chk1;
+      // Constant indices only need to check the upper bound.
+      // Non-constant indices must check both low and high.
+      int chk0 = (nb_checks - 1) % NRC;
+      if (index1) {
+        if (nb_checks == 1) {
+          return NULL;
+        } else {
+          // If the top range check's constant is the min or max of
+          // all constants we widen the next one to cover the whole
+          // range of constants.
+          RangeCheck rc0 = prev_checks[chk0];
+          int chk1 = (nb_checks - 2) % NRC;
+          RangeCheck rc1 = prev_checks[chk1];
+          if (rc0.off == off_lo) {
+            adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn);
+            prev_dom = rc1.ctl;
+          } else if (rc0.off == off_hi) {
+            adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn);
+            prev_dom = rc1.ctl;
+          } else {
+            // If the top test's constant is not the min or max of all
+            // constants, we need 3 range checks. We must leave the
+            // top test unchanged because widening it would allow the
+            // accesses it protects to successfully read/write out of
+            // bounds.
+            if (nb_checks == 2) {
+              return NULL;
+            }
+            int chk2 = (nb_checks - 3) % NRC;
+            RangeCheck rc2 = prev_checks[chk2];
+            // The top range check a+i covers interval: -a <= i < length-a
+            // The second range check b+i covers interval: -b <= i < length-b
+            if (rc1.off <= rc0.off) {
+              // if b <= a, we change the second range check to:
+              // -min_of_all_constants <= i < length-min_of_all_constants
+              // Together top and second range checks now cover:
+              // -min_of_all_constants <= i < length-a
+              // which is more restrictive than -b <= i < length-b:
+              // -b <= -min_of_all_constants <= i < length-a <= length-b
+              // The third check is then changed to:
+              // -max_of_all_constants <= i < length-max_of_all_constants
+              // so 2nd and 3rd checks restrict allowed values of i to:
+              // -min_of_all_constants <= i < length-max_of_all_constants
+              adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn);
+              adjust_check(rc2.ctl, range1, index1, flip1, off_hi, igvn);
+            } else {
+              // if b > a, we change the second range check to:
+              // -max_of_all_constants <= i < length-max_of_all_constants
+              // Together top and second range checks now cover:
+              // -a <= i < length-max_of_all_constants
+              // which is more restrictive than -b <= i < length-b:
+              // -b < -a <= i < length-max_of_all_constants <= length-b
+              // The third check is then changed to:
+              // -min_of_all_constants <= i < length-min_of_all_constants
+              // so 2nd and 3rd checks restrict allowed values of i to:
+              // -min_of_all_constants <= i < length-max_of_all_constants
+              adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn);
+              adjust_check(rc2.ctl, range1, index1, flip1, off_lo, igvn);
+            }
+            prev_dom = rc2.ctl;
+          }
+        }
+      } else {
+        RangeCheck rc0 = prev_checks[chk0];
+        // 'Widen' the offset of the 1st and only covering check
+        adjust_check(rc0.ctl, range1, index1, flip1, off_hi, igvn);
+        // Test is now covered by prior checks, dominate it out
+        prev_dom = rc0.ctl;
+      }
     }
 
-
   } else {                      // Scan for an equivalent test
 
     Node *cmp;
@@ -1019,7 +1082,7 @@
   // for lower and upper bounds.
   ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj();
   if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate))
-    prev_dom = idom;
+    prev_dom = idom;
 
   // Now walk the current IfNode's projections.
   // Loop ends when 'this' has no more uses.
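Illustrative note (outside the patch): the widened-check logic above targets Java code with several constant-offset accesses to the same array. A minimal sketch of that shape, with hypothetical names and offsets:

  static int sum3(int[] a, int i) {
    // Three range checks with offsets 0, 2 and 5. Keeping the dominating
    // check unchanged and widening two of the dominating checks to the
    // minimum (0) and maximum (5) offsets lets the later, covered check be
    // dominated away without allowing any access to go out of bounds.
    return a[i] + a[i + 2] + a[i + 5];
  }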
--- a/src/share/vm/opto/library_call.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/library_call.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -46,25 +46,28 @@
  public:
  private:
   bool             _is_virtual;
-  bool             _is_predicted;
   bool             _does_virtual_dispatch;
+  int8_t           _predicates_count;  // Intrinsic is predicated by several conditions
+  int8_t           _last_predicate; // Last generated predicate
   vmIntrinsics::ID _intrinsic_id;
 
  public:
-  LibraryIntrinsic(ciMethod* m, bool is_virtual, bool is_predicted, bool does_virtual_dispatch, vmIntrinsics::ID id)
+  LibraryIntrinsic(ciMethod* m, bool is_virtual, int predicates_count, bool does_virtual_dispatch, vmIntrinsics::ID id)
     : InlineCallGenerator(m),
       _is_virtual(is_virtual),
-      _is_predicted(is_predicted),
       _does_virtual_dispatch(does_virtual_dispatch),
+      _predicates_count((int8_t)predicates_count),
+      _last_predicate((int8_t)-1),
       _intrinsic_id(id)
   {
   }
   virtual bool is_intrinsic() const { return true; }
   virtual bool is_virtual()   const { return _is_virtual; }
-  virtual bool is_predicted()   const { return _is_predicted; }
+  virtual bool is_predicated() const { return _predicates_count > 0; }
+  virtual int  predicates_count() const { return _predicates_count; }
   virtual bool does_virtual_dispatch()   const { return _does_virtual_dispatch; }
-  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
-  virtual Node* generate_predicate(JVMState* jvms);
+  virtual JVMState* generate(JVMState* jvms);
+  virtual Node* generate_predicate(JVMState* jvms, int predicate);
   vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; }
 };
 
@@ -107,8 +110,8 @@
   vmIntrinsics::ID  intrinsic_id() const { return _intrinsic->intrinsic_id(); }
   ciMethod*         callee()    const    { return _intrinsic->method(); }
 
-  bool try_to_inline();
-  Node* try_to_predicate();
+  bool  try_to_inline(int predicate);
+  Node* try_to_predicate(int predicate);
 
   void push_result() {
     // Push the result onto the stack.
@@ -307,10 +310,19 @@
   Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting);
   Node* get_key_start_from_aescrypt_object(Node* aescrypt_object);
   Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
+  bool inline_sha_implCompress(vmIntrinsics::ID id);
+  bool inline_digestBase_implCompressMB(int predicate);
+  bool inline_sha_implCompressMB(Node* digestBaseObj, ciInstanceKlass* instklass_SHA,
+                                 bool long_state, address stubAddr, const char *stubName,
+                                 Node* src_start, Node* ofs, Node* limit);
+  Node* get_state_from_sha_object(Node *sha_object);
+  Node* get_state_from_sha5_object(Node *sha_object);
+  Node* inline_digestBase_implCompressMB_predicate(int predicate);
   bool inline_encodeISOArray();
   bool inline_updateCRC32();
   bool inline_updateBytesCRC32();
   bool inline_updateByteBufferCRC32();
+  bool inline_multiplyToLen();
 };
 
 
@@ -319,8 +331,12 @@
   vmIntrinsics::ID id = m->intrinsic_id();
   assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
 
-  if (DisableIntrinsic[0] != '\0'
-      && strstr(DisableIntrinsic, vmIntrinsics::name_at(id)) != NULL) {
+  ccstr disable_intr = NULL;
+
+  if ((DisableIntrinsic[0] != '\0'
+       && strstr(DisableIntrinsic, vmIntrinsics::name_at(id)) != NULL) ||
+      (method_has_option_value("DisableIntrinsic", disable_intr)
+       && strstr(disable_intr, vmIntrinsics::name_at(id)) != NULL)) {
     // disabled by a user request on the command line:
     // example: -XX:DisableIntrinsic=_hashCode,_getClass
     return NULL;
@@ -367,7 +383,7 @@
     }
   }
 
-  bool is_predicted = false;
+  int predicates = 0;
   bool does_virtual_dispatch = false;
 
   switch (id) {
@@ -504,11 +520,32 @@
     if (!UseAESIntrinsics) return NULL;
     break;
 
+  case vmIntrinsics::_multiplyToLen:
+    if (!UseMultiplyToLenIntrinsic) return NULL;
+    break;
+
   case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
   case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
     if (!UseAESIntrinsics) return NULL;
     // these two require the predicated logic
-    is_predicted = true;
+    predicates = 1;
+    break;
+
+  case vmIntrinsics::_sha_implCompress:
+    if (!UseSHA1Intrinsics) return NULL;
+    break;
+
+  case vmIntrinsics::_sha2_implCompress:
+    if (!UseSHA256Intrinsics) return NULL;
+    break;
+
+  case vmIntrinsics::_sha5_implCompress:
+    if (!UseSHA512Intrinsics) return NULL;
+    break;
+
+  case vmIntrinsics::_digestBase_implCompressMB:
+    if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) return NULL;
+    predicates = 3;
     break;
 
   case vmIntrinsics::_updateCRC32:
@@ -577,7 +614,7 @@
     if (!InlineUnsafeOps)  return NULL;
   }
 
-  return new LibraryIntrinsic(m, is_virtual, is_predicted, does_virtual_dispatch, (vmIntrinsics::ID) id);
+  return new LibraryIntrinsic(m, is_virtual, predicates, does_virtual_dispatch, (vmIntrinsics::ID) id);
 }
 
 //----------------------register_library_intrinsics-----------------------
@@ -586,7 +623,7 @@
   // Nothing to do here.
 }
 
-JVMState* LibraryIntrinsic::generate(JVMState* jvms, Parse* parent_parser) {
+JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
   LibraryCallKit kit(jvms, this);
   Compile* C = kit.C;
   int nodes = C->unique();
@@ -601,7 +638,7 @@
   const int bci    = kit.bci();
 
   // Try to inline the intrinsic.
-  if (kit.try_to_inline()) {
+  if (kit.try_to_inline(_last_predicate)) {
     if (C->print_intrinsics() || C->print_inlining()) {
       C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
     }
@@ -634,12 +671,13 @@
   return NULL;
 }
 
-Node* LibraryIntrinsic::generate_predicate(JVMState* jvms) {
+Node* LibraryIntrinsic::generate_predicate(JVMState* jvms, int predicate) {
   LibraryCallKit kit(jvms, this);
   Compile* C = kit.C;
   int nodes = C->unique();
+  _last_predicate = predicate;
 #ifndef PRODUCT
-  assert(is_predicted(), "sanity");
+  assert(is_predicated() && predicate < predicates_count(), "sanity");
   if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
     char buf[1000];
     const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
@@ -649,10 +687,10 @@
   ciMethod* callee = kit.callee();
   const int bci    = kit.bci();
 
-  Node* slow_ctl = kit.try_to_predicate();
+  Node* slow_ctl = kit.try_to_predicate(predicate);
   if (!kit.failing()) {
     if (C->print_intrinsics() || C->print_inlining()) {
-      C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
+      C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual, predicate)" : "(intrinsic, predicate)");
     }
     C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
     if (C->log()) {
@@ -681,7 +719,7 @@
   return NULL;
 }
 
-bool LibraryCallKit::try_to_inline() {
+bool LibraryCallKit::try_to_inline(int predicate) {
   // Handle symbolic names for otherwise undistinguished boolean switches:
   const bool is_store       = true;
   const bool is_native_ptr  = true;
@@ -875,6 +913,17 @@
   case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
     return inline_cipherBlockChaining_AESCrypt(intrinsic_id());
 
+  case vmIntrinsics::_sha_implCompress:
+  case vmIntrinsics::_sha2_implCompress:
+  case vmIntrinsics::_sha5_implCompress:
+    return inline_sha_implCompress(intrinsic_id());
+
+  case vmIntrinsics::_digestBase_implCompressMB:
+    return inline_digestBase_implCompressMB(predicate);
+
+  case vmIntrinsics::_multiplyToLen:
+    return inline_multiplyToLen();
+
   case vmIntrinsics::_encodeISOArray:
     return inline_encodeISOArray();
 
@@ -898,7 +947,7 @@
   }
 }
 
-Node* LibraryCallKit::try_to_predicate() {
+Node* LibraryCallKit::try_to_predicate(int predicate) {
   if (!jvms()->has_method()) {
     // Root JVMState has a null method.
     assert(map()->memory()->Opcode() == Op_Parm, "");
@@ -912,6 +961,8 @@
     return inline_cipherBlockChaining_AESCrypt_predicate(false);
   case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
     return inline_cipherBlockChaining_AESCrypt_predicate(true);
+  case vmIntrinsics::_digestBase_implCompressMB:
+    return inline_digestBase_implCompressMB_predicate(predicate);
 
   default:
     // If you get here, it may be that someone has added a new intrinsic
@@ -2611,7 +2662,8 @@
   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
 
   if (!is_store) {
-    Node* p = make_load(control(), adr, value_type, type, adr_type, MemNode::unordered, is_volatile);
+    MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
+    Node* p = make_load(control(), adr, value_type, type, adr_type, mo, is_volatile);
     // load value
     switch (type) {
     case T_BOOLEAN:
@@ -3346,7 +3398,7 @@
   if (region == NULL)  never_see_null = true;
   Node* p = basic_plus_adr(mirror, offset);
   const TypeKlassPtr*  kls_type = TypeKlassPtr::OBJECT_OR_NULL;
-  Node* kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
+  Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
   Node* null_ctl = top();
   kls = null_check_oop(kls, &null_ctl, never_see_null);
   if (region != NULL) {
@@ -3522,7 +3574,7 @@
       phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
     // If we fall through, it's a plain class.  Get its _super.
     p = basic_plus_adr(kls, in_bytes(Klass::super_offset()));
-    kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));
+    kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));
     null_ctl = top();
     kls = null_check_oop(kls, &null_ctl);
     if (null_ctl != top()) {
@@ -3604,7 +3656,7 @@
     args[which_arg] = arg;
 
     Node* p = basic_plus_adr(arg, class_klass_offset);
-    Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type);
+    Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type);
     klasses[which_arg] = _gvn.transform(kls);
   }
 
@@ -5120,7 +5172,7 @@
       // (At this point we can assume disjoint_bases, since types differ.)
       int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
       Node* p1 = basic_plus_adr(dest_klass, ek_offset);
-      Node* n1 = LoadKlassNode::make(_gvn, immutable_memory(), p1, TypeRawPtr::BOTTOM);
+      Node* n1 = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p1, TypeRawPtr::BOTTOM);
       Node* dest_elem_klass = _gvn.transform(n1);
       Node* cv = generate_checkcast_arraycopy(adr_type,
                                               dest_elem_klass,
@@ -5695,6 +5747,108 @@
   return true;
 }
 
+//-------------inline_multiplyToLen-----------------------------------
+bool LibraryCallKit::inline_multiplyToLen() {
+  assert(UseMultiplyToLenIntrinsic, "not implemented on this platform");
+
+  address stubAddr = StubRoutines::multiplyToLen();
+  if (stubAddr == NULL) {
+    return false; // Intrinsic's stub is not implemented on this platform
+  }
+  const char* stubName = "multiplyToLen";
+
+  assert(callee()->signature()->size() == 5, "multiplyToLen has 5 parameters");
+
+  Node* x    = argument(1);
+  Node* xlen = argument(2);
+  Node* y    = argument(3);
+  Node* ylen = argument(4);
+  Node* z    = argument(5);
+
+  const Type* x_type = x->Value(&_gvn);
+  const Type* y_type = y->Value(&_gvn);
+  const TypeAryPtr* top_x = x_type->isa_aryptr();
+  const TypeAryPtr* top_y = y_type->isa_aryptr();
+  if (top_x  == NULL || top_x->klass()  == NULL ||
+      top_y == NULL || top_y->klass() == NULL) {
+    // failed array check
+    return false;
+  }
+
+  BasicType x_elem = x_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
+  BasicType y_elem = y_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
+  if (x_elem != T_INT || y_elem != T_INT) {
+    return false;
+  }
+
+  // Set the original stack and the reexecute bit for the interpreter to reexecute
+  // the bytecode that invokes BigInteger.multiplyToLen() if deoptimization happens
+  // on the return from z array allocation in runtime.
+  { PreserveReexecuteState preexecs(this);
+    jvms()->set_should_reexecute(true);
+
+    Node* x_start = array_element_address(x, intcon(0), x_elem);
+    Node* y_start = array_element_address(y, intcon(0), y_elem);
+    // 'x_start' points to the start of the x array (element 0)
+    // 'y_start' points to the start of the y array (element 0)
+
+    // Allocate the result array
+    Node* zlen = _gvn.transform(new(C) AddINode(xlen, ylen));
+    ciKlass* klass = ciTypeArrayKlass::make(T_INT);
+    Node* klass_node = makecon(TypeKlassPtr::make(klass));
+
+    IdealKit ideal(this);
+
+#define __ ideal.
+     Node* one = __ ConI(1);
+     Node* zero = __ ConI(0);
+     IdealVariable need_alloc(ideal), z_alloc(ideal);  __ declarations_done();
+     __ set(need_alloc, zero);
+     __ set(z_alloc, z);
+     __ if_then(z, BoolTest::eq, null()); {
+       __ increment (need_alloc, one);
+     } __ else_(); {
+       // Update graphKit memory and control from IdealKit.
+       sync_kit(ideal);
+       Node* zlen_arg = load_array_length(z);
+       // Update IdealKit memory and control from graphKit.
+       __ sync_kit(this);
+       __ if_then(zlen_arg, BoolTest::lt, zlen); {
+         __ increment (need_alloc, one);
+       } __ end_if();
+     } __ end_if();
+
+     __ if_then(__ value(need_alloc), BoolTest::ne, zero); {
+       // Update graphKit memory and control from IdealKit.
+       sync_kit(ideal);
+       Node * narr = new_array(klass_node, zlen, 1);
+       // Update IdealKit memory and control from graphKit.
+       __ sync_kit(this);
+       __ set(z_alloc, narr);
+     } __ end_if();
+
+     sync_kit(ideal);
+     z = __ value(z_alloc);
+     // Can't use TypeAryPtr::INTS which uses Bottom offset.
+     _gvn.set_type(z, TypeOopPtr::make_from_klass(klass));
+     // Final sync IdealKit and GraphKit.
+     final_sync(ideal);
+#undef __
+
+    Node* z_start = array_element_address(z, intcon(0), T_INT);
+
+    Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
+                                   OptoRuntime::multiplyToLen_Type(),
+                                   stubAddr, stubName, TypePtr::BOTTOM,
+                                   x_start, xlen, y_start, ylen, z_start, zlen);
+  } // original reexecute is set back here
+
+  C->set_has_split_ifs(true); // Has chance for split-if optimization
+  set_result(z);
+  return true;
+}
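As a hedged illustration (not part of the patch, and not the JDK source): a simplified Java sketch of the multiplication the multiplyToLen stub replaces, assuming int[] limbs stored most-significant word first as in java.math.BigInteger. It zeroes z up front instead of special-casing the first row:

  static int[] multiplyToLenSketch(int[] x, int xlen, int[] y, int ylen, int[] z) {
    final long LONG_MASK = 0xFFFFFFFFL;
    if (z == null || z.length < xlen + ylen) {
      z = new int[xlen + ylen];              // mirrors the re-allocation emitted above
    }
    java.util.Arrays.fill(z, 0, xlen + ylen, 0);
    for (int i = xlen - 1; i >= 0; i--) {    // grade-school multiplication over 32-bit limbs
      long carry = 0;
      for (int j = ylen - 1, k = i + j + 1; j >= 0; j--, k--) {
        long product = (y[j] & LONG_MASK) * (x[i] & LONG_MASK)
                     + (z[k] & LONG_MASK) + carry;
        z[k] = (int) product;
        carry = product >>> 32;
      }
      z[i] = (int) carry;
    }
    return z;
  }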
+
+
 /**
  * Calculate CRC32 for byte.
  * int java.util.zip.CRC32.update(int crc, int b)
@@ -5866,10 +6020,26 @@
   BasicType bt = field->layout_type();
 
   // Build the resultant type of the load
-  const Type *type = TypeOopPtr::make_from_klass(field_klass->as_klass());
-
+  const Type *type;
+  if (bt == T_OBJECT) {
+    type = TypeOopPtr::make_from_klass(field_klass->as_klass());
+  } else {
+    type = Type::get_const_basic_type(bt);
+  }
+
+  if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
+    insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
+  }
   // Build the load.
-  Node* loadedField = make_load(NULL, adr, type, bt, adr_type, MemNode::unordered, is_vol);
+  MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
+  Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, is_vol);
+  // If reference is volatile, prevent following memory ops from
+  // floating up past the volatile read.  Also prevents commoning
+  // another volatile read.
+  if (is_vol) {
+    // Memory barrier includes bogus read of value to force load BEFORE membar
+    insert_mem_bar(Op_MemBarAcquire, loadedField);
+  }
   return loadedField;
 }
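Illustrative note (outside the patch): the acquire ordering and trailing MemBarAcquire added above are what the usual volatile publication pattern relies on; a minimal Java sketch with hypothetical names:

  class Holder {
    int payload;                // written before 'ready' is set by the publisher
    volatile boolean ready;     // the volatile field read with acquire semantics

    void publish(int v) { payload = v; ready = true; }

    int consume() {
      if (ready) {              // volatile read: later loads must not float above it
        return payload;         // observes the value published before 'ready' was set
      }
      return -1;
    }
  }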
 
@@ -5996,7 +6166,7 @@
   assert(tinst != NULL, "CBC obj is null");
   assert(tinst->klass()->is_loaded(), "CBC obj is not loaded");
   ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
-  if (!klass_AESCrypt->is_loaded()) return false;
+  assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
 
   ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
   const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
@@ -6071,11 +6241,8 @@
 //    note cipher==plain is more conservative than the original java code but that's OK
 //
 Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting) {
-  // First, check receiver for NULL since it is virtual method.
+  // The receiver was checked for NULL already.
   Node* objCBC = argument(0);
-  objCBC = null_check(objCBC);
-
-  if (stopped()) return NULL; // Always NULL
 
   // Load embeddedCipher field of CipherBlockChaining object.
   Node* embeddedCipherObj = load_field_from_object(objCBC, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
@@ -6122,3 +6289,258 @@
   record_for_igvn(region);
   return _gvn.transform(region);
 }
+
+//------------------------------inline_sha_implCompress-----------------------
+//
+// Calculate SHA (i.e., SHA-1) for single-block byte[] array.
+// void com.sun.security.provider.SHA.implCompress(byte[] buf, int ofs)
+//
+// Calculate SHA2 (i.e., SHA-224 or SHA-256) for single-block byte[] array.
+// void com.sun.security.provider.SHA2.implCompress(byte[] buf, int ofs)
+//
+// Calculate SHA5 (i.e., SHA-384 or SHA-512) for single-block byte[] array.
+// void com.sun.security.provider.SHA5.implCompress(byte[] buf, int ofs)
+//
+bool LibraryCallKit::inline_sha_implCompress(vmIntrinsics::ID id) {
+  assert(callee()->signature()->size() == 2, "sha_implCompress has 2 parameters");
+
+  Node* sha_obj = argument(0);
+  Node* src     = argument(1); // type oop
+  Node* ofs     = argument(2); // type int
+
+  const Type* src_type = src->Value(&_gvn);
+  const TypeAryPtr* top_src = src_type->isa_aryptr();
+  if (top_src  == NULL || top_src->klass()  == NULL) {
+    // failed array check
+    return false;
+  }
+  // Figure out the size and type of the elements we will be copying.
+  BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
+  if (src_elem != T_BYTE) {
+    return false;
+  }
+  // 'src_start' points to src array + offset
+  Node* src_start = array_element_address(src, ofs, src_elem);
+  Node* state = NULL;
+  address stubAddr;
+  const char *stubName;
+
+  switch(id) {
+  case vmIntrinsics::_sha_implCompress:
+    assert(UseSHA1Intrinsics, "need SHA1 instruction support");
+    state = get_state_from_sha_object(sha_obj);
+    stubAddr = StubRoutines::sha1_implCompress();
+    stubName = "sha1_implCompress";
+    break;
+  case vmIntrinsics::_sha2_implCompress:
+    assert(UseSHA256Intrinsics, "need SHA256 instruction support");
+    state = get_state_from_sha_object(sha_obj);
+    stubAddr = StubRoutines::sha256_implCompress();
+    stubName = "sha256_implCompress";
+    break;
+  case vmIntrinsics::_sha5_implCompress:
+    assert(UseSHA512Intrinsics, "need SHA512 instruction support");
+    state = get_state_from_sha5_object(sha_obj);
+    stubAddr = StubRoutines::sha512_implCompress();
+    stubName = "sha512_implCompress";
+    break;
+  default:
+    fatal_unexpected_iid(id);
+    return false;
+  }
+  if (state == NULL) return false;
+
+  // Call the stub.
+  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::sha_implCompress_Type(),
+                                 stubAddr, stubName, TypePtr::BOTTOM,
+                                 src_start, state);
+
+  return true;
+}
+
+//------------------------------inline_digestBase_implCompressMB-----------------------
+//
+// Calculate SHA/SHA2/SHA5 for multi-block byte[] array.
+// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
+//
+bool LibraryCallKit::inline_digestBase_implCompressMB(int predicate) {
+  assert(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics,
+         "need SHA1/SHA256/SHA512 instruction support");
+  assert((uint)predicate < 3, "sanity");
+  assert(callee()->signature()->size() == 3, "digestBase_implCompressMB has 3 parameters");
+
+  Node* digestBase_obj = argument(0); // The receiver was checked for NULL already.
+  Node* src            = argument(1); // byte[] array
+  Node* ofs            = argument(2); // type int
+  Node* limit          = argument(3); // type int
+
+  const Type* src_type = src->Value(&_gvn);
+  const TypeAryPtr* top_src = src_type->isa_aryptr();
+  if (top_src  == NULL || top_src->klass()  == NULL) {
+    // failed array check
+    return false;
+  }
+  // Figure out the size and type of the elements we will be copying.
+  BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
+  if (src_elem != T_BYTE) {
+    return false;
+  }
+  // 'src_start' points to src array + offset
+  Node* src_start = array_element_address(src, ofs, src_elem);
+
+  const char* klass_SHA_name = NULL;
+  const char* stub_name = NULL;
+  address     stub_addr = NULL;
+  bool        long_state = false;
+
+  switch (predicate) {
+  case 0:
+    if (UseSHA1Intrinsics) {
+      klass_SHA_name = "sun/security/provider/SHA";
+      stub_name = "sha1_implCompressMB";
+      stub_addr = StubRoutines::sha1_implCompressMB();
+    }
+    break;
+  case 1:
+    if (UseSHA256Intrinsics) {
+      klass_SHA_name = "sun/security/provider/SHA2";
+      stub_name = "sha256_implCompressMB";
+      stub_addr = StubRoutines::sha256_implCompressMB();
+    }
+    break;
+  case 2:
+    if (UseSHA512Intrinsics) {
+      klass_SHA_name = "sun/security/provider/SHA5";
+      stub_name = "sha512_implCompressMB";
+      stub_addr = StubRoutines::sha512_implCompressMB();
+      long_state = true;
+    }
+    break;
+  default:
+    fatal(err_msg_res("unknown SHA intrinsic predicate: %d", predicate));
+  }
+  if (klass_SHA_name != NULL) {
+    // get DigestBase klass to look up the SHA klass
+    const TypeInstPtr* tinst = _gvn.type(digestBase_obj)->isa_instptr();
+    assert(tinst != NULL, "digestBase_obj is not instance???");
+    assert(tinst->klass()->is_loaded(), "DigestBase is not loaded");
+
+    ciKlass* klass_SHA = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make(klass_SHA_name));
+    assert(klass_SHA->is_loaded(), "predicate checks that this class is loaded");
+    ciInstanceKlass* instklass_SHA = klass_SHA->as_instance_klass();
+    return inline_sha_implCompressMB(digestBase_obj, instklass_SHA, long_state, stub_addr, stub_name, src_start, ofs, limit);
+  }
+  return false;
+}
+//------------------------------inline_sha_implCompressMB-----------------------
+bool LibraryCallKit::inline_sha_implCompressMB(Node* digestBase_obj, ciInstanceKlass* instklass_SHA,
+                                               bool long_state, address stubAddr, const char *stubName,
+                                               Node* src_start, Node* ofs, Node* limit) {
+  const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_SHA);
+  const TypeOopPtr* xtype = aklass->as_instance_type();
+  Node* sha_obj = new (C) CheckCastPPNode(control(), digestBase_obj, xtype);
+  sha_obj = _gvn.transform(sha_obj);
+
+  Node* state;
+  if (long_state) {
+    state = get_state_from_sha5_object(sha_obj);
+  } else {
+    state = get_state_from_sha_object(sha_obj);
+  }
+  if (state == NULL) return false;
+
+  // Call the stub.
+  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
+                                 OptoRuntime::digestBase_implCompressMB_Type(),
+                                 stubAddr, stubName, TypePtr::BOTTOM,
+                                 src_start, state, ofs, limit);
+  // return ofs (int)
+  Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
+  set_result(result);
+
+  return true;
+}
+
+//------------------------------get_state_from_sha_object-----------------------
+Node * LibraryCallKit::get_state_from_sha_object(Node *sha_object) {
+  Node* sha_state = load_field_from_object(sha_object, "state", "[I", /*is_exact*/ false);
+  assert (sha_state != NULL, "wrong version of sun.security.provider.SHA/SHA2");
+  if (sha_state == NULL) return (Node *) NULL;
+
+  // now have the array, need to get the start address of the state array
+  Node* state = array_element_address(sha_state, intcon(0), T_INT);
+  return state;
+}
+
+//------------------------------get_state_from_sha5_object-----------------------
+Node * LibraryCallKit::get_state_from_sha5_object(Node *sha_object) {
+  Node* sha_state = load_field_from_object(sha_object, "state", "[J", /*is_exact*/ false);
+  assert (sha_state != NULL, "wrong version of sun.security.provider.SHA5");
+  if (sha_state == NULL) return (Node *) NULL;
+
+  // now have the array, need to get the start address of the state array
+  Node* state = array_element_address(sha_state, intcon(0), T_LONG);
+  return state;
+}
+
+//----------------------------inline_digestBase_implCompressMB_predicate----------------------------
+// Return node representing slow path of predicate check.
+// the pseudo code we want to emulate with this predicate is:
+//    if (digestBaseObj instanceof SHA/SHA2/SHA5) do_intrinsic, else do_javapath
+//
+Node* LibraryCallKit::inline_digestBase_implCompressMB_predicate(int predicate) {
+  assert(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics,
+         "need SHA1/SHA256/SHA512 instruction support");
+  assert((uint)predicate < 3, "sanity");
+
+  // The receiver was checked for NULL already.
+  Node* digestBaseObj = argument(0);
+
+  // get DigestBase klass for instanceOf check
+  const TypeInstPtr* tinst = _gvn.type(digestBaseObj)->isa_instptr();
+  assert(tinst != NULL, "digestBaseObj is null");
+  assert(tinst->klass()->is_loaded(), "DigestBase is not loaded");
+
+  const char* klass_SHA_name = NULL;
+  switch (predicate) {
+  case 0:
+    if (UseSHA1Intrinsics) {
+      // we want to do an instanceof comparison against the SHA class
+      klass_SHA_name = "sun/security/provider/SHA";
+    }
+    break;
+  case 1:
+    if (UseSHA256Intrinsics) {
+      // we want to do an instanceof comparison against the SHA2 class
+      klass_SHA_name = "sun/security/provider/SHA2";
+    }
+    break;
+  case 2:
+    if (UseSHA512Intrinsics) {
+      // we want to do an instanceof comparison against the SHA5 class
+      klass_SHA_name = "sun/security/provider/SHA5";
+    }
+    break;
+  default:
+    fatal(err_msg_res("unknown SHA intrinsic predicate: %d", predicate));
+  }
+
+  ciKlass* klass_SHA = NULL;
+  if (klass_SHA_name != NULL) {
+    klass_SHA = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make(klass_SHA_name));
+  }
+  if ((klass_SHA == NULL) || !klass_SHA->is_loaded()) {
+    // if none of SHA/SHA2/SHA5 is loaded, we never take the intrinsic fast path
+    Node* ctrl = control();
+    set_control(top()); // no intrinsic path
+    return ctrl;
+  }
+  ciInstanceKlass* instklass_SHA = klass_SHA->as_instance_klass();
+
+  Node* instofSHA = gen_instanceof(digestBaseObj, makecon(TypeKlassPtr::make(instklass_SHA)));
+  Node* cmp_instof = _gvn.transform(new (C) CmpINode(instofSHA, intcon(1)));
+  Node* bool_instof = _gvn.transform(new (C) BoolNode(cmp_instof, BoolTest::ne));
+  Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);
+
+  return instof_false;  // even if it is NULL
+}
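As a hedged illustration (not part of the patch): the three predicates together emulate an instanceof dispatch over DigestBase.implCompressMultiBlock. A rough, self-contained Java sketch, with hypothetical marker types standing in for sun.security.provider.SHA/SHA2/SHA5:

  interface Sha1Like {}
  interface Sha256Like {}
  interface Sha512Like {}

  final class DigestDispatchSketch {
    static String pickPath(Object digest) {
      if (digest instanceof Sha1Like)   return "sha1_implCompressMB stub";   // predicate 0
      if (digest instanceof Sha256Like) return "sha256_implCompressMB stub"; // predicate 1
      if (digest instanceof Sha512Like) return "sha512_implCompressMB stub"; // predicate 2
      return "compiled Java bytecode";                                       // do_javapath
    }
  }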
--- a/src/share/vm/opto/loopTransform.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/loopTransform.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -269,10 +269,9 @@
 bool IdealLoopTree::policy_peeling( PhaseIdealLoop *phase ) const {
   Node *test = ((IdealLoopTree*)this)->tail();
   int  body_size = ((IdealLoopTree*)this)->_body.size();
-  int  live_node_count = phase->C->live_nodes();
   // Peeling does loop cloning which can result in O(N^2) node construction
   if( body_size > 255 /* Prevent overflow for large body_size */
-      || (body_size * body_size + live_node_count > MaxNodeLimit) ) {
+      || (body_size * body_size + phase->C->live_nodes()) > phase->C->max_node_limit() ) {
     return false;           // too large to safely clone
   }
   while( test != _head ) {      // Scan till run off top of loop
@@ -601,7 +600,7 @@
     return false;
   if (new_body_size > unroll_limit ||
       // Unrolling can result in a large amount of node construction
-      new_body_size >= MaxNodeLimit - (uint) phase->C->live_nodes()) {
+      new_body_size >= phase->C->max_node_limit() - phase->C->live_nodes()) {
     return false;
   }
 
@@ -882,6 +881,20 @@
   return n;
 }
 
+bool PhaseIdealLoop::cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop) {
+  Node* castii = new (C) CastIINode(incr, TypeInt::INT, true);
+  castii->set_req(0, ctrl);
+  register_new_node(castii, ctrl);
+  for (DUIterator_Fast imax, i = incr->fast_outs(imax); i < imax; i++) {
+    Node* n = incr->fast_out(i);
+    if (n->is_Phi() && n->in(0) == loop) {
+      int nrep = n->replace_edge(incr, castii);
+      return true;
+    }
+  }
+  return false;
+}
+
 //------------------------------insert_pre_post_loops--------------------------
 // Insert pre and post loops.  If peel_only is set, the pre-loop can not have
 // more iterations added.  It acts as a 'peel' only, no lower-bound RCE, no
@@ -1080,6 +1093,24 @@
     }
   }
 
+  // Nodes inside the loop may be control dependent on a predicate
+  // that was moved before the preloop. If the back branch of the main
+  // or post loop becomes dead, those nodes are no longer dependent on
+  // the test that guards the loop nest, so an array access they feed
+  // could execute independently of that guarding test. We add a
+  // special CastII on the if branch that enters the loop, between the
+  // incoming induction variable value and the induction variable Phi,
+  // to preserve the correct dependencies.
+
+  // CastII for the post loop:
+  bool inserted = cast_incr_before_loop(zer_opaq->in(1), zer_taken, post_head);
+  assert(inserted, "no castII inserted");
+
+  // CastII for the main loop:
+  inserted = cast_incr_before_loop(pre_incr, min_taken, main_head);
+  assert(inserted, "no castII inserted");
+
   // Step B4: Shorten the pre-loop to run only 1 iteration (for now).
   // RCE and alignment may change this later.
   Node *cmp_end = pre_end->cmp_node();
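Illustrative note (outside the patch): the kind of loop the CastII insertion above protects, sketched in Java under the assumption that its range check is hoisted as a loop predicate before the pre-loop:

  static int sum(int[] a, int n) {
    int s = 0;
    for (int i = 0; i < n; i++) {  // split into pre-, main- and post-loop
      s += a[i];                   // range check hoisted to a predicate before the pre-loop
    }
    return s;
  }

The CastII pins the induction-variable value entering the main and post loops to the guard that decides whether those loops run at all, so the body cannot be scheduled independently of that guard even if the back branch later becomes dead.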
@@ -2287,8 +2318,8 @@
 
   // Skip next optimizations if running low on nodes. Note that
   // policy_unswitching and policy_maximally_unroll have this check.
-  uint nodes_left = MaxNodeLimit - (uint) phase->C->live_nodes();
-  if ((2 * _body.size()) > nodes_left) {
+  int nodes_left = phase->C->max_node_limit() - phase->C->live_nodes();
+  if ((int)(2 * _body.size()) > nodes_left) {
     return true;
   }
 
--- a/src/share/vm/opto/loopUnswitch.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/loopUnswitch.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -59,8 +59,8 @@
   if (!_head->is_Loop()) {
     return false;
   }
-  uint nodes_left = MaxNodeLimit - phase->C->live_nodes();
-  if (2 * _body.size() > nodes_left) {
+  int nodes_left = phase->C->max_node_limit() - phase->C->live_nodes();
+  if ((int)(2 * _body.size()) > nodes_left) {
     return false; // Too speculative if running low on nodes.
   }
   LoopNode* head = _head->as_Loop();
--- a/src/share/vm/opto/loopnode.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/loopnode.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -602,6 +602,8 @@
     return ctrl;
   }
 
+  bool cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop);
+
 public:
   bool has_node( Node* n ) const {
     guarantee(n != NULL, "No Node.");
--- a/src/share/vm/opto/loopopts.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/loopopts.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -239,8 +239,13 @@
   ProjNode* dp_proj  = dp->as_Proj();
   ProjNode* unc_proj = iff->as_If()->proj_out(1 - dp_proj->_con)->as_Proj();
   if (exclude_loop_predicate &&
-      unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate))
+      (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) ||
+       unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_range_check))) {
+    // If this is a range check (IfNode::is_range_check), do not
+    // reorder because Compile::allow_range_check_smearing might have
+    // changed the check.
     return; // Let IGVN transformation change control dependence.
+  }
 
   IdealLoopTree *old_loop = get_loop(dp);
 
@@ -734,7 +739,7 @@
   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
     weight += region->fast_out(i)->outcnt();
   }
-  int nodes_left = MaxNodeLimit - C->live_nodes();
+  int nodes_left = C->max_node_limit() - C->live_nodes();
   if (weight * 8 > nodes_left) {
 #ifndef PRODUCT
     if (PrintOpto)
@@ -896,23 +901,23 @@
   int n_op = n->Opcode();
 
   // Check for an IF being dominated by another IF same test
-  if( n_op == Op_If ) {
+  if (n_op == Op_If) {
     Node *bol = n->in(1);
     uint max = bol->outcnt();
     // Check for same test used more than once?
-    if( n_op == Op_If && max > 1 && bol->is_Bool() ) {
+    if (max > 1 && bol->is_Bool()) {
       // Search up IDOMs to see if this IF is dominated.
       Node *cutoff = get_ctrl(bol);
 
       // Now search up IDOMs till cutoff, looking for a dominating test
       Node *prevdom = n;
       Node *dom = idom(prevdom);
-      while( dom != cutoff ) {
-        if( dom->req() > 1 && dom->in(1) == bol && prevdom->in(0) == dom ) {
+      while (dom != cutoff) {
+        if (dom->req() > 1 && dom->in(1) == bol && prevdom->in(0) == dom) {
           // Replace the dominated test with an obvious true or false.
           // Place it on the IGVN worklist for later cleanup.
           C->set_major_progress();
-          dominated_by( prevdom, n, false, true );
+          dominated_by(prevdom, n, false, true);
 #ifndef PRODUCT
           if( VerifyLoopOptimizations ) verify();
 #endif
--- a/src/share/vm/opto/machnode.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/machnode.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -639,7 +639,6 @@
 }
 #endif
 
-
 bool MachCallNode::return_value_is_used() const {
   if (tf()->range()->cnt() == TypeFunc::Parms) {
     // void return
@@ -657,6 +656,14 @@
   return false;
 }
 
+// Similar to cousin class CallNode::returns_pointer
+// Because this is used in deoptimization, we want the type info, not the data
+// flow info; the interpreter will "use" things that are dead to the optimizer.
+bool MachCallNode::returns_pointer() const {
+  const TypeTuple *r = tf()->range();
+  return (r->cnt() > TypeFunc::Parms &&
+          r->field_at(TypeFunc::Parms)->isa_ptr());
+}
 
 //------------------------------Registers--------------------------------------
 const RegMask &MachCallNode::in_RegMask(uint idx) const {
--- a/src/share/vm/opto/machnode.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/machnode.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -784,6 +784,10 @@
 
   bool returns_long() const { return tf()->return_type() == T_LONG; }
   bool return_value_is_used() const;
+
+  // Similar to cousin class CallNode::returns_pointer
+  bool returns_pointer() const;
+
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const;
 #endif
--- a/src/share/vm/opto/macro.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/macro.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -699,6 +699,7 @@
   ciType* elem_type;
 
   Node* res = alloc->result_cast();
+  assert(res == NULL || res->is_CheckCastPP(), "unexpected AllocateNode result");
   const TypeOopPtr* res_type = NULL;
   if (res != NULL) { // Could be NULL when there are no users
     res_type = _igvn.type(res)->isa_oopptr();
@@ -963,7 +964,11 @@
 }
 
 bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
-  if (!EliminateAllocations || !alloc->_is_non_escaping) {
+  // Don't do scalar replacement if the frame can be popped by JVMTI:
+  // if reallocation fails during deoptimization we'll pop all
+  // interpreter frames for this compiled frame and that won't play
+  // nice with JVMTI popframe.
+  if (!EliminateAllocations || JvmtiExport::can_pop_frame() || !alloc->_is_non_escaping) {
     return false;
   }
   Node* klass = alloc->in(AllocateNode::KlassNode);
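Illustrative note (outside the patch): the kind of non-escaping allocation that EliminateAllocations scalar-replaces, and that this change keeps whenever a JVMTI agent may pop frames; the example is hypothetical:

  static int distanceSquared(int ax, int ay, int bx, int by) {
    int[] d = new int[] { bx - ax, by - ay };  // never escapes this method
    return d[0] * d[0] + d[1] * d[1];          // after elimination, no allocation remains
  }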
@@ -1031,6 +1036,8 @@
     return false;
   }
 
+  assert(boxing->result_cast() == NULL, "unexpected boxing node result");
+
   extract_call_projections(boxing);
 
   const TypeTuple* r = boxing->tf()->range();
@@ -2191,7 +2198,7 @@
     Node* klass_node = AllocateNode::Ideal_klass(obj, &_igvn);
     if (klass_node == NULL) {
       Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
-      klass_node = transform_later( LoadKlassNode::make(_igvn, mem, k_adr, _igvn.type(k_adr)->is_ptr()) );
+      klass_node = transform_later(LoadKlassNode::make(_igvn, NULL, mem, k_adr, _igvn.type(k_adr)->is_ptr()));
 #ifdef _LP64
       if (UseCompressedClassPointers && klass_node->is_DecodeNKlass()) {
         assert(klass_node->in(1)->Opcode() == Op_LoadNKlass, "sanity");
--- a/src/share/vm/opto/memnode.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/memnode.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -859,6 +859,10 @@
 
 
 //=============================================================================
+// Should LoadNode::Ideal() attempt to remove control edges?
+bool LoadNode::can_remove_control() const {
+  return true;
+}
 uint LoadNode::size_of() const { return sizeof(*this); }
 uint LoadNode::cmp( const Node &n ) const
 { return !Type::cmp( _type, ((LoadNode&)n)._type ); }
@@ -1251,6 +1255,16 @@
               result = new (phase->C) ConvI2LNode(phase->transform(result));
             }
 #endif
+            // Boxing/unboxing can be done from signed & unsigned loads (e.g. LoadUB -> ... -> LoadB pair).
+            // Need to preserve unboxing load type if it is unsigned.
+            switch(this->Opcode()) {
+              case Op_LoadUB:
+                result = new (phase->C) AndINode(phase->transform(result), phase->intcon(0xFF));
+                break;
+              case Op_LoadUS:
+                result = new (phase->C) AndINode(phase->transform(result), phase->intcon(0xFFFF));
+                break;
+            }
             return result;
           }
         }
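Illustrative note (outside the patch): the added AndI(..., 0xFF) / AndI(..., 0xFFFF) nodes reproduce, in the IR, the masking that turns a sign-extended value back into its unsigned reading; a minimal Java sketch:

  static void demo() {
    byte stored = (byte) 0xF0;     // the value the store wrote
    int asLoadB  = stored;         // sign-extended read: -16 (what a plain LoadB yields)
    int asLoadUB = stored & 0xFF;  // zero-extended read: 240 (what LoadUB must yield)
    assert asLoadUB == (asLoadB & 0xFF);
  }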
@@ -1455,7 +1469,7 @@
 }
 
 //------------------------------Ideal------------------------------------------
-// If the load is from Field memory and the pointer is non-null, we can
+// If the load is from Field memory and the pointer is non-null, it might be possible to
 // zero out the control input.
 // If the offset is constant and the base is an object allocation,
 // try to hook me up to the exact initializing store.
@@ -1480,6 +1494,7 @@
       && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw) {
     // Check for useless control edge in some common special cases
     if (in(MemNode::Control) != NULL
+        && can_remove_control()
         && phase->type(base)->higher_equal(TypePtr::NOTNULL)
         && all_controls_dominate(base, phase->C->start())) {
       // A method-invariant, non-null address (constant or 'this' argument).
@@ -2007,9 +2022,8 @@
 //=============================================================================
 //----------------------------LoadKlassNode::make------------------------------
 // Polymorphic factory method:
-Node *LoadKlassNode::make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at, const TypeKlassPtr *tk ) {
+Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node *mem, Node *adr, const TypePtr* at, const TypeKlassPtr *tk) {
   Compile* C = gvn.C;
-  Node *ctl = NULL;
   // sanity check the alias category against the created node type
   const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
   assert(adr_type != NULL, "expecting TypeKlassPtr");
@@ -2029,6 +2043,12 @@
   return klass_value_common(phase);
 }
 
+// In most cases, LoadKlassNode does not have the control input set. If the control
+// input is set, it must not be removed (by LoadNode::Ideal()).
+bool LoadKlassNode::can_remove_control() const {
+  return false;
+}
+
 const Type *LoadNode::klass_value_common( PhaseTransform *phase ) const {
   // Either input is TOP ==> the result is TOP
   const Type *t1 = phase->type( in(MemNode::Memory) );
--- a/src/share/vm/opto/memnode.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/memnode.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -148,6 +148,8 @@
 protected:
   virtual uint cmp(const Node &n) const;
   virtual uint size_of() const; // Size is bigger
+  // Should LoadNode::Ideal() attempt to remove control edges?
+  virtual bool can_remove_control() const;
   const Type* const _type;      // What kind of value is loaded?
 public:
 
@@ -171,8 +173,10 @@
   // we are equivalent to.  We look for Load of a Store.
   virtual Node *Identity( PhaseTransform *phase );
 
-  // If the load is from Field memory and the pointer is non-null, we can
+  // If the load is from Field memory and the pointer is non-null, it might be possible to
   // zero out the control input.
+  // If the offset is constant and the base is an object allocation,
+  // try to hook me up to the exact initializing store.
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 
   // Split instance field load through Phi.
@@ -413,6 +417,10 @@
 //------------------------------LoadKlassNode----------------------------------
 // Load a Klass from an object
 class LoadKlassNode : public LoadPNode {
+protected:
+  // In most cases, LoadKlassNode does not have the control input set. If the control
+  // input is set, it must not be removed (by LoadNode::Ideal()).
+  virtual bool can_remove_control() const;
 public:
   LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo)
     : LoadPNode(c, mem, adr, at, tk, mo) {}
@@ -422,8 +430,8 @@
   virtual bool depends_only_on_test() const { return true; }
 
   // Polymorphic factory method:
-  static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at,
-                     const TypeKlassPtr *tk = TypeKlassPtr::OBJECT );
+  static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
+                    const TypeKlassPtr* tk = TypeKlassPtr::OBJECT);
 };
 
 //------------------------------LoadNKlassNode---------------------------------
--- a/src/share/vm/opto/multnode.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/multnode.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -194,7 +194,9 @@
     }
   }
 
-  ProjNode* other_proj = iff->proj_out(1-_con)->as_Proj();
+  ProjNode* other_proj = iff->proj_out(1-_con);
+  if (other_proj == NULL) // Should never happen, but make Parfait happy.
+      return false;
   if (other_proj->is_uncommon_trap_proj(reason)) {
     assert(reason == Deoptimization::Reason_none ||
            Compile::current()->is_predicate_opaq(iff->in(1)->in(1)), "should be on the list");
--- a/src/share/vm/opto/node.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/node.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -69,7 +69,7 @@
   Compile::set_debug_idx(new_debug_idx);
   set_debug_idx( new_debug_idx );
   assert(Compile::current()->unique() < (INT_MAX - 1), "Node limit exceeded INT_MAX");
-  assert(Compile::current()->live_nodes() < (uint)MaxNodeLimit, "Live Node limit exceeded limit");
+  assert(Compile::current()->live_nodes() < Compile::current()->max_node_limit(), "Live Node limit exceeded limit");
   if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (int)_idx == BreakAtNode)) {
     tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d", _idx, _debug_idx);
     BREAKPOINT;
@@ -330,7 +330,7 @@
 Node::Node(uint req)
   : _idx(IDX_INIT(req))
 {
-  assert( req < (uint)(MaxNodeLimit - NodeLimitFudgeFactor), "Input limit exceeded" );
+  assert( req < Compile::current()->max_node_limit() - NodeLimitFudgeFactor, "Input limit exceeded" );
   debug_only( verify_construction() );
   NOT_PRODUCT(nodes_created++);
   if (req == 0) {
@@ -535,6 +535,9 @@
   if (n->is_Call()) {
     n->as_Call()->clone_jvms(C);
   }
+  if (n->is_SafePoint()) {
+    n->as_SafePoint()->clone_replaced_nodes();
+  }
   return n;                     // Return the clone
 }
 
@@ -630,6 +633,9 @@
   if (is_expensive()) {
     compile->remove_expensive_node(this);
   }
+  if (is_SafePoint()) {
+    as_SafePoint()->delete_replaced_nodes();
+  }
 #ifdef ASSERT
   // We will not actually delete the storage, but we'll make the node unusable.
   *(address*)this = badAddress;  // smash the C++ vtbl, probably
@@ -1095,6 +1101,9 @@
   if( this->is_Store() ) {
     // Condition for back-to-back stores folding.
     return n->Opcode() == op && n->in(MemNode::Memory) == this;
+  } else if (this->is_Load()) {
+    // Condition for removing an unused LoadNode from the MemBarAcquire precedence input
+    return n->Opcode() == Op_MemBarAcquire;
   } else if( op == Op_AddL ) {
     // Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
     return n->Opcode() == Op_ConvL2I && n->in(1) == this;
--- a/src/share/vm/opto/output.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/output.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -783,9 +783,10 @@
     // grow downwards in all implementations.
     // (If, on some machine, the interpreter's Java locals or stack
     // were to grow upwards, the embedded doubles would be word-swapped.)
-    jint   *dp = (jint*)&d;
-    array->append(new ConstantIntValue(dp[1]));
-    array->append(new ConstantIntValue(dp[0]));
+    jlong_accessor acc;
+    acc.long_value = jlong_cast(d);
+    array->append(new ConstantIntValue(acc.words[1]));
+    array->append(new ConstantIntValue(acc.words[0]));
 #endif
     break;
   }
@@ -802,9 +803,10 @@
     // grow downwards in all implementations.
     // (If, on some machine, the interpreter's Java locals or stack
     // were to grow upwards, the embedded doubles would be word-swapped.)
-    jint *dp = (jint*)&d;
-    array->append(new ConstantIntValue(dp[1]));
-    array->append(new ConstantIntValue(dp[0]));
+    jlong_accessor acc;
+    acc.long_value = d;
+    array->append(new ConstantIntValue(acc.words[1]));
+    array->append(new ConstantIntValue(acc.words[0]));
 #endif
     break;
   }
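As a hedged illustration (not part of the patch): the jlong_accessor replaces type-punning through a jint*; in Java terms the debug-info array receives the two 32-bit halves of the double's bit pattern (which half lands in words[0] depends on the platform's endianness):

  static int[] doubleWordsSketch(double d) {
    long bits = Double.doubleToRawLongBits(d);
    int hi = (int) (bits >>> 32);
    int lo = (int) bits;
    return new int[] { hi, lo };
  }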
@@ -854,8 +856,7 @@
     }
 
     // Check if a call returns an object.
-    if (mcall->return_value_is_used() &&
-        mcall->tf()->range()->field_at(TypeFunc::Parms)->isa_ptr()) {
+    if (mcall->returns_pointer()) {
       return_oop = true;
     }
     safepoint_pc_offset += mcall->ret_addr_offset();
--- a/src/share/vm/opto/parse.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/parse.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -142,7 +142,7 @@
 
   void print_value_on(outputStream* st) const PRODUCT_RETURN;
 
-  bool        _forced_inline;     // Inlining was forced by CompilerOracle or ciReplay
+  bool        _forced_inline;     // Inlining was forced by CompilerOracle, ciReplay or annotation
   bool        forced_inline()     const { return _forced_inline; }
   // Count number of nodes in this subtree
   int         count() const;
@@ -357,12 +357,13 @@
   int _est_switch_depth;        // Debugging SwitchRanges.
 #endif
 
-  // parser for the caller of the method of this object
-  Parse* const _parent;
+  bool         _first_return;                  // true if return is the first to be parsed
+  bool         _replaced_nodes_for_exceptions; // needs processing of replaced nodes in exception paths?
+  uint         _new_idx;                       // any node with _idx at or above this value was created during this parsing; used to trim the replaced nodes list.
 
  public:
   // Constructor
-  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, Parse* parent);
+  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);
 
   virtual Parse* is_Parse() const { return (Parse*)this; }
 
@@ -419,8 +420,6 @@
     return block()->successor_for_bci(bci);
   }
 
-  Parse* parent_parser() const { return _parent; }
-
  private:
   // Create a JVMS & map for the initial state of this method.
   SafePointNode* create_entry_map();
@@ -552,8 +551,9 @@
 
   float   dynamic_branch_prediction(float &cnt);
   float   branch_prediction(float &cnt, BoolTest::mask btest, int target_bci);
-  bool    seems_never_taken(float prob);
-  bool    seems_stable_comparison(BoolTest::mask btest, Node* c);
+  bool    seems_never_taken(float prob) const;
+  bool    path_is_suitable_for_uncommon_trap(float prob) const;
+  bool    seems_stable_comparison() const;
 
   void    do_ifnull(BoolTest::mask btest, Node* c);
   void    do_if(BoolTest::mask btest, Node* c);
--- a/src/share/vm/opto/parse1.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/parse1.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -381,8 +381,8 @@
 
 //------------------------------Parse------------------------------------------
 // Main parser constructor.
-Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, Parse* parent)
-  : _exits(caller), _parent(parent)
+Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
+  : _exits(caller)
 {
   // Init some variables
   _caller = caller;
@@ -395,6 +395,9 @@
   _entry_bci = InvocationEntryBci;
   _tf = NULL;
   _block = NULL;
+  _first_return = true;
+  _replaced_nodes_for_exceptions = false;
+  _new_idx = C->unique();
   debug_only(_block_count = -1);
   debug_only(_blocks = (Block*)-1);
 #ifndef PRODUCT
@@ -565,12 +568,13 @@
     set_map(entry_map);
     do_method_entry();
   }
-  if (depth() == 1) {
+
+  if (depth() == 1 && !failing()) {
     // Add check to deoptimize the nmethod if RTM state was changed
     rtm_deopt();
   }
 
-  // Check for bailouts during method entry.
+  // Check for bailouts during method entry or RTM state check setup.
   if (failing()) {
     if (log)  log->done("parse");
     C->set_default_node_notes(caller_nn);
@@ -894,6 +898,10 @@
   for (uint i = 0; i < TypeFunc::Parms; i++) {
     caller.map()->set_req(i, ex_map->in(i));
   }
+  if (ex_map->has_replaced_nodes()) {
+    _replaced_nodes_for_exceptions = true;
+  }
+  caller.map()->transfer_replaced_nodes_from(ex_map, _new_idx);
   // ...and the exception:
   Node*          ex_oop        = saved_ex_oop(ex_map);
   SafePointNode* caller_ex_map = caller.make_exception_state(ex_oop);
@@ -962,7 +970,7 @@
   bool do_synch = method()->is_synchronized() && GenerateSynchronizationCode;
 
   // record exit from a method if compiled while Dtrace is turned on.
-  if (do_synch || C->env()->dtrace_method_probes()) {
+  if (do_synch || C->env()->dtrace_method_probes() || _replaced_nodes_for_exceptions) {
     // First move the exception list out of _exits:
     GraphKit kit(_exits.transfer_exceptions_into_jvms());
     SafePointNode* normal_map = kit.map();  // keep this guy safe
@@ -987,6 +995,9 @@
       if (C->env()->dtrace_method_probes()) {
         kit.make_dtrace_method_exit(method());
       }
+      if (_replaced_nodes_for_exceptions) {
+        kit.map()->apply_replaced_nodes();
+      }
       // Done with exception-path processing.
       ex_map = kit.make_exception_state(ex_oop);
       assert(ex_jvms->same_calls_as(ex_map->jvms()), "sanity");
@@ -1006,6 +1017,7 @@
       _exits.add_exception_state(ex_map);
     }
   }
+  _exits.map()->apply_replaced_nodes();
 }
 
 //-----------------------------create_entry_map-------------------------------
@@ -1020,6 +1032,9 @@
     return NULL;
   }
 
+  // clear current replaced nodes that are of no use from here on (map was cloned in build_exits).
+  _caller->map()->delete_replaced_nodes();
+
   // If this is an inlined method, we may have to do a receiver null check.
   if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
     GraphKit kit(_caller);
@@ -1043,6 +1058,8 @@
 
   SafePointNode* inmap = _caller->map();
   assert(inmap != NULL, "must have inmap");
+  // In case of null check on receiver above
+  map()->transfer_replaced_nodes_from(inmap, _new_idx);
 
   uint i;
 
@@ -1672,6 +1689,8 @@
       set_control(r->nonnull_req());
     }
 
+    map()->merge_replaced_nodes_with(newin);
+
     // newin has been subsumed into the lazy merge, and is now dead.
     set_block(save_block);
 
@@ -1939,7 +1958,7 @@
   // finalization.  In general this will fold up since the concrete
   // class is often visible so the access flags are constant.
   Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() );
-  Node* klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS) );
+  Node* klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), klass_addr, TypeInstPtr::KLASS));
 
   Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::access_flags_offset()));
   Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT, MemNode::unordered);
@@ -2076,6 +2095,13 @@
     phi->add_req(value);
   }
 
+  if (_first_return) {
+    _exits.map()->transfer_replaced_nodes_from(map(), _new_idx);
+    _first_return = false;
+  } else {
+    _exits.map()->merge_replaced_nodes_with(map());
+  }
+
   stop_and_kill_map();          // This CFG path dies here
 }
 
--- a/src/share/vm/opto/parse2.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/parse2.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -884,7 +884,7 @@
 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce
 // very small but nonzero probabilities, which if confused with zero
 // counts would keep the program recompiling indefinitely.
-bool Parse::seems_never_taken(float prob) {
+bool Parse::seems_never_taken(float prob) const {
   return prob < PROB_MIN;
 }
 
@@ -895,53 +895,12 @@
 // if a path is never taken, its controlling comparison is
 // already acting in a stable fashion.  If the comparison
 // seems stable, we will put an expensive uncommon trap
-// on the untaken path.  To be conservative, and to allow
-// partially executed counted loops to be compiled fully,
-// we will plant uncommon traps only after pointer comparisons.
-bool Parse::seems_stable_comparison(BoolTest::mask btest, Node* cmp) {
-  for (int depth = 4; depth > 0; depth--) {
-    // The following switch can find CmpP here over half the time for
-    // dynamic language code rich with type tests.
-    // Code using counted loops or array manipulations (typical
-    // of benchmarks) will have many (>80%) CmpI instructions.
-    switch (cmp->Opcode()) {
-    case Op_CmpP:
-      // A never-taken null check looks like CmpP/BoolTest::eq.
-      // These certainly should be closed off as uncommon traps.
-      if (btest == BoolTest::eq)
-        return true;
-      // A never-failed type check looks like CmpP/BoolTest::ne.
-      // Let's put traps on those, too, so that we don't have to compile
-      // unused paths with indeterminate dynamic type information.
-      if (ProfileDynamicTypes)
-        return true;
-      return false;
-
-    case Op_CmpI:
-      // A small minority (< 10%) of CmpP are masked as CmpI,
-      // as if by boolean conversion ((p == q? 1: 0) != 0).
-      // Detect that here, even if it hasn't optimized away yet.
-      // Specifically, this covers the 'instanceof' operator.
-      if (btest == BoolTest::ne || btest == BoolTest::eq) {
-        if (_gvn.type(cmp->in(2))->singleton() &&
-            cmp->in(1)->is_Phi()) {
-          PhiNode* phi = cmp->in(1)->as_Phi();
-          int true_path = phi->is_diamond_phi();
-          if (true_path > 0 &&
-              _gvn.type(phi->in(1))->singleton() &&
-              _gvn.type(phi->in(2))->singleton()) {
-            // phi->region->if_proj->ifnode->bool->cmp
-            BoolNode* bol = phi->in(0)->in(1)->in(0)->in(1)->as_Bool();
-            btest = bol->_test._test;
-            cmp = bol->in(1);
-            continue;
-          }
-        }
-      }
-      return false;
-    }
+// on the untaken path.
+bool Parse::seems_stable_comparison() const {
+  if (C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if)) {
+    return false;
   }
-  return false;
+  return true;
 }
 
 //-------------------------------repush_if_args--------------------------------
@@ -1166,6 +1125,14 @@
   }
 }
 
+bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
+  // Don't want to speculate on uncommon traps when running with -Xcomp
+  if (!UseInterpreter) {
+    return false;
+  }
+  return (seems_never_taken(prob) && seems_stable_comparison());
+}
+
 //----------------------------adjust_map_after_if------------------------------
 // Adjust the JVM state to reflect the result of taking this path.
 // Basically, it means inspecting the CmpNode controlling this
@@ -1179,33 +1146,9 @@
 
   bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
 
-  if (seems_never_taken(prob) && seems_stable_comparison(btest, c)) {
-    // If this might possibly turn into an implicit null check,
-    // and the null has never yet been seen, we need to generate
-    // an uncommon trap, so as to recompile instead of suffering
-    // with very slow branches.  (We'll get the slow branches if
-    // the program ever changes phase and starts seeing nulls here.)
-    //
-    // We do not inspect for a null constant, since a node may
-    // optimize to 'null' later on.
-    //
-    // Null checks, and other tests which expect inequality,
-    // show btest == BoolTest::eq along the non-taken branch.
-    // On the other hand, type tests, must-be-null tests,
-    // and other tests which expect pointer equality,
-    // show btest == BoolTest::ne along the non-taken branch.
-    // We prune both types of branches if they look unused.
+  if (path_is_suitable_for_uncommon_trap(prob)) {
     repush_if_args();
-    // We need to mark this branch as taken so that if we recompile we will
-    // see that it is possible. In the tiered system the interpreter doesn't
-    // do profiling and by the time we get to the lower tier from the interpreter
-    // the path may be cold again. Make sure it doesn't look untaken
-    if (is_fallthrough) {
-      profile_not_taken_branch(!ProfileInterpreter);
-    } else {
-      profile_taken_branch(iter().get_dest(), !ProfileInterpreter);
-    }
-    uncommon_trap(Deoptimization::Reason_unreached,
+    uncommon_trap(Deoptimization::Reason_unstable_if,
                   Deoptimization::Action_reinterpret,
                   NULL,
                   (is_fallthrough ? "taken always" : "taken never"));
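For context, the net effect of the parse2.cpp hunks above can be summarized in a small standalone sketch: a branch is turned into an uncommon trap only when the interpreter is available to fall back to, the profile says the path is essentially never taken, and this bytecode has not already trapped too often for the unstable-if reason. The types and thresholds below (TrapHistory, the 1e-6 cutoff, the limit of 2 traps) are illustrative stand-ins, not the HotSpot API.

#include <cstdio>

static const float PROB_MIN = 1e-6f;          // assumption: tiny cutoff standing in for PROB_MIN

struct TrapHistory {                           // illustrative stand-in for per-bci trap counts
  int unstable_if_traps;
  bool too_many_traps() const { return unstable_if_traps > 2; }   // illustrative cutoff
};

static bool seems_never_taken(float prob) { return prob < PROB_MIN; }

static bool path_is_suitable_for_uncommon_trap(float prob, bool use_interpreter,
                                               const TrapHistory& history) {
  if (!use_interpreter) return false;          // -Xcomp: no interpreter to deoptimize to
  if (history.too_many_traps()) return false;  // the comparison is not stable after all
  return seems_never_taken(prob);
}

int main() {
  TrapHistory h = {0};
  std::printf("%d\n", path_is_suitable_for_uncommon_trap(0.0f, true, h));   // 1: plant the trap
  std::printf("%d\n", path_is_suitable_for_uncommon_trap(0.3f, true, h));   // 0: branch is taken
  std::printf("%d\n", path_is_suitable_for_uncommon_trap(0.0f, false, h));  // 0: running -Xcomp
  return 0;
}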
--- a/src/share/vm/opto/parseHelper.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/parseHelper.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -156,22 +156,43 @@
   int klass_offset = oopDesc::klass_offset_in_bytes();
   Node* p = basic_plus_adr( ary, ary, klass_offset );
   // p's type is array-of-OOPS plus klass_offset
-  Node* array_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS) );
+  Node* array_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS));
   // Get the array klass
   const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr();
 
-  // array_klass's type is generally INexact array-of-oop.  Heroically
-  // cast the array klass to EXACT array and uncommon-trap if the cast
-  // fails.
+  // The type of array_klass is usually INexact array-of-oop.  Heroically
+  // cast array_klass to EXACT array and uncommon-trap if the cast fails.
+  // Make constant out of the inexact array klass, but use it only if the cast
+  // succeeds.
   bool always_see_exact_class = false;
   if (MonomorphicArrayCheck
-      && !too_many_traps(Deoptimization::Reason_array_check)) {
+      && !too_many_traps(Deoptimization::Reason_array_check)
+      && !tak->klass_is_exact()
+      && tak != TypeKlassPtr::OBJECT) {
+      // Regarding the fourth condition in the if-statement from above:
+      //
+      // If the compiler has determined that the type of array 'ary' (represented
+      // by 'array_klass') is java/lang/Object, the compiler must not assume that
+      // the array 'ary' is monomorphic.
+      //
+      // If 'ary' were of type java/lang/Object, this arraystore would have to fail,
+      // because it is not possible to perform an arraystore into an object that is not
+      // a "proper" array.
+      //
+      // Therefore, let's obtain at runtime the type of 'ary' and check if we can still
+      // successfully perform the store.
+      //
+      // The implementation reasons for the condition are the following:
+      //
+      // java/lang/Object is the superclass of all arrays, but it is represented by the VM
+      // as an InstanceKlass. The checks generated by gen_checkcast() (see below) expect
+      // 'array_klass' to be ObjArrayKlass, which can result in invalid memory accesses.
+      //
+      // See issue JDK-8057622 for details.
+
     always_see_exact_class = true;
     // (If no MDO at all, hope for the best, until a trap actually occurs.)
-  }
 
-  // Is the array klass is exactly its defined type?
-  if (always_see_exact_class && !tak->klass_is_exact()) {
     // Make a constant out of the inexact array klass
     const TypeKlassPtr *extak = tak->cast_to_exactness(true)->is_klassptr();
     Node* con = makecon(extak);
@@ -202,11 +223,15 @@
   // Extract the array element class
   int element_klass_offset = in_bytes(ObjArrayKlass::element_klass_offset());
   Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
-  Node *a_e_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p2, tak) );
+  // We are allowed to use the constant type only if cast succeeded. If always_see_exact_class is true,
+  // we must set a control edge from the IfTrue node created by the uncommon_trap above to the
+  // LoadKlassNode.
+  Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, always_see_exact_class ? control() : NULL,
+                                                       immutable_memory(), p2, tak));
 
   // Check (the hard way) and throw if not a subklass.
   // Result is ignored, we just need the CFG effects.
-  gen_checkcast( obj, a_e_klass );
+  gen_checkcast(obj, a_e_klass);
 }
 
 
--- a/src/share/vm/opto/phaseX.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/phaseX.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1340,15 +1340,27 @@
       }
     }
 
-    if( use->is_Cmp() ) {       // Enable CMP/BOOL optimization
+    uint use_op = use->Opcode();
+    if(use->is_Cmp()) {       // Enable CMP/BOOL optimization
       add_users_to_worklist(use); // Put Bool on worklist
-      // Look for the 'is_x2logic' pattern: "x ? : 0 : 1" and put the
-      // phi merging either 0 or 1 onto the worklist
       if (use->outcnt() > 0) {
         Node* bol = use->raw_out(0);
         if (bol->outcnt() > 0) {
           Node* iff = bol->raw_out(0);
-          if (iff->outcnt() == 2) {
+          if (use_op == Op_CmpI &&
+              iff->is_CountedLoopEnd()) {
+            CountedLoopEndNode* cle = iff->as_CountedLoopEnd();
+            if (cle->limit() == n && cle->phi() != NULL) {
+              // If an opaque node feeds into the limit condition of a
+              // CountedLoop, we need to process the Phi node for the
+              // induction variable when the opaque node is removed:
+              // the range of values taken by the Phi is now known and
+              // so its type is also known.
+              _worklist.push(cle->phi());
+            }
+          } else if (iff->outcnt() == 2) {
+            // Look for the 'is_x2logic' pattern: "x ? : 0 : 1" and put the
+            // phi merging either 0 or 1 onto the worklist
             Node* ifproj0 = iff->raw_out(0);
             Node* ifproj1 = iff->raw_out(1);
             if (ifproj0->outcnt() > 0 && ifproj1->outcnt() > 0) {
@@ -1360,9 +1372,26 @@
           }
         }
       }
+      if (use_op == Op_CmpI) {
+        Node* in1 = use->in(1);
+        for (uint i = 0; i < in1->outcnt(); i++) {
+          if (in1->raw_out(i)->Opcode() == Op_CastII) {
+            Node* castii = in1->raw_out(i);
+            if (castii->in(0) != NULL && castii->in(0)->in(0) != NULL && castii->in(0)->in(0)->is_If()) {
+              Node* ifnode = castii->in(0)->in(0);
+              if (ifnode->in(1) != NULL && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == use) {
+                // Reprocess a CastII node that may depend on an
+                // opaque node value when the opaque node is
+                // removed. In case it carries a dependency we can do
+                // a better job of computing its type.
+                _worklist.push(castii);
+              }
+            }
+          }
+        }
+      }
     }
 
-    uint use_op = use->Opcode();
     // If changed Cast input, check Phi users for simple cycles
     if( use->is_ConstraintCast() || use->is_CheckCastPP() ) {
       for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/opto/replacednodes.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "opto/cfgnode.hpp"
+#include "opto/phaseX.hpp"
+#include "opto/replacednodes.hpp"
+
+void ReplacedNodes::allocate_if_necessary() {
+  if (_replaced_nodes == NULL) {
+    _replaced_nodes = new GrowableArray<ReplacedNode>();
+  }
+}
+
+bool ReplacedNodes::is_empty() const {
+  return _replaced_nodes == NULL || _replaced_nodes->length() == 0;
+}
+
+bool ReplacedNodes::has_node(const ReplacedNode& r) const {
+  return _replaced_nodes->find(r) != -1;
+}
+
+bool ReplacedNodes::has_target_node(Node* n) const {
+  for (int i = 0; i < _replaced_nodes->length(); i++) {
+    if (_replaced_nodes->at(i).improved() == n) {
+      return true;
+    }
+  }
+  return false;
+}
+
+// Record replaced node if not seen before
+void ReplacedNodes::record(Node* initial, Node* improved) {
+  allocate_if_necessary();
+  ReplacedNode r(initial, improved);
+  if (!has_node(r)) {
+    _replaced_nodes->push(r);
+  }
+}
+
+// Copy replaced nodes from one map to another. idx is used to
+// identify nodes that are too new to be of interest in the target
+// node list.
+void ReplacedNodes::transfer_from(const ReplacedNodes& other, uint idx) {
+  if (other.is_empty()) {
+    return;
+  }
+  allocate_if_necessary();
+  for (int i = 0; i < other._replaced_nodes->length(); i++) {
+    ReplacedNode replaced = other._replaced_nodes->at(i);
+    // Only transfer the nodes that can actually be useful
+    if (!has_node(replaced) && (replaced.initial()->_idx < idx || has_target_node(replaced.initial()))) {
+      _replaced_nodes->push(replaced);
+    }
+  }
+}
+
+void ReplacedNodes::clone() {
+  if (_replaced_nodes != NULL) {
+    GrowableArray<ReplacedNode>* replaced_nodes_clone = new GrowableArray<ReplacedNode>();
+    replaced_nodes_clone->appendAll(_replaced_nodes);
+    _replaced_nodes = replaced_nodes_clone;
+  }
+}
+
+void ReplacedNodes::reset() {
+  if (_replaced_nodes != NULL) {
+    _replaced_nodes->clear();
+  }
+}
+
+// Perform node replacement (used when returning to caller)
+void ReplacedNodes::apply(Node* n) {
+  if (is_empty()) {
+    return;
+  }
+  for (int i = 0; i < _replaced_nodes->length(); i++) {
+    ReplacedNode replaced = _replaced_nodes->at(i);
+    n->replace_edge(replaced.initial(), replaced.improved());
+  }
+}
+
+static void enqueue_use(Node* n, Node* use, Unique_Node_List& work) {
+  if (use->is_Phi()) {
+    Node* r = use->in(0);
+    assert(r->is_Region(), "Phi should have Region");
+    for (uint i = 1; i < use->req(); i++) {
+      if (use->in(i) == n) {
+        work.push(r->in(i));
+      }
+    }
+  } else {
+    work.push(use);
+  }
+}
+
+// Perform node replacement following late inlining
+void ReplacedNodes::apply(Compile* C, Node* ctl) {
+  // ctl is the control on exit of the method that was late inlined
+  if (is_empty()) {
+    return;
+  }
+  for (int i = 0; i < _replaced_nodes->length(); i++) {
+    ReplacedNode replaced = _replaced_nodes->at(i);
+    Node* initial = replaced.initial();
+    Node* improved = replaced.improved();
+    assert (ctl != NULL && !ctl->is_top(), "replaced node should have actual control");
+
+    ResourceMark rm;
+    Unique_Node_List work;
+    // Go over all the uses of the node that is considered for replacement...
+    for (DUIterator j = initial->outs(); initial->has_out(j); j++) {
+      Node* use = initial->out(j);
+
+      if (use == improved || use->outcnt() == 0) {
+        continue;
+      }
+      work.clear();
+      enqueue_use(initial, use, work);
+      bool replace = true;
+      // Check that this use is dominated by ctl. Go ahead with the
+      // replacement if it is.
+      while (work.size() != 0 && replace) {
+        Node* n = work.pop();
+        if (use->outcnt() == 0) {
+          continue;
+        }
+        if (n->is_CFG() || (n->in(0) != NULL && !n->in(0)->is_top())) {
+          int depth = 0;
+          Node *m = n;
+          if (!n->is_CFG()) {
+            n = n->in(0);
+          }
+          assert(n->is_CFG(), "should be CFG now");
+          while(n != ctl) {
+            n = IfNode::up_one_dom(n);
+            depth++;
+            // limit search depth
+            if (depth >= 100 || n == NULL) {
+              replace = false;
+              break;
+            }
+          }
+        } else {
+          for (DUIterator k = n->outs(); n->has_out(k); k++) {
+            enqueue_use(n, n->out(k), work);
+          }
+        }
+      }
+      if (replace) {
+        bool is_in_table = C->initial_gvn()->hash_delete(use);
+        int replaced = use->replace_edge(initial, improved);
+        if (is_in_table) {
+          C->initial_gvn()->hash_find_insert(use);
+        }
+        C->record_for_igvn(use);
+
+        assert(replaced > 0, "inconsistent");
+        --j;
+      }
+    }
+  }
+}
+
+void ReplacedNodes::dump(outputStream *st) const {
+  if (!is_empty()) {
+    st->print("replaced nodes: ");
+    for (int i = 0; i < _replaced_nodes->length(); i++) {
+      st->print("%d->%d", _replaced_nodes->at(i).initial()->_idx, _replaced_nodes->at(i).improved()->_idx);
+      if (i < _replaced_nodes->length()-1) {
+        st->print(",");
+      }
+    }
+  }
+}
+
+// Merge two lists of replaced nodes at a point where control flow paths merge
+void ReplacedNodes::merge_with(const ReplacedNodes& other) {
+  if (is_empty()) {
+    return;
+  }
+  if (other.is_empty()) {
+    reset();
+    return;
+  }
+  int shift = 0;
+  int len = _replaced_nodes->length();
+  for (int i = 0; i < len; i++) {
+    if (!other.has_node(_replaced_nodes->at(i))) {
+      shift++;
+    } else if (shift > 0) {
+      _replaced_nodes->at_put(i-shift, _replaced_nodes->at(i));
+    }
+  }
+  if (shift > 0) {
+    _replaced_nodes->trunc_to(len - shift);
+  }
+}
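As a rough standalone sketch of the merge_with() semantics above — a replaced-node pair survives a control-flow merge only if both incoming paths carry it — the following uses std::vector and a plain Pair struct in place of GrowableArray and ReplacedNode; it is an illustration, not VM code.

#include <algorithm>
#include <cstdio>
#include <vector>

struct Pair {
  int initial;
  int improved;
  bool operator==(const Pair& other) const {
    return initial == other.initial && improved == other.improved;
  }
};

// Keep only the pairs that both control-flow paths agree on (intersection).
static void merge_with(std::vector<Pair>& mine, const std::vector<Pair>& other) {
  if (mine.empty()) return;
  if (other.empty()) { mine.clear(); return; }
  mine.erase(std::remove_if(mine.begin(), mine.end(),
                            [&](const Pair& p) {
                              return std::find(other.begin(), other.end(), p) == other.end();
                            }),
             mine.end());
}

int main() {
  std::vector<Pair> a = {{1, 5}, {2, 6}, {3, 7}};
  std::vector<Pair> b = {{2, 6}, {3, 8}};
  merge_with(a, b);                      // only {2, 6} is common to both paths
  for (const Pair& p : a) std::printf("%d->%d\n", p.initial, p.improved);
  return 0;
}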
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/opto/replacednodes.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OPTO_REPLACEDNODES_HPP
+#define SHARE_VM_OPTO_REPLACEDNODES_HPP
+
+#include "opto/connode.hpp"
+
+// During parsing, when a node is "improved",
+// GraphKit::replace_in_map() is called to update the current map so
+// that the improved node is used from that point
+// on. GraphKit::replace_in_map() doesn't operate on the caller's maps
+// and so some optimization opportunities may be lost. The
+// ReplacedNodes class addresses that problem.
+//
+// A ReplacedNodes object is a list of pairs of nodes. Every
+// SafePointNode carries a ReplacedNodes object. Every time
+// GraphKit::replace_in_map() is called, a new pair of nodes is pushed
+// on the list of replaced nodes. When control flow paths merge, their
+// replaced nodes are also merged. When parsing exits a method to
+// return to a caller, the replaced nodes on the exit path are used to
+// update the caller's map.
+class ReplacedNodes VALUE_OBJ_CLASS_SPEC {
+ private:
+  class ReplacedNode VALUE_OBJ_CLASS_SPEC {
+  private:
+    Node* _initial;
+    Node* _improved;
+  public:
+    ReplacedNode() : _initial(NULL), _improved(NULL) {}
+    ReplacedNode(Node* initial, Node* improved) : _initial(initial), _improved(improved) {}
+    Node* initial() const  { return _initial; }
+    Node* improved() const { return _improved; }
+
+    bool operator==(const ReplacedNode& other) {
+      return _initial == other._initial && _improved == other._improved;
+    }
+  };
+  GrowableArray<ReplacedNode>* _replaced_nodes;
+
+  void allocate_if_necessary();
+  bool has_node(const ReplacedNode& r) const;
+  bool has_target_node(Node* n) const;
+
+ public:
+  ReplacedNodes()
+    : _replaced_nodes(NULL) {}
+
+  void clone();
+  void record(Node* initial, Node* improved);
+  void transfer_from(const ReplacedNodes& other, uint idx);
+  void reset();
+  void apply(Node* n);
+  void merge_with(const ReplacedNodes& other);
+  bool is_empty() const;
+  void dump(outputStream *st) const;
+  void apply(Compile* C, Node* ctl);
+};
+
+#endif // SHARE_VM_OPTO_REPLACEDNODES_HPP
--- a/src/share/vm/opto/runtime.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/runtime.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -898,6 +898,74 @@
   return TypeFunc::make(domain, range);
 }
 
+/*
+ * void implCompress(byte[] buf, int ofs)
+ */
+const TypeFunc* OptoRuntime::sha_implCompress_Type() {
+  // create input type (domain)
+  int num_args = 2;
+  int argcnt = num_args;
+  const Type** fields = TypeTuple::fields(argcnt);
+  int argp = TypeFunc::Parms;
+  fields[argp++] = TypePtr::NOTNULL; // buf
+  fields[argp++] = TypePtr::NOTNULL; // state
+  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
+  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
+
+  // no result type needed
+  fields = TypeTuple::fields(1);
+  fields[TypeFunc::Parms+0] = NULL; // void
+  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
+  return TypeFunc::make(domain, range);
+}
+
+/*
+ * int implCompressMultiBlock(byte[] b, int ofs, int limit)
+ */
+const TypeFunc* OptoRuntime::digestBase_implCompressMB_Type() {
+  // create input type (domain)
+  int num_args = 4;
+  int argcnt = num_args;
+  const Type** fields = TypeTuple::fields(argcnt);
+  int argp = TypeFunc::Parms;
+  fields[argp++] = TypePtr::NOTNULL; // buf
+  fields[argp++] = TypePtr::NOTNULL; // state
+  fields[argp++] = TypeInt::INT;     // ofs
+  fields[argp++] = TypeInt::INT;     // limit
+  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
+  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
+
+  // returning ofs (int)
+  fields = TypeTuple::fields(1);
+  fields[TypeFunc::Parms+0] = TypeInt::INT; // ofs
+  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
+  return TypeFunc::make(domain, range);
+}
+
+const TypeFunc* OptoRuntime::multiplyToLen_Type() {
+  // create input type (domain)
+  int num_args      = 6;
+  int argcnt = num_args;
+  const Type** fields = TypeTuple::fields(argcnt);
+  int argp = TypeFunc::Parms;
+  fields[argp++] = TypePtr::NOTNULL;    // x
+  fields[argp++] = TypeInt::INT;        // xlen
+  fields[argp++] = TypePtr::NOTNULL;    // y
+  fields[argp++] = TypeInt::INT;        // ylen
+  fields[argp++] = TypePtr::NOTNULL;    // z
+  fields[argp++] = TypeInt::INT;        // zlen
+  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
+  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
+
+  // no result type needed
+  fields = TypeTuple::fields(1);
+  fields[TypeFunc::Parms+0] = NULL;
+  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
+  return TypeFunc::make(domain, range);
+}
+
+
+
 //------------- Interpreter state access for on stack replacement
 const TypeFunc* OptoRuntime::osr_end_Type() {
   // create input type (domain)
--- a/src/share/vm/opto/runtime.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/runtime.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -300,6 +300,11 @@
   static const TypeFunc* aescrypt_block_Type();
   static const TypeFunc* cipherBlockChaining_aescrypt_Type();
 
+  static const TypeFunc* sha_implCompress_Type();
+  static const TypeFunc* digestBase_implCompressMB_Type();
+
+  static const TypeFunc* multiplyToLen_Type();
+
   static const TypeFunc* updateBytesCRC32_Type();
 
   // leaf on stack replacement interpreter accessor types
--- a/src/share/vm/opto/subnode.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/subnode.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1147,12 +1147,10 @@
 
 //------------------------------dump_spec-------------------------------------
 // Print special per-node info
-#ifndef PRODUCT
 void BoolTest::dump_on(outputStream *st) const {
   const char *msg[] = {"eq","gt","of","lt","ne","le","nof","ge"};
   st->print("%s", msg[_test]);
 }
-#endif
 
 //=============================================================================
 uint BoolNode::hash() const { return (Node::hash() << 3)|(_test._test+1); }
--- a/src/share/vm/opto/subnode.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/subnode.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -275,9 +275,7 @@
   mask commute( ) const { return mask("032147658"[_test]-'0'); }
   mask negate( ) const { return mask(_test^4); }
   bool is_canonical( ) const { return (_test == BoolTest::ne || _test == BoolTest::lt || _test == BoolTest::le || _test == BoolTest::overflow); }
-#ifndef PRODUCT
   void dump_on(outputStream *st) const;
-#endif
 };
 
 //------------------------------BoolNode---------------------------------------
--- a/src/share/vm/opto/superword.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/superword.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1374,6 +1374,20 @@
       if (n->is_Load()) {
         Node* ctl = n->in(MemNode::Control);
         Node* mem = first->in(MemNode::Memory);
+        SWPointer p1(n->as_Mem(), this);
+        // Identify the memory dependency for the new loadVector node by
+        // walking up through memory chain.
+        // This is done to give flexibility to the new loadVector node so that
+        // it can move above independent storeVector nodes.
+        while (mem->is_StoreVector()) {
+          SWPointer p2(mem->as_Mem(), this);
+          int cmp = p1.cmp(p2);
+          if (SWPointer::not_equal(cmp) || !SWPointer::comparable(cmp)) {
+            mem = mem->in(MemNode::Memory);
+          } else {
+            break; // dependent memory
+          }
+        }
         Node* adr = low_adr->in(MemNode::Address);
         const TypePtr* atyp = n->adr_type();
         vn = LoadVectorNode::make(C, opc, ctl, mem, adr, atyp, vlen, velt_basic_type(n));
--- a/src/share/vm/opto/type.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/opto/type.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -265,7 +265,7 @@
   // locking.
 
   Arena* save = current->type_arena();
-  Arena* shared_type_arena = new (mtCompiler)Arena();
+  Arena* shared_type_arena = new (mtCompiler)Arena(mtCompiler);
 
   current->set_type_arena(shared_type_arena);
   _shared_type_dict =
--- a/src/share/vm/precompiled/precompiled.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/precompiled/precompiled.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -193,11 +193,13 @@
 # include "runtime/mutexLocker.hpp"
 # include "runtime/objectMonitor.hpp"
 # include "runtime/orderAccess.hpp"
+# include "runtime/orderAccess.inline.hpp"
 # include "runtime/os.hpp"
 # include "runtime/osThread.hpp"
 # include "runtime/perfData.hpp"
 # include "runtime/perfMemory.hpp"
 # include "runtime/prefetch.hpp"
+# include "runtime/prefetch.inline.hpp"
 # include "runtime/reflection.hpp"
 # include "runtime/reflectionUtils.hpp"
 # include "runtime/registerMap.hpp"
@@ -218,10 +220,17 @@
 # include "runtime/vmThread.hpp"
 # include "runtime/vm_operations.hpp"
 # include "runtime/vm_version.hpp"
+# include "services/allocationSite.hpp"
 # include "services/lowMemoryDetector.hpp"
+# include "services/mallocTracker.hpp"
+# include "services/memBaseline.hpp"
 # include "services/memoryPool.hpp"
 # include "services/memoryService.hpp"
 # include "services/memoryUsage.hpp"
+# include "services/memReporter.hpp"
+# include "services/memTracker.hpp"
+# include "services/nmtCommon.hpp"
+# include "services/virtualMemoryTracker.hpp"
 # include "utilities/accessFlags.hpp"
 # include "utilities/array.hpp"
 # include "utilities/bitMap.hpp"
@@ -235,6 +244,7 @@
 # include "utilities/hashtable.hpp"
 # include "utilities/histogram.hpp"
 # include "utilities/macros.hpp"
+# include "utilities/nativeCallStack.hpp"
 # include "utilities/numberSeq.hpp"
 # include "utilities/ostream.hpp"
 # include "utilities/preserveException.hpp"
--- a/src/share/vm/prims/forte.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/prims/forte.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -32,7 +32,7 @@
 #include "oops/oop.inline2.hpp"
 #include "prims/forte.hpp"
 #include "runtime/javaCalls.hpp"
-#include "runtime/thread.hpp"
+#include "runtime/thread.inline.hpp"
 #include "runtime/vframe.hpp"
 #include "runtime/vframeArray.hpp"
 
--- a/src/share/vm/prims/jni.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/prims/jni.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -71,11 +71,13 @@
 #include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/jfieldIDWorkaround.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/reflection.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/vm_operations.hpp"
+#include "services/memTracker.hpp"
 #include "services/runtimeService.hpp"
 #include "trace/tracing.hpp"
 #include "utilities/defaultStream.hpp"
@@ -295,15 +297,6 @@
       "Bug in native code: jfieldID offset must address interior of object");
 }
 
-// Pick a reasonable higher bound for local capacity requested
-// for EnsureLocalCapacity and PushLocalFrame.  We don't want it too
-// high because a test (or very unusual application) may try to allocate
-// that many handles and run out of swap space.  An implementation is
-// permitted to allocate more handles than the ensured capacity, so this
-// value is set high enough to prevent compatibility problems.
-const int MAX_REASONABLE_LOCAL_CAPACITY = 4*K;
-
-
 // Wrapper to trace JNI functions
 
 #ifdef ASSERT
@@ -883,7 +876,8 @@
                                    env, capacity);
 #endif /* USDT2 */
   //%note jni_11
-  if (capacity < 0 || capacity > MAX_REASONABLE_LOCAL_CAPACITY) {
+  if (capacity < 0 ||
+      ((MaxJNILocalCapacity > 0) && (capacity > MaxJNILocalCapacity))) {
 #ifndef USDT2
     DTRACE_PROBE1(hotspot_jni, PushLocalFrame__return, JNI_ERR);
 #else /* USDT2 */
@@ -1042,7 +1036,8 @@
                                         env, capacity);
 #endif /* USDT2 */
   jint ret;
-  if (capacity >= 0 && capacity <= MAX_REASONABLE_LOCAL_CAPACITY) {
+  if (capacity >= 0 &&
+      ((MaxJNILocalCapacity <= 0) || (capacity <= MaxJNILocalCapacity))) {
     ret = JNI_OK;
   } else {
     ret = JNI_ERR;
@@ -3592,6 +3587,7 @@
     if (bad_address != NULL) {
       os::protect_memory(bad_address, size, os::MEM_PROT_READ,
                          /*is_committed*/false);
+      MemTracker::record_virtual_memory_type((void*)bad_address, mtInternal);
     }
   }
   return bad_address;
@@ -5067,6 +5063,7 @@
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #endif
+#include "memory/guardedMemory.hpp"
 #include "utilities/quickSort.hpp"
 #include "utilities/ostream.hpp"
 #if INCLUDE_VM_STRUCTS
@@ -5085,10 +5082,15 @@
 void TestMetachunk_test();
 void TestVirtualSpaceNode_test();
 void TestNewSize_test();
+void TestKlass_test();
+void Test_linked_list();
+void TestChunkedList_test();
 #if INCLUDE_ALL_GCS
 void TestOldFreeSpaceCalculation_test();
 void TestG1BiasedArray_test();
+void TestBufferingOopClosure_test();
 void TestCodeCacheRemSet_test();
+void FreeRegionList_test();
 #endif
 
 void execute_internal_vm_tests() {
@@ -5105,9 +5107,13 @@
     run_unit_test(arrayOopDesc::test_max_array_length());
     run_unit_test(CollectedHeap::test_is_in());
     run_unit_test(QuickSort::test_quick_sort());
+    run_unit_test(GuardedMemory::test_guarded_memory());
     run_unit_test(AltHashing::test_alt_hash());
     run_unit_test(test_loggc_filename());
     run_unit_test(TestNewSize_test());
+    run_unit_test(TestKlass_test());
+    run_unit_test(Test_linked_list());
+    run_unit_test(TestChunkedList_test());
 #if INCLUDE_VM_STRUCTS
     run_unit_test(VMStructs::test());
 #endif
@@ -5115,7 +5121,11 @@
     run_unit_test(TestOldFreeSpaceCalculation_test());
     run_unit_test(TestG1BiasedArray_test());
     run_unit_test(HeapRegionRemSet::test_prt());
+    run_unit_test(TestBufferingOopClosure_test());
     run_unit_test(TestCodeCacheRemSet_test());
+    if (UseG1GC) {
+      run_unit_test(FreeRegionList_test());
+    }
 #endif
     tty->print_cr("All internal VM tests passed");
   }
--- a/src/share/vm/prims/jniCheck.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/prims/jniCheck.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
+#include "memory/guardedMemory.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/symbol.hpp"
@@ -35,7 +36,7 @@
 #include "runtime/handles.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/jfieldIDWorkaround.hpp"
-#include "runtime/thread.hpp"
+#include "runtime/thread.inline.hpp"
 #ifdef TARGET_ARCH_x86
 # include "jniTypes_x86.hpp"
 #endif
@@ -323,6 +324,74 @@
   }
 }
 
+/*
+ * Copy and wrap array elements for bounds checking.
+ * Remember the original elements (GuardedMemory::get_tag())
+ */
+static void* check_jni_wrap_copy_array(JavaThread* thr, jarray array,
+    void* orig_elements) {
+  void* result;
+  IN_VM(
+    oop a = JNIHandles::resolve_non_null(array);
+    size_t len = arrayOop(a)->length() <<
+        TypeArrayKlass::cast(a->klass())->log2_element_size();
+    result = GuardedMemory::wrap_copy(orig_elements, len, orig_elements);
+  )
+  return result;
+}
+
+static void* check_wrapped_array(JavaThread* thr, const char* fn_name,
+    void* obj, void* carray, size_t* rsz) {
+  if (carray == NULL) {
+    tty->print_cr("%s: elements vector NULL" PTR_FORMAT, fn_name, p2i(obj));
+    NativeReportJNIFatalError(thr, "Elements vector NULL");
+  }
+  GuardedMemory guarded(carray);
+  void* orig_result = guarded.get_tag();
+  if (!guarded.verify_guards()) {
+    tty->print_cr("ReleasePrimitiveArrayCritical: release array failed bounds "
+        "check, incorrect pointer returned ? array: " PTR_FORMAT " carray: "
+        PTR_FORMAT, p2i(obj), p2i(carray));
+    guarded.print_on(tty);
+    NativeReportJNIFatalError(thr, "ReleasePrimitiveArrayCritical: "
+        "failed bounds check");
+  }
+  if (orig_result == NULL) {
+    tty->print_cr("ReleasePrimitiveArrayCritical: unrecognized elements. array: "
+        PTR_FORMAT " carray: " PTR_FORMAT, p2i(obj), p2i(carray));
+    guarded.print_on(tty);
+    NativeReportJNIFatalError(thr, "ReleasePrimitiveArrayCritical: "
+        "unrecognized elements");
+  }
+  if (rsz != NULL) {
+    *rsz = guarded.get_user_size();
+  }
+  return orig_result;
+}
+
+static void* check_wrapped_array_release(JavaThread* thr, const char* fn_name,
+    void* obj, void* carray, jint mode) {
+  size_t sz;
+  void* orig_result = check_wrapped_array(thr, fn_name, obj, carray, &sz);
+  switch (mode) {
+  case 0:
+    memcpy(orig_result, carray, sz);
+    GuardedMemory::free_copy(carray);
+    break;
+  case JNI_COMMIT:
+    memcpy(orig_result, carray, sz);
+    break;
+  case JNI_ABORT:
+    GuardedMemory::free_copy(carray);
+    break;
+  default:
+    tty->print_cr("%s: Unrecognized mode %i releasing array "
+        PTR_FORMAT " elements " PTR_FORMAT, fn_name, mode, p2i(obj), p2i(carray));
+    NativeReportJNIFatalError(thr, "Unrecognized array release mode");
+  }
+  return orig_result;
+}
+
 oop jniCheck::validate_handle(JavaThread* thr, jobject obj) {
   if (JNIHandles::is_frame_handle(thr, obj) ||
       JNIHandles::is_local_handle(thr, obj) ||
@@ -1314,7 +1383,7 @@
 JNI_END
 
 // Arbitrary (but well-known) tag
-const jint STRING_TAG = 0x47114711;
+const void* STRING_TAG = (void*)0x47114711;
 
 JNI_ENTRY_CHECKED(const jchar *,
   checked_jni_GetStringChars(JNIEnv *env,
@@ -1324,21 +1393,22 @@
     IN_VM(
       checkString(thr, str);
     )
-    jchar* newResult = NULL;
+    jchar* new_result = NULL;
     const jchar *result = UNCHECKED()->GetStringChars(env,str,isCopy);
     assert (isCopy == NULL || *isCopy == JNI_TRUE, "GetStringChars didn't return a copy as expected");
     if (result != NULL) {
       size_t len = UNCHECKED()->GetStringLength(env,str) + 1; // + 1 for NULL termination
-      jint* tagLocation = (jint*) AllocateHeap(len * sizeof(jchar) + sizeof(jint), mtInternal);
-      *tagLocation = STRING_TAG;
-      newResult = (jchar*) (tagLocation + 1);
-      memcpy(newResult, result, len * sizeof(jchar));
+      len *= sizeof(jchar);
+      new_result = (jchar*) GuardedMemory::wrap_copy(result, len, STRING_TAG);
+      if (new_result == NULL) {
+        vm_exit_out_of_memory(len, OOM_MALLOC_ERROR, "checked_jni_GetStringChars");
+      }
       // Avoiding call to UNCHECKED()->ReleaseStringChars() since that will fire unexpected dtrace probes
       // Note that the dtrace arguments for the allocated memory will not match up with this solution.
       FreeHeap((char*)result);
     }
     functionExit(env);
-    return newResult;
+    return new_result;
 JNI_END
 
 JNI_ENTRY_CHECKED(void,
@@ -1354,11 +1424,23 @@
        UNCHECKED()->ReleaseStringChars(env,str,chars);
     }
     else {
-       jint* tagLocation = ((jint*) chars) - 1;
-       if (*tagLocation != STRING_TAG) {
-          NativeReportJNIFatalError(thr, "ReleaseStringChars called on something not allocated by GetStringChars");
-       }
-       UNCHECKED()->ReleaseStringChars(env,str,(const jchar*)tagLocation);
+      GuardedMemory guarded((void*)chars);
+      if (!guarded.verify_guards()) {
+        tty->print_cr("ReleaseStringChars: release chars failed bounds check. "
+            "string: " PTR_FORMAT " chars: " PTR_FORMAT, p2i(str), p2i(chars));
+        guarded.print_on(tty);
+        NativeReportJNIFatalError(thr, "ReleaseStringChars: "
+            "release chars failed bounds check.");
+      }
+      if (guarded.get_tag() != STRING_TAG) {
+        tty->print_cr("ReleaseStringChars: called on something not allocated "
+            "by GetStringChars. string: " PTR_FORMAT " chars: " PTR_FORMAT,
+            p2i(str), p2i(chars));
+        NativeReportJNIFatalError(thr, "ReleaseStringChars called on something "
+            "not allocated by GetStringChars");
+      }
+       UNCHECKED()->ReleaseStringChars(env, str,
+           (const jchar*) guarded.release_for_freeing());
     }
     functionExit(env);
 JNI_END
@@ -1385,7 +1467,7 @@
 JNI_END
 
 // Arbitrary (but well-known) tag - different than GetStringChars
-const jint STRING_UTF_TAG = 0x48124812;
+const void* STRING_UTF_TAG = (void*) 0x48124812;
 
 JNI_ENTRY_CHECKED(const char *,
   checked_jni_GetStringUTFChars(JNIEnv *env,
@@ -1395,21 +1477,21 @@
     IN_VM(
       checkString(thr, str);
     )
-    char* newResult = NULL;
+    char* new_result = NULL;
     const char *result = UNCHECKED()->GetStringUTFChars(env,str,isCopy);
     assert (isCopy == NULL || *isCopy == JNI_TRUE, "GetStringUTFChars didn't return a copy as expected");
     if (result != NULL) {
       size_t len = strlen(result) + 1; // + 1 for NULL termination
-      jint* tagLocation = (jint*) AllocateHeap(len + sizeof(jint), mtInternal);
-      *tagLocation = STRING_UTF_TAG;
-      newResult = (char*) (tagLocation + 1);
-      strcpy(newResult, result);
+      new_result = (char*) GuardedMemory::wrap_copy(result, len, STRING_UTF_TAG);
+      if (new_result == NULL) {
+        vm_exit_out_of_memory(len, OOM_MALLOC_ERROR, "checked_jni_GetStringUTFChars");
+      }
       // Avoiding call to UNCHECKED()->ReleaseStringUTFChars() since that will fire unexpected dtrace probes
       // Note that the dtrace arguments for the allocated memory will not match up with this solution.
       FreeHeap((char*)result, mtInternal);
     }
     functionExit(env);
-    return newResult;
+    return new_result;
 JNI_END
 
 JNI_ENTRY_CHECKED(void,
@@ -1425,11 +1507,23 @@
        UNCHECKED()->ReleaseStringUTFChars(env,str,chars);
     }
     else {
-       jint* tagLocation = ((jint*) chars) - 1;
-       if (*tagLocation != STRING_UTF_TAG) {
-          NativeReportJNIFatalError(thr, "ReleaseStringUTFChars called on something not allocated by GetStringUTFChars");
-       }
-       UNCHECKED()->ReleaseStringUTFChars(env,str,(const char*)tagLocation);
+      GuardedMemory guarded((void*)chars);
+      if (!guarded.verify_guards()) {
+        tty->print_cr("ReleaseStringUTFChars: release chars failed bounds check. "
+            "string: " PTR_FORMAT " chars: " PTR_FORMAT, p2i(str), p2i(chars));
+        guarded.print_on(tty);
+        NativeReportJNIFatalError(thr, "ReleaseStringUTFChars: "
+            "release chars failed bounds check.");
+      }
+      if (guarded.get_tag() != STRING_UTF_TAG) {
+        tty->print_cr("ReleaseStringUTFChars: called on something not "
+            "allocated by GetStringUTFChars. string: " PTR_FORMAT " chars: "
+            PTR_FORMAT, p2i(str), p2i(chars));
+        NativeReportJNIFatalError(thr, "ReleaseStringUTFChars "
+            "called on something not allocated by GetStringUTFChars");
+      }
+      UNCHECKED()->ReleaseStringUTFChars(env, str,
+          (const char*) guarded.release_for_freeing());
     }
     functionExit(env);
 JNI_END
@@ -1514,6 +1608,9 @@
     ElementType *result = UNCHECKED()->Get##Result##ArrayElements(env, \
                                                                   array, \
                                                                   isCopy); \
+    if (result != NULL) { \
+      result = (ElementType *) check_jni_wrap_copy_array(thr, array, result); \
+    } \
     functionExit(env); \
     return result; \
 JNI_END
@@ -1538,12 +1635,10 @@
       check_primitive_array_type(thr, array, ElementTag); \
       ASSERT_OOPS_ALLOWED; \
       typeArrayOop a = typeArrayOop(JNIHandles::resolve_non_null(array)); \
-      /* cannot check validity of copy, unless every request is logged by
-       * checking code.  Implementation of this check is deferred until a
-       * subsequent release.
-       */ \
     ) \
-    UNCHECKED()->Release##Result##ArrayElements(env,array,elems,mode); \
+    ElementType* orig_result = (ElementType *) check_wrapped_array_release( \
+        thr, "checked_jni_Release"#Result"ArrayElements", array, elems, mode); \
+    UNCHECKED()->Release##Result##ArrayElements(env, array, orig_result, mode); \
     functionExit(env); \
 JNI_END
 
@@ -1694,6 +1789,9 @@
       check_is_primitive_array(thr, array);
     )
     void *result = UNCHECKED()->GetPrimitiveArrayCritical(env, array, isCopy);
+    if (result != NULL) {
+      result = check_jni_wrap_copy_array(thr, array, result);
+    }
     functionExit(env);
     return result;
 JNI_END
@@ -1707,10 +1805,9 @@
     IN_VM(
       check_is_primitive_array(thr, array);
     )
-    /* The Hotspot JNI code does not use the parameters, so just check the
-     * array parameter as a minor sanity check
-     */
-    UNCHECKED()->ReleasePrimitiveArrayCritical(env, array, carray, mode);
+    // Check the element array...
+    void* orig_result = check_wrapped_array_release(thr, "ReleasePrimitiveArrayCritical", array, carray, mode);
+    UNCHECKED()->ReleasePrimitiveArrayCritical(env, array, orig_result, mode);
     functionExit(env);
 JNI_END
 
--- a/src/share/vm/prims/jvm.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/prims/jvm.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -24,10 +24,15 @@
 
 #include "precompiled.hpp"
 #include "classfile/classLoader.hpp"
+#include "classfile/classLoaderExt.hpp"
 #include "classfile/javaAssertions.hpp"
 #include "classfile/javaClasses.hpp"
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
+#if INCLUDE_CDS
+#include "classfile/sharedClassUtil.hpp"
+#include "classfile/systemDictionaryShared.hpp"
+#endif
 #include "classfile/vmSymbols.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "interpreter/bytecode.hpp"
@@ -51,6 +56,7 @@
 #include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/jfieldIDWorkaround.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 #include "runtime/perfData.hpp"
 #include "runtime/reflection.hpp"
@@ -390,6 +396,14 @@
     }
   }
 
+  const char* enableSharedLookupCache = "false";
+#if INCLUDE_CDS
+  if (ClassLoaderExt::is_lookup_cache_enabled()) {
+    enableSharedLookupCache = "true";
+  }
+#endif
+  PUTPROP(props, "sun.cds.enableSharedLookupCache", enableSharedLookupCache);
+
   return properties;
 JVM_END
 
@@ -591,13 +605,14 @@
 
   // Make shallow object copy
   const int size = obj->size();
-  oop new_obj = NULL;
+  oop new_obj_oop = NULL;
   if (obj->is_array()) {
     const int length = ((arrayOop)obj())->length();
-    new_obj = CollectedHeap::array_allocate(klass, size, length, CHECK_NULL);
+    new_obj_oop = CollectedHeap::array_allocate(klass, size, length, CHECK_NULL);
   } else {
-    new_obj = CollectedHeap::obj_allocate(klass, size, CHECK_NULL);
+    new_obj_oop = CollectedHeap::obj_allocate(klass, size, CHECK_NULL);
   }
+
   // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
   // is modifying a reference field in the clonee, a non-oop-atomic copy might
   // be suspended in the middle of copying the pointer and end up with parts
@@ -608,24 +623,41 @@
   // The same is true of StubRoutines::object_copy and the various oop_copy
   // variants, and of the code generated by the inline_native_clone intrinsic.
   assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
-  Copy::conjoint_jlongs_atomic((jlong*)obj(), (jlong*)new_obj,
+  Copy::conjoint_jlongs_atomic((jlong*)obj(), (jlong*)new_obj_oop,
                                (size_t)align_object_size(size) / HeapWordsPerLong);
   // Clear the header
-  new_obj->init_mark();
+  new_obj_oop->init_mark();
 
   // Store check (mark entire object and let gc sort it out)
   BarrierSet* bs = Universe::heap()->barrier_set();
   assert(bs->has_write_region_opt(), "Barrier set does not have write_region");
-  bs->write_region(MemRegion((HeapWord*)new_obj, size));
+  bs->write_region(MemRegion((HeapWord*)new_obj_oop, size));
+
+  Handle new_obj(THREAD, new_obj_oop);
+  // Special handling for MemberNames.  Since they contain Method* metadata, they
+  // must be registered so that RedefineClasses can fix metadata contained in them.
+  if (java_lang_invoke_MemberName::is_instance(new_obj()) &&
+      java_lang_invoke_MemberName::is_method(new_obj())) {
+    Method* method = (Method*)java_lang_invoke_MemberName::vmtarget(new_obj());
+    // MemberName may be unresolved, so doesn't need registration until resolved.
+    if (method != NULL) {
+      methodHandle m(THREAD, method);
+      // This can safepoint and redefine method, so need both new_obj and method
+      // in a handle, for two different reasons.  new_obj can move, method can be
+      // deleted if nothing is using it on the stack.
+      m->method_holder()->add_member_name(new_obj());
+    }
+  }
 
   // Caution: this involves a java upcall, so the clone should be
   // "gc-robust" by this stage.
   if (klass->has_finalizer()) {
     assert(obj->is_instance(), "should be instanceOop");
-    new_obj = InstanceKlass::register_finalizer(instanceOop(new_obj), CHECK_NULL);
+    new_obj_oop = InstanceKlass::register_finalizer(instanceOop(new_obj()), CHECK_NULL);
+    new_obj = Handle(THREAD, new_obj_oop);
   }
 
-  return JNIHandles::make_local(env, oop(new_obj));
+  return JNIHandles::make_local(env, new_obj());
 JVM_END
 
 // java.lang.Compiler ////////////////////////////////////////////////////
@@ -763,6 +795,36 @@
 JVM_END
 
 
+JVM_ENTRY(jboolean, JVM_KnownToNotExist(JNIEnv *env, jobject loader, const char *classname))
+  JVMWrapper("JVM_KnownToNotExist");
+#if INCLUDE_CDS
+  return ClassLoaderExt::known_to_not_exist(env, loader, classname, CHECK_(false));
+#else
+  return false;
+#endif
+JVM_END
+
+
+JVM_ENTRY(jobjectArray, JVM_GetResourceLookupCacheURLs(JNIEnv *env, jobject loader))
+  JVMWrapper("JVM_GetResourceLookupCacheURLs");
+#if INCLUDE_CDS
+  return ClassLoaderExt::get_lookup_cache_urls(env, loader, CHECK_NULL);
+#else
+  return NULL;
+#endif
+JVM_END
+
+
+JVM_ENTRY(jintArray, JVM_GetResourceLookupCache(JNIEnv *env, jobject loader, const char *resource_name))
+  JVMWrapper("JVM_GetResourceLookupCache");
+#if INCLUDE_CDS
+  return ClassLoaderExt::get_lookup_cache(env, loader, resource_name, CHECK_NULL);
+#else
+  return NULL;
+#endif
+JVM_END
+
+
 // Returns a class loaded by the bootstrap class loader; or null
 // if not found.  ClassNotFoundException is not thrown.
 //
@@ -1034,7 +1096,15 @@
                                                               h_loader,
                                                               Handle(),
                                                               CHECK_NULL);
-
+#if INCLUDE_CDS
+  if (k == NULL) {
+    // If the class is not already loaded, try to see if it's in the shared
+    // archive for the current classloader (h_loader).
+    instanceKlassHandle ik = SystemDictionaryShared::find_or_load_shared_class(
+        klass_name, h_loader, CHECK_NULL);
+    k = ik();
+  }
+#endif
   return (k == NULL) ? NULL :
             (jclass) JNIHandles::make_local(env, k->java_mirror());
 JVM_END
@@ -4476,7 +4546,7 @@
 
 JVM_ENTRY(void, JVM_GetVersionInfo(JNIEnv* env, jvm_version_info* info, size_t info_size))
 {
-  memset(info, 0, sizeof(info_size));
+  memset(info, 0, info_size);
 
   info->jvm_version = Abstract_VM_Version::jvm_version();
   info->update_version = 0;          /* 0 in HotSpot Express VM */
--- a/src/share/vm/prims/jvm.h	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/prims/jvm.h	Fri Apr 10 16:58:26 2015 -0700
@@ -1548,6 +1548,31 @@
 JNIEXPORT jobjectArray JNICALL
 JVM_GetThreadStateNames(JNIEnv* env, jint javaThreadState, jintArray values);
 
+/*
+ * Returns true if the JVM's lookup cache indicates that this class is
+ * known to NOT exist for the given loader.
+ */
+JNIEXPORT jboolean JNICALL
+JVM_KnownToNotExist(JNIEnv *env, jobject loader, const char *classname);
+
+/*
+ * Returns an array of all URLs that are stored in the JVM's lookup cache
+ * for the given loader. NULL if the lookup cache is unavailable.
+ */
+JNIEXPORT jobjectArray JNICALL
+JVM_GetResourceLookupCacheURLs(JNIEnv *env, jobject loader);
+
+/*
+ * Returns an array of all URLs that *may* contain the resource_name for the
+ * given loader. This function returns an integer array, each element
+ * of which can be used to index into the array returned by
+ * JVM_GetResourceLookupCacheURLs of the same loader to determine the
+ * URLs.
+ */
+JNIEXPORT jintArray JNICALL
+JVM_GetResourceLookupCache(JNIEnv *env, jobject loader, const char *resource_name);
+
+
 /* =========================================================================
  * The following defines a private JVM interface that the JDK can query
  * for the JVM version and capabilities.  sun.misc.Version defines
--- a/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -54,6 +54,7 @@
 void JvmtiClassFileReconstituter::write_field_infos() {
   HandleMark hm(thread());
   Array<AnnotationArray*>* fields_anno = ikh()->fields_annotations();
+  Array<AnnotationArray*>* fields_type_anno = ikh()->fields_type_annotations();
 
   // Compute the real number of Java fields
   int java_fields = ikh()->java_fields_count();
@@ -68,6 +69,7 @@
     // int offset = ikh()->field_offset( index );
     int generic_signature_index = fs.generic_signature_index();
     AnnotationArray* anno = fields_anno == NULL ? NULL : fields_anno->at(fs.index());
+    AnnotationArray* type_anno = fields_type_anno == NULL ? NULL : fields_type_anno->at(fs.index());
 
     // JVMSpec|   field_info {
     // JVMSpec|         u2 access_flags;
@@ -93,6 +95,9 @@
     if (anno != NULL) {
       ++attr_count;     // has RuntimeVisibleAnnotations attribute
     }
+    if (type_anno != NULL) {
+      ++attr_count;     // has RuntimeVisibleTypeAnnotations attribute
+    }
 
     write_u2(attr_count);
 
@@ -110,6 +115,9 @@
     if (anno != NULL) {
       write_annotations_attribute("RuntimeVisibleAnnotations", anno);
     }
+    if (type_anno != NULL) {
+      write_annotations_attribute("RuntimeVisibleTypeAnnotations", type_anno);
+    }
   }
 }
 
@@ -550,6 +558,7 @@
   AnnotationArray* anno = method->annotations();
   AnnotationArray* param_anno = method->parameter_annotations();
   AnnotationArray* default_anno = method->annotation_default();
+  AnnotationArray* type_anno = method->type_annotations();
 
   // skip generated default interface methods
   if (method->is_overpass()) {
@@ -585,6 +594,9 @@
   if (param_anno != NULL) {
     ++attr_count;     // has RuntimeVisibleParameterAnnotations attribute
   }
+  if (type_anno != NULL) {
+    ++attr_count;     // has RuntimeVisibleTypeAnnotations attribute
+  }
 
   write_u2(attr_count);
   if (const_method->code_size() > 0) {
@@ -609,6 +621,9 @@
   if (param_anno != NULL) {
     write_annotations_attribute("RuntimeVisibleParameterAnnotations", param_anno);
   }
+  if (type_anno != NULL) {
+    write_annotations_attribute("RuntimeVisibleTypeAnnotations", type_anno);
+  }
 }
 
 // Write the class attributes portion of ClassFile structure
@@ -618,6 +633,7 @@
   u2 inner_classes_length = inner_classes_attribute_length();
   Symbol* generic_signature = ikh()->generic_signature();
   AnnotationArray* anno = ikh()->class_annotations();
+  AnnotationArray* type_anno = ikh()->class_type_annotations();
 
   int attr_count = 0;
   if (generic_signature != NULL) {
@@ -635,6 +651,9 @@
   if (anno != NULL) {
     ++attr_count;     // has RuntimeVisibleAnnotations attribute
   }
+  if (type_anno != NULL) {
+    ++attr_count;     // has RuntimeVisibleTypeAnnotations attribute
+  }
   if (cpool()->operands() != NULL) {
     ++attr_count;
   }
@@ -656,6 +675,9 @@
   if (anno != NULL) {
     write_annotations_attribute("RuntimeVisibleAnnotations", anno);
   }
+  if (type_anno != NULL) {
+    write_annotations_attribute("RuntimeVisibleTypeAnnotations", type_anno);
+  }
   if (cpool()->operands() != NULL) {
     write_bootstrapmethod_attribute();
   }
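
Editorial aside (not part of the diff): the new RuntimeVisibleTypeAnnotations cases above can reuse write_annotations_attribute() because the VM keeps annotations in raw classfile form, so the helper only has to prepend a generic attribute header before dumping the stored bytes. A sketch of its shape, assuming the write_attribute_name_index/write_u4/writeable_address helpers that already exist in this file:

// Sketch only; mirrors the shape of the existing helper rather than quoting it.
void JvmtiClassFileReconstituter::write_annotations_attribute(const char* attr_name,
                                                              AnnotationArray* annos) {
  u4 length = annos->length();
  write_attribute_name_index(attr_name);   // u2 attribute_name_index
  write_u4(length);                        // u4 attribute_length
  memcpy(writeable_address(length), annos->adr_at(0), length);  // raw annotation/type_annotation bytes
}
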
--- a/src/share/vm/prims/jvmtiEnv.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/prims/jvmtiEnv.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "classfile/classLoaderExt.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "interpreter/bytecodeStream.hpp"
@@ -475,7 +476,7 @@
     if (TraceClassLoading) {
       tty->print_cr("[Opened %s]", zip_entry->name());
     }
-    ClassLoader::add_to_list(zip_entry);
+    ClassLoaderExt::append_boot_classpath(zip_entry);
     return JVMTI_ERROR_NONE;
   } else {
     return JVMTI_ERROR_WRONG_PHASE;
--- a/src/share/vm/prims/jvmtiEnvBase.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/prims/jvmtiEnvBase.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -41,6 +41,7 @@
 #include "runtime/objectMonitor.hpp"
 #include "runtime/objectMonitor.inline.hpp"
 #include "runtime/signature.hpp"
+#include "runtime/thread.inline.hpp"
 #include "runtime/vframe.hpp"
 #include "runtime/vframe_hp.hpp"
 #include "runtime/vmThread.hpp"
--- a/src/share/vm/prims/jvmtiExport.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/prims/jvmtiExport.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -47,7 +47,7 @@
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/objectMonitor.hpp"
 #include "runtime/objectMonitor.inline.hpp"
-#include "runtime/thread.hpp"
+#include "runtime/thread.inline.hpp"
 #include "runtime/vframe.hpp"
 #include "services/attachListener.hpp"
 #include "services/serviceUtil.hpp"
--- a/src/share/vm/prims/jvmtiRawMonitor.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/prims/jvmtiRawMonitor.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -25,7 +25,8 @@
 #include "precompiled.hpp"
 #include "prims/jvmtiRawMonitor.hpp"
 #include "runtime/interfaceSupport.hpp"
-#include "runtime/thread.hpp"
+#include "runtime/orderAccess.inline.hpp"
+#include "runtime/thread.inline.hpp"
 
 GrowableArray<JvmtiRawMonitor*> *JvmtiPendingMonitors::_monitors = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JvmtiRawMonitor*>(1,true);
 
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -135,7 +135,7 @@
 
   // Mark methods seen on stack and everywhere else so old methods are not
   // cleaned up if they're on the stack.
-  MetadataOnStackMark md_on_stack;
+  MetadataOnStackMark md_on_stack(true);
   HandleMark hm(thread);   // make sure any handles created are deleted
                            // before the stack walk again.
 
@@ -1569,6 +1569,29 @@
     return false;
   }
 
+  // rewrite constant pool references in the class_type_annotations:
+  if (!rewrite_cp_refs_in_class_type_annotations(scratch_class, THREAD)) {
+    // propagate failure back to caller
+    return false;
+  }
+
+  // rewrite constant pool references in the fields_type_annotations:
+  if (!rewrite_cp_refs_in_fields_type_annotations(scratch_class, THREAD)) {
+    // propagate failure back to caller
+    return false;
+  }
+
+  // rewrite constant pool references in the methods_type_annotations:
+  if (!rewrite_cp_refs_in_methods_type_annotations(scratch_class, THREAD)) {
+    // propagate failure back to caller
+    return false;
+  }
+
+  // There can be type annotations in the Code part of a method_info attribute.
+  // These annotations are not accessible, even by reflection.
+  // Currently they are not even parsed by the ClassFileParser.
+  // If runtime access is added they will also need to be rewritten.
+
   // rewrite source file name index:
   u2 source_file_name_idx = scratch_class->source_file_name_index();
   if (source_file_name_idx != 0) {
@@ -2239,6 +2262,588 @@
 } // end rewrite_cp_refs_in_methods_default_annotations()
 
 
+// Rewrite constant pool references in a class_type_annotations field.
+bool VM_RedefineClasses::rewrite_cp_refs_in_class_type_annotations(
+       instanceKlassHandle scratch_class, TRAPS) {
+
+  AnnotationArray* class_type_annotations = scratch_class->class_type_annotations();
+  if (class_type_annotations == NULL || class_type_annotations->length() == 0) {
+    // no class_type_annotations so nothing to do
+    return true;
+  }
+
+  RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+    ("class_type_annotations length=%d", class_type_annotations->length()));
+
+  int byte_i = 0;  // byte index into class_type_annotations
+  return rewrite_cp_refs_in_type_annotations_typeArray(class_type_annotations,
+      byte_i, "ClassFile", THREAD);
+} // end rewrite_cp_refs_in_class_type_annotations()
+
+
+// Rewrite constant pool references in a fields_type_annotations field.
+bool VM_RedefineClasses::rewrite_cp_refs_in_fields_type_annotations(
+       instanceKlassHandle scratch_class, TRAPS) {
+
+  Array<AnnotationArray*>* fields_type_annotations = scratch_class->fields_type_annotations();
+  if (fields_type_annotations == NULL || fields_type_annotations->length() == 0) {
+    // no fields_type_annotations so nothing to do
+    return true;
+  }
+
+  RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+    ("fields_type_annotations length=%d", fields_type_annotations->length()));
+
+  for (int i = 0; i < fields_type_annotations->length(); i++) {
+    AnnotationArray* field_type_annotations = fields_type_annotations->at(i);
+    if (field_type_annotations == NULL || field_type_annotations->length() == 0) {
+      // this field does not have any annotations so skip it
+      continue;
+    }
+
+    int byte_i = 0;  // byte index into field_type_annotations
+    if (!rewrite_cp_refs_in_type_annotations_typeArray(field_type_annotations,
+           byte_i, "field_info", THREAD)) {
+      RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("bad field_type_annotations at %d", i));
+      // propagate failure back to caller
+      return false;
+    }
+  }
+
+  return true;
+} // end rewrite_cp_refs_in_fields_type_annotations()
+
+
+// Rewrite constant pool references in a methods_type_annotations field.
+bool VM_RedefineClasses::rewrite_cp_refs_in_methods_type_annotations(
+       instanceKlassHandle scratch_class, TRAPS) {
+
+  for (int i = 0; i < scratch_class->methods()->length(); i++) {
+    Method* m = scratch_class->methods()->at(i);
+    AnnotationArray* method_type_annotations = m->constMethod()->type_annotations();
+
+    if (method_type_annotations == NULL || method_type_annotations->length() == 0) {
+      // this method does not have any annotations so skip it
+      continue;
+    }
+
+    RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("methods type_annotations length=%d", method_type_annotations->length()));
+
+    int byte_i = 0;  // byte index into method_type_annotations
+    if (!rewrite_cp_refs_in_type_annotations_typeArray(method_type_annotations,
+           byte_i, "method_info", THREAD)) {
+      RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("bad method_type_annotations at %d", i));
+      // propagate failure back to caller
+      return false;
+    }
+  }
+
+  return true;
+} // end rewrite_cp_refs_in_methods_type_annotations()
+
+
+// Rewrite constant pool references in a type_annotations
+// field. This "structure" is adapted from the
+// RuntimeVisibleTypeAnnotations_attribute described in
+// section 4.7.20 of the Java SE 8 Edition of the VM spec:
+//
+// type_annotations_typeArray {
+//   u2              num_annotations;
+//   type_annotation annotations[num_annotations];
+// }
+//
+bool VM_RedefineClasses::rewrite_cp_refs_in_type_annotations_typeArray(
+       AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+       const char * location_mesg, TRAPS) {
+
+  if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+    // not enough room for num_annotations field
+    RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+      ("length() is too small for num_annotations field"));
+    return false;
+  }
+
+  u2 num_annotations = Bytes::get_Java_u2((address)
+                         type_annotations_typeArray->adr_at(byte_i_ref));
+  byte_i_ref += 2;
+
+  RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+    ("num_type_annotations=%d", num_annotations));
+
+  int calc_num_annotations = 0;
+  for (; calc_num_annotations < num_annotations; calc_num_annotations++) {
+    if (!rewrite_cp_refs_in_type_annotation_struct(type_annotations_typeArray,
+           byte_i_ref, location_mesg, THREAD)) {
+      RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("bad type_annotation_struct at %d", calc_num_annotations));
+      // propagate failure back to caller
+      return false;
+    }
+  }
+  assert(num_annotations == calc_num_annotations, "sanity check");
+
+  if (byte_i_ref != type_annotations_typeArray->length()) {
+    RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+      ("read wrong amount of bytes at end of processing "
+       "type_annotations_typeArray (%d of %d bytes were read)",
+       byte_i_ref, type_annotations_typeArray->length()));
+    return false;
+  }
+
+  return true;
+} // end rewrite_cp_refs_in_type_annotations_typeArray()
+
+
+// Rewrite constant pool references in a type_annotation
+// field. This "structure" is adapted from the
+// RuntimeVisibleTypeAnnotations_attribute described in
+// section 4.7.20 of the Java SE 8 Edition of the VM spec:
+//
+// type_annotation {
+//   u1 target_type;
+//   union {
+//     type_parameter_target;
+//     supertype_target;
+//     type_parameter_bound_target;
+//     empty_target;
+//     method_formal_parameter_target;
+//     throws_target;
+//     localvar_target;
+//     catch_target;
+//     offset_target;
+//     type_argument_target;
+//   } target_info;
+//   type_path target_path;
+//   annotation anno;
+// }
+//
+bool VM_RedefineClasses::rewrite_cp_refs_in_type_annotation_struct(
+       AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+       const char * location_mesg, TRAPS) {
+
+  if (!skip_type_annotation_target(type_annotations_typeArray,
+         byte_i_ref, location_mesg, THREAD)) {
+    return false;
+  }
+
+  if (!skip_type_annotation_type_path(type_annotations_typeArray,
+         byte_i_ref, THREAD)) {
+    return false;
+  }
+
+  if (!rewrite_cp_refs_in_annotation_struct(type_annotations_typeArray,
+         byte_i_ref, THREAD)) {
+    return false;
+  }
+
+  return true;
+} // end rewrite_cp_refs_in_type_annotation_struct()
+
+
+// Read, verify and skip over the target_type and target_info part
+// so that rewriting can continue in the later parts of the struct.
+//
+// u1 target_type;
+// union {
+//   type_parameter_target;
+//   supertype_target;
+//   type_parameter_bound_target;
+//   empty_target;
+//   method_formal_parameter_target;
+//   throws_target;
+//   localvar_target;
+//   catch_target;
+//   offset_target;
+//   type_argument_target;
+// } target_info;
+//
+bool VM_RedefineClasses::skip_type_annotation_target(
+       AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+       const char * location_mesg, TRAPS) {
+
+  if ((byte_i_ref + 1) > type_annotations_typeArray->length()) {
+    // not enough room for a target_type let alone the rest of a type_annotation
+    RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+      ("length() is too small for a target_type"));
+    return false;
+  }
+
+  u1 target_type = type_annotations_typeArray->at(byte_i_ref);
+  byte_i_ref += 1;
+  RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("target_type=0x%.2x", target_type));
+  RC_TRACE_WITH_THREAD(0x02000000, THREAD, ("location=%s", location_mesg));
+
+  // Skip over target_info
+  switch (target_type) {
+    case 0x00:
+    // kind: type parameter declaration of generic class or interface
+    // location: ClassFile
+    case 0x01:
+    // kind: type parameter declaration of generic method or constructor
+    // location: method_info
+
+    {
+      // struct:
+      // type_parameter_target {
+      //   u1 type_parameter_index;
+      // }
+      //
+      if ((byte_i_ref + 1) > type_annotations_typeArray->length()) {
+        RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+          ("length() is too small for a type_parameter_target"));
+        return false;
+      }
+
+      u1 type_parameter_index = type_annotations_typeArray->at(byte_i_ref);
+      byte_i_ref += 1;
+
+      RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("type_parameter_target: type_parameter_index=%d",
+         type_parameter_index));
+    } break;
+
+    case 0x10:
+    // kind: type in extends clause of class or interface declaration
+    //       (including the direct superclass of an anonymous class declaration),
+    //       or in implements clause of interface declaration
+    // location: ClassFile
+
+    {
+      // struct:
+      // supertype_target {
+      //   u2 supertype_index;
+      // }
+      //
+      if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+        RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+          ("length() is too small for a supertype_target"));
+        return false;
+      }
+
+      u2 supertype_index = Bytes::get_Java_u2((address)
+                             type_annotations_typeArray->adr_at(byte_i_ref));
+      byte_i_ref += 2;
+
+      RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("supertype_target: supertype_index=%d", supertype_index));
+    } break;
+
+    case 0x11:
+    // kind: type in bound of type parameter declaration of generic class or interface
+    // location: ClassFile
+    case 0x12:
+    // kind: type in bound of type parameter declaration of generic method or constructor
+    // location: method_info
+
+    {
+      // struct:
+      // type_parameter_bound_target {
+      //   u1 type_parameter_index;
+      //   u1 bound_index;
+      // }
+      //
+      if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+        RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+          ("length() is too small for a type_parameter_bound_target"));
+        return false;
+      }
+
+      u1 type_parameter_index = type_annotations_typeArray->at(byte_i_ref);
+      byte_i_ref += 1;
+      u1 bound_index = type_annotations_typeArray->at(byte_i_ref);
+      byte_i_ref += 1;
+
+      RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("type_parameter_bound_target: type_parameter_index=%d, bound_index=%d",
+         type_parameter_index, bound_index));
+    } break;
+
+    case 0x13:
+    // kind: type in field declaration
+    // location: field_info
+    case 0x14:
+    // kind: return type of method, or type of newly constructed object
+    // location: method_info
+    case 0x15:
+    // kind: receiver type of method or constructor
+    // location: method_info
+
+    {
+      // struct:
+      // empty_target {
+      // }
+      //
+      RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("empty_target"));
+    } break;
+
+    case 0x16:
+    // kind: type in formal parameter declaration of method, constructor, or lambda expression
+    // location: method_info
+
+    {
+      // struct:
+      // formal_parameter_target {
+      //   u1 formal_parameter_index;
+      // }
+      //
+      if ((byte_i_ref + 1) > type_annotations_typeArray->length()) {
+        RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+          ("length() is too small for a formal_parameter_target"));
+        return false;
+      }
+
+      u1 formal_parameter_index = type_annotations_typeArray->at(byte_i_ref);
+      byte_i_ref += 1;
+
+      RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("formal_parameter_target: formal_parameter_index=%d",
+         formal_parameter_index));
+    } break;
+
+    case 0x17:
+    // kind: type in throws clause of method or constructor
+    // location: method_info
+
+    {
+      // struct:
+      // throws_target {
+      //   u2 throws_type_index
+      // }
+      //
+      if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+        RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+          ("length() is too small for a throws_target"));
+        return false;
+      }
+
+      u2 throws_type_index = Bytes::get_Java_u2((address)
+                               type_annotations_typeArray->adr_at(byte_i_ref));
+      byte_i_ref += 2;
+
+      RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("throws_target: throws_type_index=%d", throws_type_index));
+    } break;
+
+    case 0x40:
+    // kind: type in local variable declaration
+    // location: Code
+    case 0x41:
+    // kind: type in resource variable declaration
+    // location: Code
+
+    {
+      // struct:
+      // localvar_target {
+      //   u2 table_length;
+      //   struct {
+      //     u2 start_pc;
+      //     u2 length;
+      //     u2 index;
+      //   } table[table_length];
+      // }
+      //
+      if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+        // not enough room for a table_length let alone the rest of a localvar_target
+        RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+          ("length() is too small for a localvar_target table_length"));
+        return false;
+      }
+
+      u2 table_length = Bytes::get_Java_u2((address)
+                          type_annotations_typeArray->adr_at(byte_i_ref));
+      byte_i_ref += 2;
+
+      RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("localvar_target: table_length=%d", table_length));
+
+      int table_struct_size = 2 + 2 + 2; // 3 u2 variables per table entry
+      int table_size = table_length * table_struct_size;
+
+      if ((byte_i_ref + table_size) > type_annotations_typeArray->length()) {
+        // not enough room for a table
+        RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+          ("length() is too small for a table array of length %d", table_length));
+        return false;
+      }
+
+      // Skip over table
+      byte_i_ref += table_size;
+    } break;
+
+    case 0x42:
+    // kind: type in exception parameter declaration
+    // location: Code
+
+    {
+      // struct:
+      // catch_target {
+      //   u2 exception_table_index;
+      // }
+      //
+      if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+        RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+          ("length() is too small for a catch_target"));
+        return false;
+      }
+
+      u2 exception_table_index = Bytes::get_Java_u2((address)
+                                   type_annotations_typeArray->adr_at(byte_i_ref));
+      byte_i_ref += 2;
+
+      RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("catch_target: exception_table_index=%d", exception_table_index));
+    } break;
+
+    case 0x43:
+    // kind: type in instanceof expression
+    // location: Code
+    case 0x44:
+    // kind: type in new expression
+    // location: Code
+    case 0x45:
+    // kind: type in method reference expression using ::new
+    // location: Code
+    case 0x46:
+    // kind: type in method reference expression using ::Identifier
+    // location: Code
+
+    {
+      // struct:
+      // offset_target {
+      //   u2 offset;
+      // }
+      //
+      if ((byte_i_ref + 2) > type_annotations_typeArray->length()) {
+        RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+          ("length() is too small for a offset_target"));
+        return false;
+      }
+
+      u2 offset = Bytes::get_Java_u2((address)
+                    type_annotations_typeArray->adr_at(byte_i_ref));
+      byte_i_ref += 2;
+
+      RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("offset_target: offset=%d", offset));
+    } break;
+
+    case 0x47:
+    // kind: type in cast expression
+    // location: Code
+    case 0x48:
+    // kind: type argument for generic constructor in new expression or
+    //       explicit constructor invocation statement
+    // location: Code
+    case 0x49:
+    // kind: type argument for generic method in method invocation expression
+    // location: Code
+    case 0x4A:
+    // kind: type argument for generic constructor in method reference expression using ::new
+    // location: Code
+    case 0x4B:
+    // kind: type argument for generic method in method reference expression using ::Identifier
+    // location: Code
+
+    {
+      // struct:
+      // type_argument_target {
+      //   u2 offset;
+      //   u1 type_argument_index;
+      // }
+      //
+      if ((byte_i_ref + 3) > type_annotations_typeArray->length()) {
+        RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+          ("length() is too small for a type_argument_target"));
+        return false;
+      }
+
+      u2 offset = Bytes::get_Java_u2((address)
+                    type_annotations_typeArray->adr_at(byte_i_ref));
+      byte_i_ref += 2;
+      u1 type_argument_index = type_annotations_typeArray->at(byte_i_ref);
+      byte_i_ref += 1;
+
+      RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("type_argument_target: offset=%d, type_argument_index=%d",
+         offset, type_argument_index));
+    } break;
+
+    default:
+      RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("unknown target_type"));
+#ifdef ASSERT
+      ShouldNotReachHere();
+#endif
+      return false;
+  }
+
+  return true;
+} // end skip_type_annotation_target()
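
As a reading aid (not part of this changeset), the target_info sizes consumed by the switch above can be summarized in one hypothetical helper; the values follow section 4.7.20 of the Java SE 8 VM spec, and localvar_target is the only variable-length case:

typedef unsigned char u1;   // HotSpot's u1, repeated so this sketch stands alone

// Hypothetical summary of how many target_info bytes follow each target_type.
static int fixed_target_info_size(u1 target_type) {
  switch (target_type) {
    case 0x00: case 0x01:                       return 1;  // type_parameter_target
    case 0x10:                                  return 2;  // supertype_target
    case 0x11: case 0x12:                       return 2;  // type_parameter_bound_target
    case 0x13: case 0x14: case 0x15:            return 0;  // empty_target
    case 0x16:                                  return 1;  // formal_parameter_target
    case 0x17:                                  return 2;  // throws_target
    case 0x40: case 0x41:                       return -1; // localvar_target: 2 + 6 * table_length
    case 0x42:                                  return 2;  // catch_target
    case 0x43: case 0x44: case 0x45: case 0x46: return 2;  // offset_target
    case 0x47: case 0x48: case 0x49:
    case 0x4A: case 0x4B:                       return 3;  // type_argument_target
    default:                                    return -1; // unknown target_type
  }
}
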
+
+
+// Read, verify and skip over the type_path part so that rewriting
+// can continue in the later parts of the struct.
+//
+// type_path {
+//   u1 path_length;
+//   {
+//     u1 type_path_kind;
+//     u1 type_argument_index;
+//   } path[path_length];
+// }
+//
+bool VM_RedefineClasses::skip_type_annotation_type_path(
+       AnnotationArray* type_annotations_typeArray, int &byte_i_ref, TRAPS) {
+
+  if ((byte_i_ref + 1) > type_annotations_typeArray->length()) {
+    // not enough room for a path_length let alone the rest of the type_path
+    RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+      ("length() is too small for a type_path"));
+    return false;
+  }
+
+  u1 path_length = type_annotations_typeArray->at(byte_i_ref);
+  byte_i_ref += 1;
+
+  RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+    ("type_path: path_length=%d", path_length));
+
+  int calc_path_length = 0;
+  for (; calc_path_length < path_length; calc_path_length++) {
+    if ((byte_i_ref + 1 + 1) > type_annotations_typeArray->length()) {
+      // not enough room for a path
+      RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("length() is too small for path entry %d of %d",
+         calc_path_length, path_length));
+      return false;
+    }
+
+    u1 type_path_kind = type_annotations_typeArray->at(byte_i_ref);
+    byte_i_ref += 1;
+    u1 type_argument_index = type_annotations_typeArray->at(byte_i_ref);
+    byte_i_ref += 1;
+
+    RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+      ("type_path: path[%d]: type_path_kind=%d, type_argument_index=%d",
+       calc_path_length, type_path_kind, type_argument_index));
+
+    if (type_path_kind > 3 || (type_path_kind != 3 && type_argument_index != 0)) {
+      // inconsistent type_path values
+      RC_TRACE_WITH_THREAD(0x02000000, THREAD,
+        ("inconsistent type_path values"));
+      return false;
+    }
+  }
+  assert(path_length == calc_path_length, "sanity check");
+
+  return true;
+} // end skip_type_annotation_type_path()
+
+
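
To make the byte accounting in these helpers concrete, here is a minimal, hypothetical RuntimeVisibleTypeAnnotations payload (illustrative only, not taken from the changeset) for a single field annotation; the constant-pool indices inside the trailing annotation are the only values the rewriter actually patches:

typedef unsigned char u1;   // HotSpot's u1, repeated so this sketch stands alone

// One type_annotation on a field: target_type 0x13 (empty_target), empty type_path,
// annotation with type_index 7 and no element_value pairs.
static const u1 example_type_annotations[] = {
  0x00, 0x01,   // u2 num_annotations = 1
  0x13,         //   u1 target_type = 0x13 -> empty_target (0 bytes of target_info)
  0x00,         //   u1 path_length = 0    -> empty type_path
  0x00, 0x07,   //   u2 type_index  = 7    (rewritten if constant pool entry 7 moved)
  0x00, 0x00    //   u2 num_element_value_pairs = 0
};
// rewrite_cp_refs_in_type_annotations_typeArray() reads the leading u2, then per
// annotation: skip_type_annotation_target() consumes 1 byte, skip_type_annotation_type_path()
// consumes 1 byte, and rewrite_cp_refs_in_annotation_struct() handles the final 4.
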
 // Rewrite constant pool references in the method's stackmap table.
 // These "structures" are adapted from the StackMapTable_attribute that
 // is described in section 4.8.4 of the 6.0 version of the VM spec
@@ -3223,23 +3828,6 @@
 
 void VM_RedefineClasses::swap_annotations(instanceKlassHandle the_class,
                                           instanceKlassHandle scratch_class) {
-  // Since there is currently no rewriting of type annotations indexes
-  // into the CP, we null out type annotations on scratch_class before
-  // we swap annotations with the_class rather than facing the
-  // possibility of shipping annotations with broken indexes to
-  // Java-land.
-  ClassLoaderData* loader_data = scratch_class->class_loader_data();
-  AnnotationArray* new_class_type_annotations = scratch_class->class_type_annotations();
-  if (new_class_type_annotations != NULL) {
-    MetadataFactory::free_array<u1>(loader_data, new_class_type_annotations);
-    scratch_class->annotations()->set_class_type_annotations(NULL);
-  }
-  Array<AnnotationArray*>* new_field_type_annotations = scratch_class->fields_type_annotations();
-  if (new_field_type_annotations != NULL) {
-    Annotations::free_contents(loader_data, new_field_type_annotations);
-    scratch_class->annotations()->set_fields_type_annotations(NULL);
-  }
-
   // Swap annotation fields values
   Annotations* old_annotations = the_class->annotations();
   the_class->set_annotations(scratch_class->annotations());
--- a/src/share/vm/prims/jvmtiRedefineClasses.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/prims/jvmtiRedefineClasses.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -457,6 +457,17 @@
     instanceKlassHandle scratch_class, TRAPS);
   bool rewrite_cp_refs_in_element_value(
     AnnotationArray* class_annotations, int &byte_i_ref, TRAPS);
+  bool rewrite_cp_refs_in_type_annotations_typeArray(
+    AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+    const char * location_mesg, TRAPS);
+  bool rewrite_cp_refs_in_type_annotation_struct(
+    AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+    const char * location_mesg, TRAPS);
+  bool skip_type_annotation_target(
+    AnnotationArray* type_annotations_typeArray, int &byte_i_ref,
+    const char * location_mesg, TRAPS);
+  bool skip_type_annotation_type_path(
+    AnnotationArray* type_annotations_typeArray, int &byte_i_ref, TRAPS);
   bool rewrite_cp_refs_in_fields_annotations(
     instanceKlassHandle scratch_class, TRAPS);
   void rewrite_cp_refs_in_method(methodHandle method,
@@ -468,6 +479,12 @@
     instanceKlassHandle scratch_class, TRAPS);
   bool rewrite_cp_refs_in_methods_parameter_annotations(
     instanceKlassHandle scratch_class, TRAPS);
+  bool rewrite_cp_refs_in_class_type_annotations(
+    instanceKlassHandle scratch_class, TRAPS);
+  bool rewrite_cp_refs_in_fields_type_annotations(
+    instanceKlassHandle scratch_class, TRAPS);
+  bool rewrite_cp_refs_in_methods_type_annotations(
+    instanceKlassHandle scratch_class, TRAPS);
   void rewrite_cp_refs_in_stack_map_table(methodHandle method, TRAPS);
   void rewrite_cp_refs_in_verification_type_info(
          address& stackmap_addr_ref, address stackmap_end, u2 frame_i,
--- a/src/share/vm/prims/jvmtiTagMap.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/prims/jvmtiTagMap.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -3019,7 +3019,7 @@
 
   // If there are any non-perm roots in the code cache, visit them.
   blk.set_kind(JVMTI_HEAP_REFERENCE_OTHER);
-  CodeBlobToOopClosure look_in_blobs(&blk, false);
+  CodeBlobToOopClosure look_in_blobs(&blk, !CodeBlobToOopClosure::FixRelocations);
   CodeCache::scavenge_root_nmethods_do(&look_in_blobs);
 
   return true;
--- a/src/share/vm/prims/jvmtiThreadState.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/prims/jvmtiThreadState.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -27,6 +27,7 @@
 
 #include "prims/jvmtiEnvThreadState.hpp"
 #include "prims/jvmtiThreadState.hpp"
+#include "runtime/thread.inline.hpp"
 
 // JvmtiEnvThreadStateIterator implementation
 
--- a/src/share/vm/prims/methodHandles.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/prims/methodHandles.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -29,7 +29,6 @@
 #include "interpreter/oopMapCache.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/oopFactory.hpp"
-#include "prims/jvmtiRedefineClassesTrace.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/javaCalls.hpp"
@@ -271,9 +270,12 @@
   // This is done eagerly, since it is readily available without
   // constructing any new objects.
   // TO DO: maybe intern mname_oop
-  m->method_holder()->add_member_name(m->method_idnum(), mname);
-
-  return mname();
+  if (m->method_holder()->add_member_name(mname)) {
+    return mname();
+  } else {
+    // Redefinition caused this to fail.  Return NULL (and an exception?)
+    return NULL;
+  }
 }
 
 oop MethodHandles::init_field_MemberName(Handle mname, fieldDescriptor& fd, bool is_setter) {
@@ -946,63 +948,27 @@
   }
 }
 
-void MemberNameTable::add_member_name(int index, jweak mem_name_wref) {
+void MemberNameTable::add_member_name(jweak mem_name_wref) {
   assert_locked_or_safepoint(MemberNameTable_lock);
-  this->at_put_grow(index, mem_name_wref);
-}
-
-// Return a member name oop or NULL.
-oop MemberNameTable::get_member_name(int index) {
-  assert_locked_or_safepoint(MemberNameTable_lock);
-
-  jweak ref = this->at(index);
-  oop mem_name = JNIHandles::resolve(ref);
-  return mem_name;
+  this->push(mem_name_wref);
 }
 
 #if INCLUDE_JVMTI
-oop MemberNameTable::find_member_name_by_method(Method* old_method) {
-  assert_locked_or_safepoint(MemberNameTable_lock);
-  oop found = NULL;
-  int len = this->length();
-
-  for (int idx = 0; idx < len; idx++) {
-    oop mem_name = JNIHandles::resolve(this->at(idx));
-    if (mem_name == NULL) {
-      continue;
-    }
-    Method* method = (Method*)java_lang_invoke_MemberName::vmtarget(mem_name);
-    if (method == old_method) {
-      found = mem_name;
-      break;
-    }
-  }
-  return found;
-}
-
-// It is called at safepoint only
+// It is called at safepoint only for RedefineClasses
 void MemberNameTable::adjust_method_entries(Method** old_methods, Method** new_methods,
                                             int methods_length, bool *trace_name_printed) {
   assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
-  // search the MemberNameTable for uses of either obsolete or EMCP methods
+  // For each redefined method
   for (int j = 0; j < methods_length; j++) {
     Method* old_method = old_methods[j];
     Method* new_method = new_methods[j];
-    oop mem_name = find_member_name_by_method(old_method);
-    if (mem_name != NULL) {
-      java_lang_invoke_MemberName::adjust_vmtarget(mem_name, new_method);
 
-      if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
-        if (!(*trace_name_printed)) {
-          // RC_TRACE_MESG macro has an embedded ResourceMark
-          RC_TRACE_MESG(("adjust: name=%s",
-                         old_method->method_holder()->external_name()));
-          *trace_name_printed = true;
-        }
-        // RC_TRACE macro has an embedded ResourceMark
-        RC_TRACE(0x00400000, ("MemberName method update: %s(%s)",
-                              new_method->name()->as_C_string(),
-                              new_method->signature()->as_C_string()));
+    // search the MemberNameTable for uses of either obsolete or EMCP methods
+    for (int idx = 0; idx < length(); idx++) {
+      oop mem_name = JNIHandles::resolve(this->at(idx));
+      if (mem_name != NULL) {
+        java_lang_invoke_MemberName::adjust_vmtarget(mem_name, old_method, new_method,
+                                                     trace_name_printed);
       }
     }
   }
--- a/src/share/vm/prims/methodHandles.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/prims/methodHandles.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -236,18 +236,14 @@
  public:
   MemberNameTable(int methods_cnt);
   ~MemberNameTable();
-  void add_member_name(int index, jweak mem_name_ref);
-  oop  get_member_name(int index);
+  void add_member_name(jweak mem_name_ref);
 
 #if INCLUDE_JVMTI
- public:
   // RedefineClasses() API support:
   // If a MemberName refers to old_method then update it
   // to refer to new_method.
   void adjust_method_entries(Method** old_methods, Method** new_methods,
                              int methods_length, bool *trace_name_printed);
- private:
-  oop find_member_name_by_method(Method* old_method);
 #endif // INCLUDE_JVMTI
 };
 
--- a/src/share/vm/prims/unsafe.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/prims/unsafe.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -33,6 +33,8 @@
 #include "prims/jvm.h"
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.hpp"
+#include "runtime/prefetch.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/reflection.hpp"
 #include "runtime/synchronizer.hpp"
 #include "services/threadService.hpp"
@@ -951,6 +953,14 @@
   }
 UNSAFE_END
 
+static jobject get_class_loader(JNIEnv* env, jclass cls) {
+  if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(cls))) {
+    return NULL;
+  }
+  Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(cls));
+  oop loader = k->class_loader();
+  return JNIHandles::make_local(env, loader);
+}
 
 UNSAFE_ENTRY(jclass, Unsafe_DefineClass0(JNIEnv *env, jobject unsafe, jstring name, jbyteArray data, int offset, int length))
   UnsafeWrapper("Unsafe_DefineClass");
@@ -959,7 +969,7 @@
 
     int depthFromDefineClass0 = 1;
     jclass  caller = JVM_GetCallerClass(env, depthFromDefineClass0);
-    jobject loader = (caller == NULL) ? NULL : JVM_GetClassLoader(env, caller);
+    jobject loader = (caller == NULL) ? NULL : get_class_loader(env, caller);
     jobject pd     = (caller == NULL) ? NULL : JVM_GetProtectionDomain(env, caller);
 
     return Unsafe_DefineClass_impl(env, name, data, offset, length, loader, pd);
--- a/src/share/vm/prims/whitebox.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/prims/whitebox.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 
+#include "memory/metadataFactory.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
 
@@ -36,21 +37,26 @@
 #include "runtime/arguments.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/os.hpp"
+#include "utilities/array.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/exceptions.hpp"
 
 #if INCLUDE_ALL_GCS
+#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #endif // INCLUDE_ALL_GCS
 
-#ifdef INCLUDE_NMT
+#if INCLUDE_NMT
+#include "services/mallocSiteTable.hpp"
 #include "services/memTracker.hpp"
+#include "utilities/nativeCallStack.hpp"
 #endif // INCLUDE_NMT
 
 #include "compiler/compileBroker.hpp"
+#include "jvmtifiles/jvmtiEnv.hpp"
 #include "runtime/compilationPolicy.hpp"
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
@@ -99,6 +105,51 @@
   return closure.found();
 WB_END
 
+WB_ENTRY(jboolean, WB_ClassKnownToNotExist(JNIEnv* env, jobject o, jobject loader, jstring name))
+  ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+  const char* class_name = env->GetStringUTFChars(name, NULL);
+  jboolean result = JVM_KnownToNotExist(env, loader, class_name);
+  env->ReleaseStringUTFChars(name, class_name);
+  return result;
+WB_END
+
+WB_ENTRY(jobjectArray, WB_GetLookupCacheURLs(JNIEnv* env, jobject o, jobject loader))
+  ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+  return JVM_GetResourceLookupCacheURLs(env, loader);
+WB_END
+
+WB_ENTRY(jintArray, WB_GetLookupCacheMatches(JNIEnv* env, jobject o, jobject loader, jstring name))
+  ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+  const char* resource_name = env->GetStringUTFChars(name, NULL);
+  jintArray result = JVM_GetResourceLookupCache(env, loader, resource_name);
+
+  env->ReleaseStringUTFChars(name, resource_name);
+  return result;
+WB_END
+
+WB_ENTRY(void, WB_AddToBootstrapClassLoaderSearch(JNIEnv* env, jobject o, jstring segment)) {
+#if INCLUDE_JVMTI
+  ResourceMark rm;
+  const char* seg = java_lang_String::as_utf8_string(JNIHandles::resolve_non_null(segment));
+  JvmtiEnv* jvmti_env = JvmtiEnv::create_a_jvmti(JVMTI_VERSION);
+  jvmtiError err = jvmti_env->AddToBootstrapClassLoaderSearch(seg);
+  assert(err == JVMTI_ERROR_NONE, "must not fail");
+#endif
+}
+WB_END
+
+WB_ENTRY(void, WB_AddToSystemClassLoaderSearch(JNIEnv* env, jobject o, jstring segment)) {
+#if INCLUDE_JVMTI
+  ResourceMark rm;
+  const char* seg = java_lang_String::as_utf8_string(JNIHandles::resolve_non_null(segment));
+  JvmtiEnv* jvmti_env = JvmtiEnv::create_a_jvmti(JVMTI_VERSION);
+  jvmtiError err = jvmti_env->AddToSystemClassLoaderSearch(seg);
+  assert(err == JVMTI_ERROR_NONE, "must not fail");
+#endif
+}
+WB_END
+
+
 WB_ENTRY(jlong, WB_GetCompressedOopsMaxHeapSize(JNIEnv* env, jobject o)) {
   return (jlong)Arguments::max_heap_for_compressed_oops();
 }
@@ -219,6 +270,30 @@
                                         (size_t) magnitude, (size_t) iterations);
 WB_END
 
+WB_ENTRY(jboolean, WB_isObjectInOldGen(JNIEnv* env, jobject o, jobject obj))
+  oop p = JNIHandles::resolve(obj);
+#if INCLUDE_ALL_GCS
+  if (UseG1GC) {
+    G1CollectedHeap* g1 = G1CollectedHeap::heap();
+    const HeapRegion* hr = g1->heap_region_containing(p);
+    if (hr == NULL) {
+      return false;
+    }
+    return !(hr->is_young());
+  } else if (UseParallelGC) {
+    ParallelScavengeHeap* psh = ParallelScavengeHeap::heap();
+    return !psh->is_in_young(p);
+  }
+#endif // INCLUDE_ALL_GCS
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  return !gch->is_in_young(p);
+WB_END
+
+WB_ENTRY(jlong, WB_GetObjectSize(JNIEnv* env, jobject o, jobject obj))
+  oop p = JNIHandles::resolve(obj);
+  return p->size() * HeapWordSize;
+WB_END
+
 #if INCLUDE_ALL_GCS
 WB_ENTRY(jboolean, WB_G1IsHumongous(JNIEnv* env, jobject o, jobject obj))
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
@@ -229,7 +304,7 @@
 
 WB_ENTRY(jlong, WB_G1NumFreeRegions(JNIEnv* env, jobject o))
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
-  size_t nr = g1->free_regions();
+  size_t nr = g1->num_free_regions();
   return (jlong)nr;
 WB_END
 
@@ -249,12 +324,16 @@
 // NMT picks it up correctly
 WB_ENTRY(jlong, WB_NMTMalloc(JNIEnv* env, jobject o, jlong size))
   jlong addr = 0;
+  addr = (jlong)(uintptr_t)os::malloc(size, mtTest);
+  return addr;
+WB_END
 
-  if (MemTracker::is_on() && !MemTracker::shutdown_in_progress()) {
-    addr = (jlong)(uintptr_t)os::malloc(size, mtTest);
-  }
-
-  return addr;
+// Alloc memory with pseudo call stack. The test can create a pseudo malloc
+// allocation site to stress the malloc tracking.
+WB_ENTRY(jlong, WB_NMTMallocWithPseudoStack(JNIEnv* env, jobject o, jlong size, jint pseudo_stack))
+  address pc = (address)(size_t)pseudo_stack;
+  NativeCallStack stack(&pc, 1);
+  return (jlong)(uintptr_t)os::malloc(size, mtTest, stack);
 WB_END
 
 // Free the memory allocated by NMTAllocTest
@@ -265,10 +344,8 @@
 WB_ENTRY(jlong, WB_NMTReserveMemory(JNIEnv* env, jobject o, jlong size))
   jlong addr = 0;
 
-  if (MemTracker::is_on() && !MemTracker::shutdown_in_progress()) {
     addr = (jlong)(uintptr_t)os::reserve_memory(size);
     MemTracker::record_virtual_memory_type((address)addr, mtTest);
-  }
 
   return addr;
 WB_END
@@ -287,20 +364,46 @@
   os::release_memory((char *)(uintptr_t)addr, size);
 WB_END
 
-// Block until the current generation of NMT data to be merged, used to reliably test the NMT feature
-WB_ENTRY(jboolean, WB_NMTWaitForDataMerge(JNIEnv* env))
-
-  if (!MemTracker::is_on() || MemTracker::shutdown_in_progress()) {
-    return false;
-  }
-
-  return MemTracker::wbtest_wait_for_data_merge();
+WB_ENTRY(jboolean, WB_NMTIsDetailSupported(JNIEnv* env))
+  return MemTracker::tracking_level() == NMT_detail;
 WB_END
 
-WB_ENTRY(jboolean, WB_NMTIsDetailSupported(JNIEnv* env))
-  return MemTracker::tracking_level() == MemTracker::NMT_detail;
+WB_ENTRY(jboolean, WB_NMTChangeTrackingLevel(JNIEnv* env))
+  // Test that we can downgrade NMT levels but not upgrade them.
+  if (MemTracker::tracking_level() == NMT_off) {
+    MemTracker::transition_to(NMT_off);
+    return MemTracker::tracking_level() == NMT_off;
+  } else {
+    assert(MemTracker::tracking_level() == NMT_detail, "Should start out as detail tracking");
+    MemTracker::transition_to(NMT_summary);
+    assert(MemTracker::tracking_level() == NMT_summary, "Should be summary now");
+
+    // Can't go to detail once NMT is set to summary.
+    MemTracker::transition_to(NMT_detail);
+    assert(MemTracker::tracking_level() == NMT_summary, "Should still be summary now");
+
+    // Shutdown sets tracking level to minimal.
+    MemTracker::shutdown();
+    assert(MemTracker::tracking_level() == NMT_minimal, "Should be minimal now");
+
+    // Once the tracking level is minimal, we cannot increase to summary.
+    // The code ignores this request instead of asserting because if the malloc site
+    // table overflows in another thread, it tries to change the code to summary.
+    MemTracker::transition_to(NMT_summary);
+    assert(MemTracker::tracking_level() == NMT_minimal, "Should still be minimal now");
+
+    // Really can never go up to detail, verify that the code would never do this.
+    MemTracker::transition_to(NMT_detail);
+    assert(MemTracker::tracking_level() == NMT_minimal, "Should still be minimal now");
+    return MemTracker::tracking_level() == NMT_minimal;
+  }
 WB_END
 
+WB_ENTRY(jint, WB_NMTGetHashSize(JNIEnv* env, jobject o))
+  int hash_size = MallocSiteTable::hash_buckets();
+  assert(hash_size > 0, "NMT hash_size should be > 0");
+  return (jint)hash_size;
+WB_END
 #endif // INCLUDE_NMT
 
 static jmethodID reflected_method_to_jmid(JavaThread* thread, JNIEnv* env, jobject method) {
@@ -322,19 +425,10 @@
   CHECK_JNI_EXCEPTION_(env, result);
   MutexLockerEx mu(Compile_lock);
   methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
-  nmethod* code;
   if (is_osr) {
-    int bci = InvocationEntryBci;
-    while ((code = mh->lookup_osr_nmethod_for(bci, CompLevel_none, false)) != NULL) {
-      code->mark_for_deoptimization();
-      ++result;
-      bci = code->osr_entry_bci() + 1;
-    }
-  } else {
-    code = mh->code();
-  }
-  if (code != NULL) {
-    code->mark_for_deoptimization();
+    result += mh->mark_osr_nmethods();
+  } else if (mh->code() != NULL) {
+    mh->code()->mark_for_deoptimization();
     ++result;
   }
   result += CodeCache::mark_for_deoptimization(mh());
@@ -495,12 +589,165 @@
 
 #ifdef TIERED
     mcs->set_rate(0.0F);
-    mh->set_prev_event_count(0, THREAD);
-    mh->set_prev_time(0, THREAD);
+    mh->set_prev_event_count(0);
+    mh->set_prev_time(0);
 #endif
   }
 WB_END
 
+template <typename T>
+static bool GetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value, bool (*TAt)(const char*, T*)) {
+  if (name == NULL) {
+    return false;
+  }
+  ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+  const char* flag_name = env->GetStringUTFChars(name, NULL);
+  bool result = (*TAt)(flag_name, value);
+  env->ReleaseStringUTFChars(name, flag_name);
+  return result;
+}
+
+template <typename T>
+static bool SetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value, bool (*TAtPut)(const char*, T*, Flag::Flags)) {
+  if (name == NULL) {
+    return false;
+  }
+  ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+  const char* flag_name = env->GetStringUTFChars(name, NULL);
+  bool result = (*TAtPut)(flag_name, value, Flag::INTERNAL);
+  env->ReleaseStringUTFChars(name, flag_name);
+  return result;
+}
+
+template <typename T>
+static jobject box(JavaThread* thread, JNIEnv* env, Symbol* name, Symbol* sig, T value) {
+  ResourceMark rm(thread);
+  jclass clazz = env->FindClass(name->as_C_string());
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  jmethodID methodID = env->GetStaticMethodID(clazz,
+        vmSymbols::valueOf_name()->as_C_string(),
+        sig->as_C_string());
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  jobject result = env->CallStaticObjectMethod(clazz, methodID, value);
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  return result;
+}
+
+static jobject booleanBox(JavaThread* thread, JNIEnv* env, jboolean value) {
+  return box(thread, env, vmSymbols::java_lang_Boolean(), vmSymbols::Boolean_valueOf_signature(), value);
+}
+static jobject integerBox(JavaThread* thread, JNIEnv* env, jint value) {
+  return box(thread, env, vmSymbols::java_lang_Integer(), vmSymbols::Integer_valueOf_signature(), value);
+}
+static jobject longBox(JavaThread* thread, JNIEnv* env, jlong value) {
+  return box(thread, env, vmSymbols::java_lang_Long(), vmSymbols::Long_valueOf_signature(), value);
+}
+/* static jobject floatBox(JavaThread* thread, JNIEnv* env, jfloat value) {
+  return box(thread, env, vmSymbols::java_lang_Float(), vmSymbols::Float_valueOf_signature(), value);
+}*/
+static jobject doubleBox(JavaThread* thread, JNIEnv* env, jdouble value) {
+  return box(thread, env, vmSymbols::java_lang_Double(), vmSymbols::Double_valueOf_signature(), value);
+}
+
+WB_ENTRY(jobject, WB_GetBooleanVMFlag(JNIEnv* env, jobject o, jstring name))
+  bool result;
+  if (GetVMFlag <bool> (thread, env, name, &result, &CommandLineFlags::boolAt)) {
+    ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+    return booleanBox(thread, env, result);
+  }
+  return NULL;
+WB_END
+
+WB_ENTRY(jobject, WB_GetIntxVMFlag(JNIEnv* env, jobject o, jstring name))
+  intx result;
+  if (GetVMFlag <intx> (thread, env, name, &result, &CommandLineFlags::intxAt)) {
+    ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+    return longBox(thread, env, result);
+  }
+  return NULL;
+WB_END
+
+WB_ENTRY(jobject, WB_GetUintxVMFlag(JNIEnv* env, jobject o, jstring name))
+  uintx result;
+  if (GetVMFlag <uintx> (thread, env, name, &result, &CommandLineFlags::uintxAt)) {
+    ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+    return longBox(thread, env, result);
+  }
+  return NULL;
+WB_END
+
+WB_ENTRY(jobject, WB_GetUint64VMFlag(JNIEnv* env, jobject o, jstring name))
+  uint64_t result;
+  if (GetVMFlag <uint64_t> (thread, env, name, &result, &CommandLineFlags::uint64_tAt)) {
+    ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+    return longBox(thread, env, result);
+  }
+  return NULL;
+WB_END
+
+WB_ENTRY(jobject, WB_GetDoubleVMFlag(JNIEnv* env, jobject o, jstring name))
+  double result;
+  if (GetVMFlag <double> (thread, env, name, &result, &CommandLineFlags::doubleAt)) {
+    ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+    return doubleBox(thread, env, result);
+  }
+  return NULL;
+WB_END
+
+WB_ENTRY(jstring, WB_GetStringVMFlag(JNIEnv* env, jobject o, jstring name))
+  ccstr ccstrResult;
+  if (GetVMFlag <ccstr> (thread, env, name, &ccstrResult, &CommandLineFlags::ccstrAt)) {
+    ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+    jstring result = env->NewStringUTF(ccstrResult);
+    CHECK_JNI_EXCEPTION_(env, NULL);
+    return result;
+  }
+  return NULL;
+WB_END
+
+WB_ENTRY(void, WB_SetBooleanVMFlag(JNIEnv* env, jobject o, jstring name, jboolean value))
+  bool result = value == JNI_TRUE ? true : false;
+  SetVMFlag <bool> (thread, env, name, &result, &CommandLineFlags::boolAtPut);
+WB_END
+
+WB_ENTRY(void, WB_SetIntxVMFlag(JNIEnv* env, jobject o, jstring name, jlong value))
+  intx result = value;
+  SetVMFlag <intx> (thread, env, name, &result, &CommandLineFlags::intxAtPut);
+WB_END
+
+WB_ENTRY(void, WB_SetUintxVMFlag(JNIEnv* env, jobject o, jstring name, jlong value))
+  uintx result = value;
+  SetVMFlag <uintx> (thread, env, name, &result, &CommandLineFlags::uintxAtPut);
+WB_END
+
+WB_ENTRY(void, WB_SetUint64VMFlag(JNIEnv* env, jobject o, jstring name, jlong value))
+  uint64_t result = value;
+  SetVMFlag <uint64_t> (thread, env, name, &result, &CommandLineFlags::uint64_tAtPut);
+WB_END
+
+WB_ENTRY(void, WB_SetDoubleVMFlag(JNIEnv* env, jobject o, jstring name, jdouble value))
+  double result = value;
+  SetVMFlag <double> (thread, env, name, &result, &CommandLineFlags::doubleAtPut);
+WB_END
+
+WB_ENTRY(void, WB_SetStringVMFlag(JNIEnv* env, jobject o, jstring name, jstring value))
+  ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+  const char* ccstrValue = (value == NULL) ? NULL : env->GetStringUTFChars(value, NULL);
+  ccstr ccstrResult = ccstrValue;
+  bool needFree;
+  {
+    ThreadInVMfromNative ttvfn(thread); // back to VM
+    needFree = SetVMFlag <ccstr> (thread, env, name, &ccstrResult, &CommandLineFlags::ccstrAtPut);
+  }
+  if (value != NULL) {
+    env->ReleaseStringUTFChars(value, ccstrValue);
+  }
+  if (needFree) {
+    FREE_C_HEAP_ARRAY(char, ccstrResult, mtInternal);
+  }
+WB_END
+
+
 WB_ENTRY(jboolean, WB_IsInStringTable(JNIEnv* env, jobject o, jstring javaString))
   ResourceMark rm(THREAD);
   int len;
@@ -511,8 +758,17 @@
 WB_ENTRY(void, WB_FullGC(JNIEnv* env, jobject o))
   Universe::heap()->collector_policy()->set_should_clear_all_soft_refs(true);
   Universe::heap()->collect(GCCause::_last_ditch_collection);
+#if INCLUDE_ALL_GCS
+  if (UseG1GC) {
+    // Needs to be cleared explicitly for G1
+    Universe::heap()->collector_policy()->set_should_clear_all_soft_refs(false);
+  }
+#endif // INCLUDE_ALL_GCS
 WB_END
 
+WB_ENTRY(void, WB_YoungGC(JNIEnv* env, jobject o))
+  Universe::heap()->collect(GCCause::_wb_young_gc);
+WB_END
 
 WB_ENTRY(void, WB_ReadReservedMemory(JNIEnv* env, jobject o))
   // static+volatile in order to force the read to happen
@@ -559,11 +815,7 @@
     return result;
   }
 
-  clazz = env->FindClass(vmSymbols::java_lang_Integer()->as_C_string());
-  CHECK_JNI_EXCEPTION_(env, NULL);
-  jmethodID constructor = env->GetMethodID(clazz, vmSymbols::object_initializer_name()->as_C_string(), vmSymbols::int_void_signature()->as_C_string());
-  CHECK_JNI_EXCEPTION_(env, NULL);
-  jobject obj = env->NewObject(clazz, constructor, code->comp_level());
+  jobject obj = integerBox(thread, env, code->comp_level());
   CHECK_JNI_EXCEPTION_(env, NULL);
   env->SetObjectArrayElement(result, 0, obj);
 
@@ -576,6 +828,62 @@
 WB_END
 
 
+int WhiteBox::array_bytes_to_length(size_t bytes) {
+  return Array<u1>::bytes_to_length(bytes);
+}
+
+WB_ENTRY(jlong, WB_AllocateMetaspace(JNIEnv* env, jobject wb, jobject class_loader, jlong size))
+  if (size < 0) {
+    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
+        err_msg("WB_AllocateMetaspace: size is negative: " JLONG_FORMAT, size));
+  }
+
+  oop class_loader_oop = JNIHandles::resolve(class_loader);
+  ClassLoaderData* cld = class_loader_oop != NULL
+      ? java_lang_ClassLoader::loader_data(class_loader_oop)
+      : ClassLoaderData::the_null_class_loader_data();
+
+  void* metadata = MetadataFactory::new_writeable_array<u1>(cld, WhiteBox::array_bytes_to_length((size_t)size), thread);
+
+  return (jlong)(uintptr_t)metadata;
+WB_END
+
+WB_ENTRY(void, WB_FreeMetaspace(JNIEnv* env, jobject wb, jobject class_loader, jlong addr, jlong size))
+  oop class_loader_oop = JNIHandles::resolve(class_loader);
+  ClassLoaderData* cld = class_loader_oop != NULL
+      ? java_lang_ClassLoader::loader_data(class_loader_oop)
+      : ClassLoaderData::the_null_class_loader_data();
+
+  MetadataFactory::free_array(cld, (Array<u1>*)(uintptr_t)addr);
+WB_END
+
+WB_ENTRY(jlong, WB_IncMetaspaceCapacityUntilGC(JNIEnv* env, jobject wb, jlong inc))
+  if (inc < 0) {
+    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
+        err_msg("WB_IncMetaspaceCapacityUntilGC: inc is negative: " JLONG_FORMAT, inc));
+  }
+
+  jlong max_size_t = (jlong) ((size_t) -1);
+  if (inc > max_size_t) {
+    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
+        err_msg("WB_IncMetaspaceCapacityUntilGC: inc does not fit in size_t: " JLONG_FORMAT, inc));
+  }
+
+  size_t new_cap_until_GC = 0;
+  size_t aligned_inc = align_size_down((size_t) inc, Metaspace::commit_alignment());
+  bool success = MetaspaceGC::inc_capacity_until_GC(aligned_inc, &new_cap_until_GC);
+  if (!success) {
+    THROW_MSG_0(vmSymbols::java_lang_IllegalStateException(),
+                "WB_IncMetaspaceCapacityUntilGC: could not increase capacity until GC "
+                "due to contention with another thread");
+  }
+  return (jlong) new_cap_until_GC;
+WB_END
+
+WB_ENTRY(jlong, WB_MetaspaceCapacityUntilGC(JNIEnv* env, jobject wb))
+  return (jlong) MetaspaceGC::capacity_until_GC();
+WB_END
+
 //Some convenience methods to deal with objects from java
 int WhiteBox::offset_for_field(const char* field_name, oop object,
     Symbol* signature_symbol) {
@@ -626,17 +934,58 @@
   return ret;
 }
 
+void WhiteBox::register_methods(JNIEnv* env, jclass wbclass, JavaThread* thread, JNINativeMethod* method_array, int method_count) {
+  ResourceMark rm;
+  ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
+
+  //  one by one registration natives for exception catching
+  jclass no_such_method_error_klass = env->FindClass(vmSymbols::java_lang_NoSuchMethodError()->as_C_string());
+  CHECK_JNI_EXCEPTION(env);
+  for (int i = 0, n = method_count; i < n; ++i) {
+    // Skip dummy entries
+    if (method_array[i].fnPtr == NULL) continue;
+    if (env->RegisterNatives(wbclass, &method_array[i], 1) != 0) {
+      jthrowable throwable_obj = env->ExceptionOccurred();
+      if (throwable_obj != NULL) {
+        env->ExceptionClear();
+        if (env->IsInstanceOf(throwable_obj, no_such_method_error_klass)) {
+          // NoSuchMethodError is thrown when a method can't be found or a method is not native.
+          // Ignoring the exception since it is not preventing use of other WhiteBox methods.
+          tty->print_cr("Warning: 'NoSuchMethodError' on register of sun.hotspot.WhiteBox::%s%s",
+              method_array[i].name, method_array[i].signature);
+        }
+      } else {
+        // Registration failed unexpectedly.
+        tty->print_cr("Warning: unexpected error on register of sun.hotspot.WhiteBox::%s%s. All methods will be unregistered",
+            method_array[i].name, method_array[i].signature);
+        env->UnregisterNatives(wbclass);
+        break;
+      }
+    }
+  }
+}
 
 #define CC (char*)
 
 static JNINativeMethod methods[] = {
   {CC"getObjectAddress",   CC"(Ljava/lang/Object;)J", (void*)&WB_GetObjectAddress  },
+  {CC"getObjectSize",      CC"(Ljava/lang/Object;)J", (void*)&WB_GetObjectSize     },
+  {CC"isObjectInOldGen",   CC"(Ljava/lang/Object;)Z", (void*)&WB_isObjectInOldGen  },
   {CC"getHeapOopSize",     CC"()I",                   (void*)&WB_GetHeapOopSize    },
   {CC"isClassAlive0",      CC"(Ljava/lang/String;)Z", (void*)&WB_IsClassAlive      },
+  {CC"classKnownToNotExist",
+                           CC"(Ljava/lang/ClassLoader;Ljava/lang/String;)Z",(void*)&WB_ClassKnownToNotExist},
+  {CC"getLookupCacheURLs", CC"(Ljava/lang/ClassLoader;)[Ljava/net/URL;",    (void*)&WB_GetLookupCacheURLs},
+  {CC"getLookupCacheMatches", CC"(Ljava/lang/ClassLoader;Ljava/lang/String;)[I",
+                                                      (void*)&WB_GetLookupCacheMatches},
   {CC"parseCommandLine",
       CC"(Ljava/lang/String;[Lsun/hotspot/parser/DiagnosticCommand;)[Ljava/lang/Object;",
       (void*) &WB_ParseCommandLine
   },
+  {CC"addToBootstrapClassLoaderSearch", CC"(Ljava/lang/String;)V",
+                                                      (void*)&WB_AddToBootstrapClassLoaderSearch},
+  {CC"addToSystemClassLoaderSearch",    CC"(Ljava/lang/String;)V",
+                                                      (void*)&WB_AddToSystemClassLoaderSearch},
   {CC"getCompressedOopsMaxHeapSize", CC"()J",
       (void*)&WB_GetCompressedOopsMaxHeapSize},
   {CC"printHeapSizes",     CC"()V",                   (void*)&WB_PrintHeapSizes    },
@@ -651,13 +1000,15 @@
 #endif // INCLUDE_ALL_GCS
 #if INCLUDE_NMT
   {CC"NMTMalloc",           CC"(J)J",                 (void*)&WB_NMTMalloc          },
+  {CC"NMTMallocWithPseudoStack", CC"(JI)J",           (void*)&WB_NMTMallocWithPseudoStack},
   {CC"NMTFree",             CC"(J)V",                 (void*)&WB_NMTFree            },
   {CC"NMTReserveMemory",    CC"(J)J",                 (void*)&WB_NMTReserveMemory   },
   {CC"NMTCommitMemory",     CC"(JJ)V",                (void*)&WB_NMTCommitMemory    },
   {CC"NMTUncommitMemory",   CC"(JJ)V",                (void*)&WB_NMTUncommitMemory  },
   {CC"NMTReleaseMemory",    CC"(JJ)V",                (void*)&WB_NMTReleaseMemory   },
-  {CC"NMTWaitForDataMerge", CC"()Z",                  (void*)&WB_NMTWaitForDataMerge},
   {CC"NMTIsDetailSupported",CC"()Z",                  (void*)&WB_NMTIsDetailSupported},
+  {CC"NMTChangeTrackingLevel", CC"()Z",               (void*)&WB_NMTChangeTrackingLevel},
+  {CC"NMTGetHashSize",      CC"()I",                  (void*)&WB_NMTGetHashSize     },
 #endif // INCLUDE_NMT
   {CC"deoptimizeAll",      CC"()V",                   (void*)&WB_DeoptimizeAll     },
   {CC"deoptimizeMethod",   CC"(Ljava/lang/reflect/Executable;Z)I",
@@ -684,9 +1035,35 @@
       CC"(Ljava/lang/reflect/Executable;II)Z",        (void*)&WB_EnqueueMethodForCompilation},
   {CC"clearMethodState",
       CC"(Ljava/lang/reflect/Executable;)V",          (void*)&WB_ClearMethodState},
-  {CC"isInStringTable",   CC"(Ljava/lang/String;)Z",  (void*)&WB_IsInStringTable  },
+  {CC"setBooleanVMFlag",   CC"(Ljava/lang/String;Z)V",(void*)&WB_SetBooleanVMFlag},
+  {CC"setIntxVMFlag",      CC"(Ljava/lang/String;J)V",(void*)&WB_SetIntxVMFlag},
+  {CC"setUintxVMFlag",     CC"(Ljava/lang/String;J)V",(void*)&WB_SetUintxVMFlag},
+  {CC"setUint64VMFlag",    CC"(Ljava/lang/String;J)V",(void*)&WB_SetUint64VMFlag},
+  {CC"setDoubleVMFlag",    CC"(Ljava/lang/String;D)V",(void*)&WB_SetDoubleVMFlag},
+  {CC"setStringVMFlag",    CC"(Ljava/lang/String;Ljava/lang/String;)V",
+                                                      (void*)&WB_SetStringVMFlag},
+  {CC"getBooleanVMFlag",   CC"(Ljava/lang/String;)Ljava/lang/Boolean;",
+                                                      (void*)&WB_GetBooleanVMFlag},
+  {CC"getIntxVMFlag",      CC"(Ljava/lang/String;)Ljava/lang/Long;",
+                                                      (void*)&WB_GetIntxVMFlag},
+  {CC"getUintxVMFlag",     CC"(Ljava/lang/String;)Ljava/lang/Long;",
+                                                      (void*)&WB_GetUintxVMFlag},
+  {CC"getUint64VMFlag",    CC"(Ljava/lang/String;)Ljava/lang/Long;",
+                                                      (void*)&WB_GetUint64VMFlag},
+  {CC"getDoubleVMFlag",    CC"(Ljava/lang/String;)Ljava/lang/Double;",
+                                                      (void*)&WB_GetDoubleVMFlag},
+  {CC"getStringVMFlag",    CC"(Ljava/lang/String;)Ljava/lang/String;",
+                                                      (void*)&WB_GetStringVMFlag},
+  {CC"isInStringTable",    CC"(Ljava/lang/String;)Z", (void*)&WB_IsInStringTable  },
   {CC"fullGC",   CC"()V",                             (void*)&WB_FullGC },
+  {CC"youngGC",  CC"()V",                             (void*)&WB_YoungGC },
   {CC"readReservedMemory", CC"()V",                   (void*)&WB_ReadReservedMemory },
+  {CC"allocateMetaspace",
+     CC"(Ljava/lang/ClassLoader;J)J",                 (void*)&WB_AllocateMetaspace },
+  {CC"freeMetaspace",
+     CC"(Ljava/lang/ClassLoader;JJ)V",                (void*)&WB_FreeMetaspace },
+  {CC"incMetaspaceCapacityUntilGC", CC"(J)J",         (void*)&WB_IncMetaspaceCapacityUntilGC },
+  {CC"metaspaceCapacityUntilGC", CC"()J",             (void*)&WB_MetaspaceCapacityUntilGC },
   {CC"getCPUFeatures",     CC"()Ljava/lang/String;",  (void*)&WB_GetCPUFeatures     },
   {CC"getNMethod",         CC"(Ljava/lang/reflect/Executable;Z)[Ljava/lang/Object;",
                                                       (void*)&WB_GetNMethod         },
@@ -701,35 +1078,9 @@
       instanceKlassHandle ikh = instanceKlassHandle(JNIHandles::resolve(wbclass)->klass());
       Handle loader(ikh->class_loader());
       if (loader.is_null()) {
-        ResourceMark rm;
-        ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
-        bool result = true;
-        //  one by one registration natives for exception catching
-        jclass exceptionKlass = env->FindClass(vmSymbols::java_lang_NoSuchMethodError()->as_C_string());
-        CHECK_JNI_EXCEPTION(env);
-        for (int i = 0, n = sizeof(methods) / sizeof(methods[0]); i < n; ++i) {
-          if (env->RegisterNatives(wbclass, methods + i, 1) != 0) {
-            result = false;
-            jthrowable throwable_obj = env->ExceptionOccurred();
-            if (throwable_obj != NULL) {
-              env->ExceptionClear();
-              if (env->IsInstanceOf(throwable_obj, exceptionKlass)) {
-                // j.l.NoSuchMethodError is thrown when a method can't be found or a method is not native
-                // ignoring the exception
-                tty->print_cr("Warning: 'NoSuchMethodError' on register of sun.hotspot.WhiteBox::%s%s", methods[i].name, methods[i].signature);
-              }
-            } else {
-              // register is failed w/o exception or w/ unexpected exception
-              tty->print_cr("Warning: unexpected error on register of sun.hotspot.WhiteBox::%s%s. All methods will be unregistered", methods[i].name, methods[i].signature);
-              env->UnregisterNatives(wbclass);
-              break;
-            }
-          }
-        }
-
-        if (result) {
-          WhiteBox::set_used();
-        }
+        WhiteBox::register_methods(env, wbclass, thread, methods, sizeof(methods) / sizeof(methods[0]));
+        WhiteBox::register_extended(env, wbclass, thread);
+        WhiteBox::set_used();
       }
     }
   }
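
The registration code above is now factored into WhiteBox::register_methods(), which registers the natives one entry at a time and tolerates NoSuchMethodError, so a WhiteBox method that a particular VM build does not provide no longer disables the whole API. The same pattern, reduced to a minimal sketch for a plain JNI library (not HotSpot code; names are illustrative):

#include <jni.h>
#include <cstdio>

// Register natives one at a time so that a single missing native does not
// abort registration of the others.
static bool register_one_by_one(JNIEnv* env, jclass cls,
                                const JNINativeMethod* methods, int count) {
  jclass nsme = env->FindClass("java/lang/NoSuchMethodError");
  if (nsme == NULL) return false;               // FindClass itself failed
  for (int i = 0; i < count; i++) {
    if (env->RegisterNatives(cls, &methods[i], 1) != 0) {
      jthrowable t = env->ExceptionOccurred();
      env->ExceptionClear();
      if (t != NULL && env->IsInstanceOf(t, nsme)) {
        // Tolerate methods this build does not provide and keep going.
        std::fprintf(stderr, "warning: skipping %s%s\n",
                     methods[i].name, methods[i].signature);
      } else {
        // Anything else is unexpected: undo what was registered so far.
        env->UnregisterNatives(cls);
        return false;
      }
    }
  }
  return true;
}
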
--- a/src/share/vm/prims/whitebox.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/prims/whitebox.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -29,6 +29,8 @@
 
 #include "memory/allocation.hpp"
 #include "oops/oopsHierarchy.hpp"
+#include "oops/symbol.hpp"
+#include "runtime/interfaceSupport.hpp"
 
 // Entry macro to transition from JNI to VM state.
 
@@ -62,6 +64,11 @@
     Symbol* signature_symbol);
   static const char* lookup_jstring(const char* field_name, oop object);
   static bool lookup_bool(const char* field_name, oop object);
+
+  static int array_bytes_to_length(size_t bytes);
+  static void register_methods(JNIEnv* env, jclass wbclass, JavaThread* thread,
+    JNINativeMethod* method_array, int method_count);
+  static void register_extended(JNIEnv* env, jclass wbclass, JavaThread* thread);
 };
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/prims/whitebox_ext.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "prims/whitebox.hpp"
+
+void WhiteBox::register_extended(JNIEnv* env, jclass wbclass, JavaThread* thread) { }
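
The new whitebox_ext.cpp leaves register_extended() empty on purpose: it appears intended as a hook that an alternative source configuration can replace in order to register additional natives through the shared helper. A hypothetical non-empty override might look like the sketch below (WB_GetVendorBuildNumber and its Java-side name are invented for illustration):

#include "precompiled.hpp"
#include "prims/whitebox.hpp"

// Invented example native; a real override would supply vendor-specific hooks.
static jint JNICALL WB_GetVendorBuildNumber(JNIEnv* env, jobject wb) {
  return 42;  // placeholder value
}

void WhiteBox::register_extended(JNIEnv* env, jclass wbclass, JavaThread* thread) {
  static JNINativeMethod ext_methods[] = {
    {(char*)"getVendorBuildNumber", (char*)"()I", (void*)&WB_GetVendorBuildNumber},
  };
  register_methods(env, wbclass, thread, ext_methods,
                   sizeof(ext_methods) / sizeof(ext_methods[0]));
}
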
--- a/src/share/vm/runtime/advancedThresholdPolicy.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/advancedThresholdPolicy.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -75,11 +75,14 @@
 
 // update_rate() is called from select_task() while holding a compile queue lock.
 void AdvancedThresholdPolicy::update_rate(jlong t, Method* m) {
-  JavaThread* THREAD = JavaThread::current();
+  // Skip the update if the method counters are absent.
+  // We can't allocate them here since we are holding the compile queue lock.
+  if (m->method_counters() == NULL) return;
+
   if (is_old(m)) {
     // We don't remove old methods from the queue,
     // so we can just zero the rate.
-    m->set_rate(0, THREAD);
+    m->set_rate(0);
     return;
   }
 
@@ -95,14 +98,15 @@
   if (delta_s >= TieredRateUpdateMinTime) {
     // And we must've taken the previous point at least 1ms before.
     if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
-      m->set_prev_time(t, THREAD);
-      m->set_prev_event_count(event_count, THREAD);
-      m->set_rate((float)delta_e / (float)delta_t, THREAD); // Rate is events per millisecond
-    } else
+      m->set_prev_time(t);
+      m->set_prev_event_count(event_count);
+      m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
+    } else {
       if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
         // If nothing happened for 25ms, zero the rate. Don't modify prev values.
-        m->set_rate(0, THREAD);
+        m->set_rate(0);
       }
+    }
   }
 }
 
@@ -167,7 +171,6 @@
   for (CompileTask* task = compile_queue->first(); task != NULL;) {
     CompileTask* next_task = task->next();
     Method* method = task->method();
-    MethodData* mdo = method->method_data();
     update_rate(t, method);
     if (max_task == NULL) {
       max_task = task;
@@ -179,8 +182,7 @@
           print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level());
         }
         task->log_task_dequeued("stale");
-        CompileTaskWrapper ctw(task); // Frees the task
-        compile_queue->remove(task);
+        compile_queue->remove_and_mark_stale(task);
         method->clear_queued_for_compilation();
         task = next_task;
         continue;
@@ -339,8 +341,8 @@
  * c. 0 -> (3->2) -> 4.
  *    In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough
  *    to enable the profiling to fully occur at level 0. In this case we change the compilation level
- *    of the method to 2, because it'll allow it to run much faster without full profiling while c2
- *    is compiling.
+ *    of the method to 2 while the request is still in the queue, because it'll allow it to run much faster
+ *    without full profiling while c2 is compiling.
  *
  * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
  *    After a method was once compiled with C1 it can be identified as trivial and be compiled to
@@ -486,7 +488,7 @@
   if (should_create_mdo(mh(), level)) {
     create_mdo(mh, thread);
   }
-  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
+  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
     CompLevel next_level = call_event(mh(), level);
     if (next_level != level) {
       compile(mh, InvocationEntryBci, next_level, thread);
@@ -510,7 +512,7 @@
     CompLevel next_osr_level = loop_event(imh(), level);
     CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
     // At the very least compile the OSR version
-    if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_osr_level != level) {
+    if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
       compile(imh, bci, next_osr_level, thread);
     }
 
@@ -544,7 +546,7 @@
           nm->make_not_entrant();
         }
       }
-      if (!CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
+      if (!CompileBroker::compilation_is_in_queue(mh)) {
         // Fix up next_level if necessary to avoid deopts
         if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
           next_level = CompLevel_full_profile;
@@ -556,7 +558,7 @@
     } else {
       cur_level = comp_level(imh());
       next_level = call_event(imh(), cur_level);
-      if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_level != cur_level) {
+      if (!CompileBroker::compilation_is_in_queue(imh) && (next_level != cur_level)) {
         compile(imh, InvocationEntryBci, next_level, thread);
       }
     }
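
The update_rate() change drops the JavaThread argument from the Method setters and returns early when the MethodCounters have not been allocated, because the caller holds the compile queue lock and must not allocate under it. The underlying pattern, as a standalone sketch (not HotSpot code):

// When a lazily created side structure may be absent and the caller holds a
// lock under which allocation is forbidden, skip the update instead of
// allocating.
struct Counters { double rate; };

struct Method {
  Counters* counters;                  // created lazily elsewhere; may still be NULL
};

static void update_rate_locked(Method* m, double new_rate) {
  if (m->counters == nullptr) return;  // can't allocate here: the queue lock is held
  m->counters->rate = new_rate;
}
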
--- a/src/share/vm/runtime/arguments.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/arguments.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "classfile/classLoader.hpp"
 #include "classfile/javaAssertions.hpp"
 #include "classfile/symbolTable.hpp"
 #include "compiler/compilerOracle.hpp"
@@ -34,12 +35,14 @@
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/arguments.hpp"
+#include "runtime/arguments_ext.hpp"
 #include "runtime/globals_extension.hpp"
 #include "runtime/java.hpp"
 #include "services/management.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/macros.hpp"
+#include "utilities/stringUtils.hpp"
 #include "utilities/taskqueue.hpp"
 #ifdef TARGET_OS_FAMILY_linux
 # include "os_linux.inline.hpp"
@@ -63,7 +66,7 @@
 #endif // INCLUDE_ALL_GCS
 
 // Note: This is a special bug reporting site for the JVM
-#define DEFAULT_VENDOR_URL_BUG "http://bugreport.sun.com/bugreport/crash.jsp"
+#define DEFAULT_VENDOR_URL_BUG "http://bugreport.java.com/bugreport/crash.jsp"
 #define DEFAULT_JAVA_LAUNCHER  "generic"
 
 // Disable options not supported in this release, with a warning if they
@@ -102,6 +105,8 @@
 bool   Arguments::_has_profile                  = false;
 size_t Arguments::_conservative_max_heap_alignment = 0;
 uintx  Arguments::_min_heap_size                = 0;
+uintx  Arguments::_min_heap_free_ratio          = 0;
+uintx  Arguments::_max_heap_free_ratio          = 0;
 Arguments::Mode Arguments::_mode                = _mixed;
 bool   Arguments::_java_compiler                = false;
 bool   Arguments::_xdebug_mode                  = false;
@@ -305,6 +310,8 @@
   { "UseMPSS",                       JDK_Version::jdk(8), JDK_Version::jdk(9) },
   { "UseStringCache",                JDK_Version::jdk(8), JDK_Version::jdk(9) },
   { "UseOldInlining",                JDK_Version::jdk(9), JDK_Version::jdk(10) },
+  { "AutoShutdownNMT",               JDK_Version::jdk(9), JDK_Version::jdk(10) },
+  { "CompilationRepeat",             JDK_Version::jdk(8), JDK_Version::jdk(9) },
 #ifdef PRODUCT
   { "DesiredMethodLimit",
                            JDK_Version::jdk_update(7, 2), JDK_Version::jdk(8) },
@@ -1124,11 +1131,11 @@
 // Conflict: required to use shared spaces (-Xshare:on), but
 // incompatible command line options were chosen.
 
-static void no_shared_spaces() {
+static void no_shared_spaces(const char* message) {
   if (RequireSharedSpaces) {
     jio_fprintf(defaultStream::error_stream(),
       "Class data sharing is inconsistent with other specified options.\n");
-    vm_exit_during_initialization("Unable to use shared archive.", NULL);
+    vm_exit_during_initialization("Unable to use shared archive.", message);
   } else {
     FLAG_SET_DEFAULT(UseSharedSpaces, false);
   }
@@ -1154,6 +1161,32 @@
   }
 }
 
+/**
+ * Returns the minimum number of compiler threads needed to run the JVM. The following
+ * configurations are possible.
+ *
+ * 1) The JVM is built with an interpreter only. As a result, the minimum number of
+ *    compiler threads is 0.
+ * 2) The JVM is built with the compiler(s) and tiered compilation is disabled. As
+ *    a result, either C1 or C2 is used, so the minimum number of compiler threads is 1.
+ * 3) The JVM is built with the compiler(s) and tiered compilation is enabled. However,
+ *    TieredStopAtLevel is set below CompLevel_full_optimization. As a result, only
+ *    C1 can be used, so the minimum number of compiler threads is 1.
+ * 4) The JVM is built with the compilers and tiered compilation is enabled, and
+ *    TieredStopAtLevel is CompLevel_full_optimization (the default). As a result,
+ *    the minimum number of compiler threads is 2.
+ */
+int Arguments::get_min_number_of_compiler_threads() {
+#if !defined(COMPILER1) && !defined(COMPILER2) && !defined(SHARK) && !defined(COMPILERGRAAL)
+  return 0;   // case 1
+#else
+  if (!TieredCompilation || (TieredStopAtLevel < CompLevel_full_optimization)) {
+    return 1; // case 2 or case 3
+  }
+  return 2;   // case 4 (tiered)
+#endif
+}
+
 #if INCLUDE_ALL_GCS
 static void disable_adaptive_size_policy(const char* collector_name) {
   if (UseAdaptiveSizePolicy) {
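
get_min_number_of_compiler_threads() feeds the CICompilerCount lower-bound check added later in this file. The relationship, as a standalone sketch (illustrative only, not HotSpot code):

#include <cstdio>

// Lower bound on the number of compiler threads, following the four cases
// described in the comment above.
static int min_compiler_threads(bool has_compiler, bool tiered, bool stop_below_c2) {
  if (!has_compiler) return 0;              // interpreter-only build
  if (!tiered || stop_below_c2) return 1;   // only C1 or only C2 is active
  return 2;                                 // full tiered: at least one C1 and one C2 thread
}

int main() {
  int ci_compiler_count = 2;                // e.g. the value of -XX:CICompilerCount=2
  int min = min_compiler_threads(true, true, false);
  if (ci_compiler_count < min) {
    std::printf("CICompilerCount (%d) is below the required minimum (%d)\n",
                ci_compiler_count, min);
    return 1;
  }
  return 0;
}
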
@@ -1414,10 +1447,26 @@
                 (int)ObjectAlignmentInBytes, os::vm_page_size());
     return false;
   }
+  if (SurvivorAlignmentInBytes == 0) {
+    SurvivorAlignmentInBytes = ObjectAlignmentInBytes;
+  } else {
+    if (!is_power_of_2(SurvivorAlignmentInBytes)) {
+      jio_fprintf(defaultStream::error_stream(),
+            "error: SurvivorAlignmentInBytes=%d must be power of 2\n",
+            (int)SurvivorAlignmentInBytes);
+      return false;
+    }
+    if (SurvivorAlignmentInBytes < ObjectAlignmentInBytes) {
+      jio_fprintf(defaultStream::error_stream(),
+          "error: SurvivorAlignmentInBytes=%d must be greater than ObjectAlignmentInBytes=%d \n",
+          (int)SurvivorAlignmentInBytes, (int)ObjectAlignmentInBytes);
+      return false;
+    }
+  }
   return true;
 }
 
-uintx Arguments::max_heap_for_compressed_oops() {
+size_t Arguments::max_heap_for_compressed_oops() {
   // Avoid sign flip.
   assert(OopEncodingHeapMax > (uint64_t)os::vm_page_size(), "Unusual page size");
   // We need to fit both the NULL page and the heap into the memory budget, while
@@ -1525,28 +1574,31 @@
     heap_alignment = G1CollectedHeap::conservative_max_heap_alignment();
   }
 #endif // INCLUDE_ALL_GCS
-  _conservative_max_heap_alignment = MAX3(heap_alignment, os::max_page_size(),
-    CollectorPolicy::compute_heap_alignment());
+  _conservative_max_heap_alignment = MAX4(heap_alignment,
+                                          (size_t)os::vm_allocation_granularity(),
+                                          os::max_page_size(),
+                                          CollectorPolicy::compute_heap_alignment());
+}
+
+void Arguments::select_gc_ergonomically() {
+  if (os::is_server_class_machine()) {
+    if (should_auto_select_low_pause_collector()) {
+      FLAG_SET_ERGO(bool, UseConcMarkSweepGC, true);
+    } else {
+      FLAG_SET_ERGO(bool, UseParallelGC, true);
+    }
+  }
+}
+
+void Arguments::select_gc() {
+  if (!gc_selected()) {
+    ArgumentsExt::select_gc_ergonomically();
+  }
 }
 
 void Arguments::set_ergonomics_flags() {
-
-  if (os::is_server_class_machine()) {
-    // If no other collector is requested explicitly,
-    // let the VM select the collector based on
-    // machine class and automatic selection policy.
-    if (!UseSerialGC &&
-        !UseConcMarkSweepGC &&
-        !UseG1GC &&
-        !UseParNewGC &&
-        FLAG_IS_DEFAULT(UseParallelGC)) {
-      if (should_auto_select_low_pause_collector()) {
-        FLAG_SET_ERGO(bool, UseConcMarkSweepGC, true);
-      } else {
-        FLAG_SET_ERGO(bool, UseParallelGC, true);
-      }
-    }
-  }
+  select_gc();
+
 #if defined(COMPILER2) || defined(GRAAL)
   // Shared spaces work fine with other GCs but causes bytecode rewriting
   // to be disabled, which hurts interpreter performance and decreases
@@ -1555,7 +1607,7 @@
   // at link time, or rewrite bytecodes in non-shared methods.
   if (!DumpSharedSpaces && !RequireSharedSpaces &&
       (FLAG_IS_DEFAULT(UseSharedSpaces) || !UseSharedSpaces)) {
-    no_shared_spaces();
+    no_shared_spaces("COMPILER2 default: -Xshare:auto | off, have to manually setup to on.");
   }
 #endif
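
The hunk above replaces the inline ergonomic collector choice with select_gc(): ergonomics run only when gc_selected() reports that no collector flag was set on the command line, and the call is routed through ArgumentsExt so an extension can substitute its own policy. Condensed into a standalone sketch (not HotSpot code):

// Collector flags and the refactored selection flow.
struct GcFlags {
  bool use_serial, use_parallel, use_cms, use_g1, use_parnew;
};

static bool gc_selected(const GcFlags& f) {
  return f.use_serial || f.use_parallel || f.use_cms || f.use_g1 || f.use_parnew;
}

static void select_gc_ergonomically(GcFlags& f, bool server_class, bool prefer_low_pause) {
  if (!server_class) return;                   // client-class machines keep the default
  if (prefer_low_pause) f.use_cms = true;
  else                  f.use_parallel = true;
}

static void select_gc(GcFlags& f, bool server_class, bool prefer_low_pause) {
  if (!gc_selected(f)) {                       // only if the user picked nothing
    select_gc_ergonomically(f, server_class, prefer_low_pause);
  }
}
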
 
@@ -1598,9 +1650,11 @@
     // unless the user actually sets these flags.
     if (FLAG_IS_DEFAULT(MinHeapFreeRatio)) {
       FLAG_SET_DEFAULT(MinHeapFreeRatio, 0);
+      _min_heap_free_ratio = MinHeapFreeRatio;
     }
     if (FLAG_IS_DEFAULT(MaxHeapFreeRatio)) {
       FLAG_SET_DEFAULT(MaxHeapFreeRatio, 100);
+      _max_heap_free_ratio = MaxHeapFreeRatio;
     }
   }
 
@@ -1639,6 +1693,12 @@
                      Abstract_VM_Version::parallel_worker_threads());
   }
 
+#if INCLUDE_ALL_GCS
+  if (G1ConcRefinementThreads == 0) {
+    FLAG_SET_DEFAULT(G1ConcRefinementThreads, ParallelGCThreads);
+  }
+#endif
+
   // MarkStackSize will be set (if it hasn't been set by the user)
   // when concurrent marking is initialized.
   // Its value will be based upon the number of parallel marking threads.
@@ -1664,6 +1724,46 @@
   }
 }
 
+#if !INCLUDE_ALL_GCS
+#ifdef ASSERT
+static bool verify_serial_gc_flags() {
+  return (UseSerialGC &&
+        !(UseParNewGC || (UseConcMarkSweepGC || CMSIncrementalMode) || UseG1GC ||
+          UseParallelGC || UseParallelOldGC));
+}
+#endif // ASSERT
+#endif // INCLUDE_ALL_GCS
+
+void Arguments::set_gc_specific_flags() {
+#if INCLUDE_ALL_GCS
+  // Set per-collector flags
+  if (UseParallelGC || UseParallelOldGC) {
+    set_parallel_gc_flags();
+  } else if (UseConcMarkSweepGC) { // Should be done before ParNew check below
+    set_cms_and_parnew_gc_flags();
+  } else if (UseParNewGC) {  // Skipped if CMS is set above
+    set_parnew_gc_flags();
+  } else if (UseG1GC) {
+    set_g1_gc_flags();
+  }
+  check_deprecated_gcs();
+  check_deprecated_gc_flags();
+  if (AssumeMP && !UseSerialGC) {
+    if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
+      warning("If the number of processors is expected to increase from one, then"
+              " you should configure the number of parallel GC threads appropriately"
+              " using -XX:ParallelGCThreads=N");
+    }
+  }
+  if (MinHeapFreeRatio == 100) {
+    // Keeping the heap 100% free is hard ;-) so limit it to 99%.
+    FLAG_SET_ERGO(uintx, MinHeapFreeRatio, 99);
+  }
+#else // INCLUDE_ALL_GCS
+  assert(verify_serial_gc_flags(), "SerialGC unset");
+#endif // INCLUDE_ALL_GCS
+}
+
 julong Arguments::limit_by_allocatable_memory(julong limit) {
   julong max_allocatable;
   julong result = limit;
@@ -1887,16 +1987,6 @@
   return false;
 }
 
-#if !INCLUDE_ALL_GCS
-#ifdef ASSERT
-static bool verify_serial_gc_flags() {
-  return (UseSerialGC &&
-        !(UseParNewGC || (UseConcMarkSweepGC || CMSIncrementalMode) || UseG1GC ||
-          UseParallelGC || UseParallelOldGC));
-}
-#endif // ASSERT
-#endif // INCLUDE_ALL_GCS
-
 // check if do gclog rotation
 // +UseGCLogFileRotation is a must,
 // no gc log rotation when log file not supplied or
@@ -1975,6 +2065,8 @@
                   MaxHeapFreeRatio);
     return false;
   }
+  // This does not set the flag itself, but stores the value in a safe place for later usage.
+  _min_heap_free_ratio = min_heap_free_ratio;
   return true;
 }
 
@@ -1989,11 +2081,13 @@
                   MinHeapFreeRatio);
     return false;
   }
+  // This does not set the flag itself, but stores the value in a safe place for later usage.
+  _max_heap_free_ratio = max_heap_free_ratio;
   return true;
 }
 
 // Check consistency of GC selection
-bool Arguments::check_gc_consistency() {
+bool Arguments::check_gc_consistency_user() {
   check_gclog_consistency();
   bool status = true;
   // Ensure that the user has not selected conflicting sets
@@ -2159,7 +2253,7 @@
     FLAG_SET_DEFAULT(UseGCOverheadLimit, false);
   }
 
-  status = status && check_gc_consistency();
+  status = status && check_gc_consistency_user();
   status = status && check_stack_pages();
 
   if (CMSIncrementalMode) {
@@ -2342,10 +2436,13 @@
   status = status && verify_percentage(MarkSweepDeadRatio, "MarkSweepDeadRatio");
 
   status = status && verify_min_value(MarkSweepAlwaysCompactCount, 1, "MarkSweepAlwaysCompactCount");
+#ifdef COMPILER1
+  status = status && verify_min_value(ValueMapInitialSize, 1, "ValueMapInitialSize");
+#endif
 
   if (PrintNMTStatistics) {
 #if INCLUDE_NMT
-    if (MemTracker::tracking_level() == MemTracker::NMT_off) {
+    if (MemTracker::tracking_level() == NMT_off) {
 #endif // INCLUDE_NMT
       warning("PrintNMTStatistics is disabled, because native memory tracking is not enabled");
       PrintNMTStatistics = false;
@@ -2432,6 +2529,12 @@
   status &= verify_interval(SafepointPollOffset, 0, os::vm_page_size() - BytesPerWord, "SafepointPollOffset");
 #endif
 
+  int min_number_of_compiler_threads = get_min_number_of_compiler_threads();
+  // The default CICompilerCount's value is CI_COMPILER_COUNT.
+  assert(min_number_of_compiler_threads <= CI_COMPILER_COUNT, "minimum should be less than or equal to the default number");
+  // Check the minimum number of compiler threads.
+  status &= verify_min_value(CICompilerCount, min_number_of_compiler_threads, "CICompilerCount");
+
   return status;
 }
 
@@ -2911,6 +3014,23 @@
 #endif
     // -D
     } else if (match_option(option, "-D", &tail)) {
+      if (CheckEndorsedAndExtDirs) {
+        if (match_option(option, "-Djava.endorsed.dirs=", &tail)) {
+          // abort if -Djava.endorsed.dirs is set
+          jio_fprintf(defaultStream::output_stream(),
+            "-Djava.endorsed.dirs will not be supported in a future release.\n"
+            "Refer to JEP 220 for details (http://openjdk.java.net/jeps/220).\n");
+          return JNI_EINVAL;
+        }
+        if (match_option(option, "-Djava.ext.dirs=", &tail)) {
+          // abort if -Djava.ext.dirs is set
+          jio_fprintf(defaultStream::output_stream(),
+            "-Djava.ext.dirs will not be supported in a future release.\n"
+            "Refer to JEP 220 for details (http://openjdk.java.net/jeps/220).\n");
+          return JNI_EINVAL;
+        }
+      }
+
       if (!add_property(tail)) {
         return JNI_ENOMEM;
       }
@@ -3289,6 +3409,15 @@
     }
   }
 
+  // PrintSharedArchiveAndExit will turn on
+  //   -Xshare:on
+  //   -XX:+TraceClassPaths
+  if (PrintSharedArchiveAndExit) {
+    FLAG_SET_CMDLINE(bool, UseSharedSpaces, true);
+    FLAG_SET_CMDLINE(bool, RequireSharedSpaces, true);
+    FLAG_SET_CMDLINE(bool, TraceClassPaths, true);
+  }
+
   // Change the default value for flags  which have different default values
   // when working with older JDKs.
 #ifdef LINUX
@@ -3297,9 +3426,195 @@
     FLAG_SET_DEFAULT(UseLinuxPosixThreadCPUClocks, false);
   }
 #endif // LINUX
+  fix_appclasspath();
   return JNI_OK;
 }
 
+// Remove all empty paths from the app classpath (if IgnoreEmptyClassPaths is enabled)
+//
+// This is necessary because some apps like to specify the classpath like -cp foo.jar:${XYZ}:bar.jar
+// in their start-up scripts. If XYZ is empty, the classpath will look like "-cp foo.jar::bar.jar".
+// Java treats such empty paths as if the user specified "-cp foo.jar:.:bar.jar". I.e., an empty
+// path is treated as the current directory.
+//
+// This causes problems with CDS, which requires that all directories specified in the classpath
+// must be empty. In most cases, applications do NOT want to load classes from the current
+// directory anyway. Adding -XX:+IgnoreEmptyClassPaths will make these applications' start-up
+// scripts compatible with CDS.
+void Arguments::fix_appclasspath() {
+  if (IgnoreEmptyClassPaths) {
+    const char separator = *os::path_separator();
+    const char* src = _java_class_path->value();
+
+    // skip over all the leading empty paths
+    while (*src == separator) {
+      src++;
+    }
+
+    char* copy = AllocateHeap(strlen(src) + 1, mtInternal);
+    strncpy(copy, src, strlen(src) + 1);
+
+    // trim all trailing empty paths
+    for (char* tail = copy + strlen(copy) - 1; tail >= copy && *tail == separator; tail--) {
+      *tail = '\0';
+    }
+
+    char from[3] = {separator, separator, '\0'};
+    char to  [2] = {separator, '\0'};
+    while (StringUtils::replace_no_expand(copy, from, to) > 0) {
+      // Keep replacing "::" -> ":" until we have no more "::" (non-windows)
+      // Keep replacing ";;" -> ";" until we have no more ";;" (windows)
+    }
+
+    _java_class_path->set_value(copy);
+    FreeHeap(copy); // set_value() made its own copy, so we don't need this one anymore
+  }
+
+  if (!PrintSharedArchiveAndExit) {
+    ClassLoader::trace_class_path("[classpath: ", _java_class_path->value());
+  }
+}
+
+static bool has_jar_files(const char* directory) {
+  DIR* dir = os::opendir(directory);
+  if (dir == NULL) return false;
+
+  struct dirent *entry;
+  char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(directory), mtInternal);
+  bool hasJarFile = false;
+  while (!hasJarFile && (entry = os::readdir(dir, (dirent *) dbuf)) != NULL) {
+    const char* name = entry->d_name;
+    const char* ext = name + strlen(name) - 4;
+    hasJarFile = ext > name && (os::file_name_strcmp(ext, ".jar") == 0);
+  }
+  FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
+  os::closedir(dir);
+  return hasJarFile;
+}
+
+// Returns the number of directories in the given path that contain JAR files.
+// If the skip argument is not NULL, that directory is skipped.
+static int check_non_empty_dirs(const char* path, const char* type, const char* skip) {
+  const char separator = *os::path_separator();
+  const char* const end = path + strlen(path);
+  int nonEmptyDirs = 0;
+  while (path < end) {
+    const char* tmp_end = strchr(path, separator);
+    if (tmp_end == NULL) {
+      if ((skip == NULL || strcmp(path, skip) != 0) && has_jar_files(path)) {
+        nonEmptyDirs++;
+        jio_fprintf(defaultStream::output_stream(),
+          "Non-empty %s directory: %s\n", type, path);
+      }
+      path = end;
+    } else {
+      char* dirpath = NEW_C_HEAP_ARRAY(char, tmp_end - path + 1, mtInternal);
+      memcpy(dirpath, path, tmp_end - path);
+      dirpath[tmp_end - path] = '\0';
+      if ((skip == NULL || strcmp(dirpath, skip) != 0) && has_jar_files(dirpath)) {
+        nonEmptyDirs++;
+        jio_fprintf(defaultStream::output_stream(),
+          "Non-empty %s directory: %s\n", type, dirpath);
+      }
+      FREE_C_HEAP_ARRAY(char, dirpath, mtInternal);
+      path = tmp_end + 1;
+    }
+  }
+  return nonEmptyDirs;
+}
+
+// Returns true if endorsed standards override mechanism and extension mechanism
+// are not used.
+static bool check_endorsed_and_ext_dirs() {
+  if (!CheckEndorsedAndExtDirs)
+    return true;
+
+  char endorsedDir[JVM_MAXPATHLEN];
+  char extDir[JVM_MAXPATHLEN];
+  const char* fileSep = os::file_separator();
+  jio_snprintf(endorsedDir, sizeof(endorsedDir), "%s%slib%sendorsed",
+               Arguments::get_java_home(), fileSep, fileSep);
+  jio_snprintf(extDir, sizeof(extDir), "%s%slib%sext",
+               Arguments::get_java_home(), fileSep, fileSep);
+
+  // check endorsed directory
+  int nonEmptyDirs = check_non_empty_dirs(Arguments::get_endorsed_dir(), "endorsed", NULL);
+
+  // check the extension directories but skip the default lib/ext directory
+  nonEmptyDirs += check_non_empty_dirs(Arguments::get_ext_dirs(), "extension", extDir);
+
+  // List of JAR files installed in the default lib/ext directory.
+  // -XX:+CheckEndorsedAndExtDirs checks whether any non-JDK file is installed.
+  static const char* jdk_ext_jars[] = {
+      "access-bridge-32.jar",
+      "access-bridge-64.jar",
+      "access-bridge.jar",
+      "cldrdata.jar",
+      "dnsns.jar",
+      "jaccess.jar",
+      "jfxrt.jar",
+      "localedata.jar",
+      "nashorn.jar",
+      "sunec.jar",
+      "sunjce_provider.jar",
+      "sunmscapi.jar",
+      "sunpkcs11.jar",
+      "ucrypto.jar",
+      "zipfs.jar",
+      NULL
+  };
+
+  // check if the default lib/ext directory has any non-JDK jar files; if so, error
+  DIR* dir = os::opendir(extDir);
+  if (dir != NULL) {
+    int num_ext_jars = 0;
+    struct dirent *entry;
+    char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(extDir), mtInternal);
+    while ((entry = os::readdir(dir, (dirent *) dbuf)) != NULL) {
+      const char* name = entry->d_name;
+      const char* ext = name + strlen(name) - 4;
+      if (ext > name && (os::file_name_strcmp(ext, ".jar") == 0)) {
+        bool is_jdk_jar = false;
+        const char* jarfile = NULL;
+        for (int i = 0; (jarfile = jdk_ext_jars[i]) != NULL; i++) {
+          if (os::file_name_strcmp(name, jarfile) == 0) {
+            is_jdk_jar = true;
+            break;
+          }
+        }
+        if (!is_jdk_jar) {
+          jio_fprintf(defaultStream::output_stream(),
+            "%s installed in <JAVA_HOME>/lib/ext\n", name);
+          num_ext_jars++;
+        }
+      }
+    }
+    FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
+    os::closedir(dir);
+    if (num_ext_jars > 0) {
+      nonEmptyDirs += 1;
+    }
+  }
+
+  // check if the default lib/endorsed directory exists; if so, error
+  dir = os::opendir(endorsedDir);
+  if (dir != NULL) {
+    jio_fprintf(defaultStream::output_stream(), "<JAVA_HOME>/lib/endorsed exists\n");
+    os::closedir(dir);
+    nonEmptyDirs += 1;
+  }
+
+  if (nonEmptyDirs > 0) {
+    jio_fprintf(defaultStream::output_stream(),
+      "Endorsed standards override mechanism and extension mechanism "
+      "will not be supported in a future release.\n"
+      "Refer to JEP 220 for details (http://openjdk.java.net/jeps/220).\n");
+    return false;
+  }
+
+  return true;
+}
+
 jint Arguments::finalize_vm_init_args(SysClassPath* scp_p, bool scp_assembly_required) {
   // This must be done after all -D arguments have been processed.
   scp_p->expand_endorsed();
@@ -3318,6 +3633,10 @@
     Arguments::set_sysclasspath(scp_p->combined_path());
   }
 
+  if (!check_endorsed_and_ext_dirs()) {
+    return JNI_ERR;
+  }
+
   // This must be done after all arguments have been processed.
   // java_compiler() true means set to "NONE" or empty.
   if (java_compiler() && !xdebug_mode()) {
@@ -3479,9 +3798,8 @@
         "Cannot dump shared archive when UseCompressedOops or UseCompressedClassPointers is off.", NULL);
     }
   } else {
-    // UseCompressedOops and UseCompressedClassPointers must be on for UseSharedSpaces.
     if (!UseCompressedOops || !UseCompressedClassPointers) {
-      no_shared_spaces();
+      no_shared_spaces("UseCompressedOops and UseCompressedClassPointers must be on for UseSharedSpaces.");
     }
 #endif
   }
@@ -3565,6 +3883,8 @@
   bool settings_file_specified = false;
   bool needs_hotspotrc_warning = false;
 
+  ArgumentsExt::process_options(args);
+
   const char* flags_file;
   int index;
   for (index = 0; index < args->nOptions; index++) {
@@ -3595,15 +3915,24 @@
       CommandLineFlags::printFlags(tty, false);
       vm_exit(0);
     }
+#if INCLUDE_NMT
     if (match_option(option, "-XX:NativeMemoryTracking", &tail)) {
-#if INCLUDE_NMT
-      MemTracker::init_tracking_options(tail);
-#else
-      jio_fprintf(defaultStream::error_stream(),
-        "Native Memory Tracking is not supported in this VM\n");
-      return JNI_ERR;
+      // The launcher did not set up the NMT environment variable properly.
+      if (!MemTracker::check_launcher_nmt_support(tail)) {
+        warning("Native Memory Tracking did not setup properly, using wrong launcher?");
+      }
+
+      // Verify that the NMT option is valid.
+      if (MemTracker::verify_nmt_option()) {
+        // Late initialization, still in single-threaded mode.
+        if (MemTracker::tracking_level() >= NMT_summary) {
+          MemTracker::init();
+        }
+      } else {
+        vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
+      }
+    }
 #endif
-    }
 
 
 #ifndef PRODUCT
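
The NativeMemoryTracking handling above now assumes the launcher pre-parsed the option into an environment variable, verifies the level text, and initializes tracking only for summary or detail while the VM is still single-threaded. A reduced sketch of the level validation (assumed names, not MemTracker's real API):

#include <cstring>

enum NmtLevel { NMT_unknown, NMT_off, NMT_summary, NMT_detail };

// tail is the text after "-XX:NativeMemoryTracking", e.g. "=summary".
static NmtLevel parse_nmt_level(const char* tail) {
  if (tail == nullptr)                    return NMT_unknown;
  if (std::strcmp(tail, "=off") == 0)     return NMT_off;
  if (std::strcmp(tail, "=summary") == 0) return NMT_summary;
  if (std::strcmp(tail, "=detail") == 0)  return NMT_detail;
  return NMT_unknown;                     // syntax error: abort VM initialization
}

static void handle_nmt_option(const char* tail) {
  NmtLevel level = parse_nmt_level(tail);
  if (level == NMT_unknown) {
    // corresponds to vm_exit_during_initialization("Syntax error, ...")
    return;
  }
  if (level >= NMT_summary) {
    // corresponds to MemTracker::init(): late, single-threaded initialization
  }
}
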
@@ -3741,7 +4070,7 @@
     FLAG_SET_DEFAULT(UseSharedSpaces, false);
     FLAG_SET_DEFAULT(PrintSharedSpaces, false);
   }
-  no_shared_spaces();
+  no_shared_spaces("CDS Disabled");
 #endif // INCLUDE_CDS
 
   return JNI_OK;
@@ -3755,7 +4084,7 @@
   set_shared_spaces_flags();
 
   // Check the GC selections again.
-  if (!check_gc_consistency()) {
+  if (!ArgumentsExt::check_gc_consistency_ergo()) {
     return JNI_EINVAL;
   }
 
@@ -3777,33 +4106,7 @@
   // Set heap size based on available physical memory
   set_heap_size();
 
-#if INCLUDE_ALL_GCS
-  // Set per-collector flags
-  if (UseParallelGC || UseParallelOldGC) {
-    set_parallel_gc_flags();
-  } else if (UseConcMarkSweepGC) { // Should be done before ParNew check below
-    set_cms_and_parnew_gc_flags();
-  } else if (UseParNewGC) {  // Skipped if CMS is set above
-    set_parnew_gc_flags();
-  } else if (UseG1GC) {
-    set_g1_gc_flags();
-  }
-  check_deprecated_gcs();
-  check_deprecated_gc_flags();
-  if (AssumeMP && !UseSerialGC) {
-    if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
-      warning("If the number of processors is expected to increase from one, then"
-              " you should configure the number of parallel GC threads appropriately"
-              " using -XX:ParallelGCThreads=N");
-    }
-  }
-  if (MinHeapFreeRatio == 100) {
-    // Keeping the heap 100% free is hard ;-) so limit it to 99%.
-    FLAG_SET_ERGO(uintx, MinHeapFreeRatio, 99);
-  }
-#else // INCLUDE_ALL_GCS
-  assert(verify_serial_gc_flags(), "SerialGC unset");
-#endif // INCLUDE_ALL_GCS
+  ArgumentsExt::set_gc_specific_flags();
 
   // Initialize Metaspace flags and alignments.
   Metaspace::ergo_initialize();
@@ -3862,10 +4165,6 @@
     // nothing to use the profiling, turn if off
     FLAG_SET_DEFAULT(TypeProfileLevel, 0);
   }
-  if (UseTypeSpeculation && FLAG_IS_DEFAULT(ReplaceInParentMaps)) {
-    // Doing the replace in parent maps helps speculation
-    FLAG_SET_DEFAULT(ReplaceInParentMaps, true);
-  }
 #endif
 
   if (PrintAssembly && FLAG_IS_DEFAULT(DebugNonSafepoints)) {
@@ -3923,18 +4222,24 @@
 }
 
 jint Arguments::adjust_after_os() {
-#if INCLUDE_ALL_GCS
-  if (UseParallelGC || UseParallelOldGC) {
-    if (UseNUMA) {
+  if (UseNUMA) {
+    if (UseParallelGC || UseParallelOldGC) {
       if (FLAG_IS_DEFAULT(MinHeapDeltaBytes)) {
-        FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
+        FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
       }
-      // For those collectors or operating systems (eg, Windows) that do
-      // not support full UseNUMA, we will map to UseNUMAInterleaving for now
-      UseNUMAInterleaving = true;
     }
-  }
-#endif // INCLUDE_ALL_GCS
+    // UseNUMAInterleaving is set to ON for all collectors and
+    // platforms when UseNUMA is set to ON. NUMA-aware collectors
+    // such as the parallel collector for Linux and Solaris will
+    // interleave old gen and survivor spaces on top of NUMA
+    // allocation policy for the eden space.
+    // Non-NUMA-aware collectors such as CMS, G1 and Serial-GC on
+    // all platforms and ParallelGC on Windows will interleave all
+    // of the heap spaces across NUMA nodes.
+    if (FLAG_IS_DEFAULT(UseNUMAInterleaving)) {
+      FLAG_SET_ERGO(bool, UseNUMAInterleaving, true);
+    }
+  }
   return JNI_OK;
 }
 
--- a/src/share/vm/runtime/arguments.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/arguments.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -289,7 +289,11 @@
   // Value of the conservative maximum heap alignment needed
   static size_t  _conservative_max_heap_alignment;
 
-  static uintx  _min_heap_size;
+  static uintx _min_heap_size;
+
+  // Used to store original flag values
+  static uintx _min_heap_free_ratio;
+  static uintx _max_heap_free_ratio;
 
   // -Xrun arguments
   static AgentLibraryList _libraryList;
@@ -328,6 +332,7 @@
 
   // Tiered
   static void set_tiered_flags();
+  static int  get_min_number_of_compiler_threads();
   // CMS/ParNew garbage collectors
   static void set_parnew_gc_flags();
   static void set_cms_and_parnew_gc_flags();
@@ -339,6 +344,7 @@
   static void set_conservative_max_heap_alignment();
   static void set_use_compressed_oops();
   static void set_use_compressed_klass_ptrs();
+  static void select_gc();
   static void set_ergonomics_flags();
   static void set_shared_spaces_flags();
   // limits the given memory size by the maximum amount of memory this process is
@@ -455,6 +461,10 @@
   // Adjusts the arguments after the OS have adjusted the arguments
   static jint adjust_after_os();
 
+  static void set_gc_specific_flags();
+  static inline bool gc_selected(); // whether a gc has been selected
+  static void select_gc_ergonomically();
+
   // Verifies that the given value will fit as a MinHeapFreeRatio. If not, an error
   // message is returned in the provided buffer.
   static bool verify_MinHeapFreeRatio(FormatBuffer<80>& err_msg, uintx min_heap_free_ratio);
@@ -464,7 +474,8 @@
   static bool verify_MaxHeapFreeRatio(FormatBuffer<80>& err_msg, uintx max_heap_free_ratio);
 
   // Check for consistency in the selection of the garbage collector.
-  static bool check_gc_consistency();
+  static bool check_gc_consistency_user();        // Check user-selected gc
+  static inline bool check_gc_consistency_ergo(); // Check ergonomic-selected gc
   static void check_deprecated_gcs();
   static void check_deprecated_gc_flags();
  // Check consistency or otherwise of VM argument settings
@@ -525,6 +536,10 @@
   static uintx min_heap_size()              { return _min_heap_size; }
   static void  set_min_heap_size(uintx v)   { _min_heap_size = v;  }
 
+  // Returns the original values of -XX:MinHeapFreeRatio and -XX:MaxHeapFreeRatio
+  static uintx min_heap_free_ratio()        { return _min_heap_free_ratio; }
+  static uintx max_heap_free_ratio()        { return _max_heap_free_ratio; }
+
   // -Xrun
   static AgentLibrary* libraries()          { return _libraryList.first(); }
   static bool init_libraries_at_startup()   { return !_libraryList.is_empty(); }
@@ -588,18 +603,32 @@
     _meta_index_dir  = meta_index_dir;
   }
 
-  static char *get_java_home() { return _java_home->value(); }
-  static char *get_dll_dir() { return _sun_boot_library_path->value(); }
-  static char *get_endorsed_dir() { return _java_endorsed_dirs->value(); }
-  static char *get_sysclasspath() { return _sun_boot_class_path->value(); }
+  static char* get_java_home() { return _java_home->value(); }
+  static char* get_dll_dir() { return _sun_boot_library_path->value(); }
+  static char* get_endorsed_dir() { return _java_endorsed_dirs->value(); }
+  static char* get_sysclasspath() { return _sun_boot_class_path->value(); }
   static char* get_meta_index_path() { return _meta_index_path; }
   static char* get_meta_index_dir()  { return _meta_index_dir;  }
+  static char* get_ext_dirs() { return _java_ext_dirs->value(); }
+  static char* get_appclasspath() { return _java_class_path->value(); }
+  static void  fix_appclasspath();
 
   // Operation modi
-  static Mode mode()                        { return _mode; }
+  static Mode mode()                { return _mode; }
+  static bool is_interpreter_only() { return mode() == _int; }
+
 
   // Utility: copies src into buf, replacing "%%" with "%" and "%p" with pid.
   static bool copy_expand_pid(const char* src, size_t srclen, char* buf, size_t buflen);
 };
 
+bool Arguments::gc_selected() {
+  return UseConcMarkSweepGC || UseG1GC || UseParallelGC || UseParallelOldGC ||
+    UseParNewGC || UseSerialGC;
+}
+
+bool Arguments::check_gc_consistency_ergo() {
+  return check_gc_consistency_user();
+}
+
 #endif // SHARE_VM_RUNTIME_ARGUMENTS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/runtime/arguments_ext.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_ARGUMENTS_EXT_HPP
+#define SHARE_VM_RUNTIME_ARGUMENTS_EXT_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/arguments.hpp"
+
+class ArgumentsExt: AllStatic {
+public:
+  static inline void select_gc_ergonomically();
+  static inline void set_gc_specific_flags();
+  static inline bool check_gc_consistency_ergo();
+  static        void process_options(const JavaVMInitArgs* args) {}
+};
+
+void ArgumentsExt::select_gc_ergonomically() {
+  Arguments::select_gc_ergonomically();
+}
+
+void ArgumentsExt::set_gc_specific_flags() {
+  Arguments::set_gc_specific_flags();
+}
+
+bool ArgumentsExt::check_gc_consistency_ergo() {
+  return Arguments::check_gc_consistency_ergo();
+}
+
+#endif // SHARE_VM_RUNTIME_ARGUMENTS_EXT_HPP
--- a/src/share/vm/runtime/deoptimization.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/deoptimization.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -223,6 +223,8 @@
   assert(vf->is_compiled_frame(), "Wrong frame type");
   chunk->push(compiledVFrame::cast(vf));
 
+  bool realloc_failures = false;
+
 #if defined(COMPILER2) || defined(GRAAL)
   // Reallocate the non-escaping objects and restore their fields. Then
   // relock objects if synchronization on them was eliminated.
@@ -240,7 +242,8 @@
       // It is not guaranteed that we can get such information here only
       // by analyzing bytecode in deoptimized frames. This is why this flag
       // is set during method compilation (see Compile::Process_OopMap_Node()).
-      bool save_oop_result = chunk->at(0)->scope()->return_oop();
+      // If the previous frame was popped, we don't have a result.
+      bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution();
       Handle return_value;
       if (save_oop_result) {
         // Reallocation may trigger GC. If deoptimization happened on return from
@@ -254,19 +257,16 @@
           tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, (void *)result, thread);
         }
       }
-      bool reallocated = false;
       if (objects != NULL) {
         JRT_BLOCK
-          reallocated = realloc_objects(thread, &deoptee, objects, THREAD);
+          realloc_failures = realloc_objects(thread, &deoptee, objects, THREAD);
         JRT_END
-      }
-      if (reallocated) {
-        reassign_fields(&deoptee, &map, objects);
+        reassign_fields(&deoptee, &map, objects, realloc_failures);
 #ifndef PRODUCT
         if (TraceDeoptimization) {
           ttyLocker ttyl;
           tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, thread);
-          print_objects(objects);
+          print_objects(objects, realloc_failures);
         }
 #endif
       }
@@ -286,7 +286,7 @@
         assert (cvf->scope() != NULL,"expect only compiled java frames");
         GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
         if (monitors->is_nonempty()) {
-          relock_objects(monitors, thread);
+          relock_objects(monitors, thread, realloc_failures);
 #ifndef PRODUCT
           if (PrintDeoptimizationDetails) {
             ttyLocker ttyl;
@@ -297,11 +297,16 @@
                   first = false;
                   tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, thread);
                 }
-                tty->print_cr("     object <" INTPTR_FORMAT "> locked", (void *)mi->owner());
+                if (mi->owner_is_scalar_replaced()) {
+                  Klass* k = java_lang_Class::as_Klass(mi->owner_klass());
+                  tty->print_cr("     failed reallocation for klass %s", k->external_name());
+                } else {
+                  tty->print_cr("     object <" INTPTR_FORMAT "> locked", (void *)mi->owner());
+                }
               }
             }
           }
-#endif // !PRODUCT
+#endif
         }
       }
 #ifdef COMPILER2
@@ -315,9 +320,14 @@
   // out the java state residing in the vframeArray will be missed.
   No_Safepoint_Verifier no_safepoint;
 
-  vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk);
+  vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk, realloc_failures);
+#if defined(COMPILER2) || defined(GRAAL)
+  if (realloc_failures) {
+    pop_frames_failed_reallocs(thread, array);
+  }
+#endif
 
-  assert(thread->vframe_array_head() == NULL, "Pending deopt!");;
+  assert(thread->vframe_array_head() == NULL, "Pending deopt!");
   thread->set_vframe_array_head(array);
 
   // Now that the vframeArray has been created if we have any deferred local writes
@@ -719,7 +729,7 @@
              (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size -
                                                                     top_frame_expression_stack_adjustment))) ||
             (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) ||
-            (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute) &&
+            (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute || el->should_reexecute()) &&
              (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size))
             )) {
         ttyLocker ttyl;
@@ -773,6 +783,8 @@
   int exception_line = thread->exception_line();
   thread->clear_pending_exception();
 
+  bool failures = false;
+
   for (int i = 0; i < objects->length(); i++) {
     assert(objects->at(i)->is_object(), "invalid debug information");
     ObjectValue* sv = (ObjectValue*) objects->at(i);
@@ -782,27 +794,34 @@
 
     if (k->oop_is_instance()) {
       InstanceKlass* ik = InstanceKlass::cast(k());
-      obj = ik->allocate_instance(CHECK_(false));
+      obj = ik->allocate_instance(THREAD);
     } else if (k->oop_is_typeArray()) {
       TypeArrayKlass* ak = TypeArrayKlass::cast(k());
       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
       int len = sv->field_size() / type2size[ak->element_type()];
-      obj = ak->allocate(len, CHECK_(false));
+      obj = ak->allocate(len, THREAD);
     } else if (k->oop_is_objArray()) {
       ObjArrayKlass* ak = ObjArrayKlass::cast(k());
-      obj = ak->allocate(sv->field_size(), CHECK_(false));
+      obj = ak->allocate(sv->field_size(), THREAD);
     }
 
-    assert(obj != NULL, "allocation failed");
+    if (obj == NULL) {
+      failures = true;
+    }
+
     assert(sv->value().is_null(), "redundant reallocation");
+    assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
+    CLEAR_PENDING_EXCEPTION;
     sv->set_value(obj);
   }
 
-  if (pending_exception.not_null()) {
+  if (failures) {
+    THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
+  } else if (pending_exception.not_null()) {
     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
   }
 
-  return true;
+  return failures;
 }
 
 // restore elements of an eliminated type array
@@ -1012,12 +1031,15 @@
 }
 
 // restore fields of all eliminated objects and arrays
-void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects) {
+void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
   for (int i = 0; i < objects->length(); i++) {
     ObjectValue* sv = (ObjectValue*) objects->at(i);
     KlassHandle k(java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()()));
     Handle obj = sv->value();
-    assert(obj.not_null(), "reallocation was missed");
+    assert(obj.not_null() || realloc_failures, "reallocation was missed");
+    if (obj.is_null()) {
+      continue;
+    }
     if (PrintDeoptimizationDetails) {
       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
     }
@@ -1036,34 +1058,36 @@
 
 
 // relock objects for which synchronization was eliminated
-void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread) {
+void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread, bool realloc_failures) {
   for (int i = 0; i < monitors->length(); i++) {
     MonitorInfo* mon_info = monitors->at(i);
     if (mon_info->eliminated()) {
-      assert(mon_info->owner() != NULL, "reallocation was missed");
-      Handle obj = Handle(mon_info->owner());
-      markOop mark = obj->mark();
-      if (UseBiasedLocking && mark->has_bias_pattern()) {
-        // New allocated objects may have the mark set to anonymously biased.
-        // Also the deoptimized method may called methods with synchronization
-        // where the thread-local object is bias locked to the current thread.
-        assert(mark->is_biased_anonymously() ||
-               mark->biased_locker() == thread, "should be locked to current thread");
-        // Reset mark word to unbiased prototype.
-        markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
-        obj->set_mark(unbiased_prototype);
+      assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
+      if (!mon_info->owner_is_scalar_replaced()) {
+        Handle obj = Handle(mon_info->owner());
+        markOop mark = obj->mark();
+        if (UseBiasedLocking && mark->has_bias_pattern()) {
+          // New allocated objects may have the mark set to anonymously biased.
+          // Also the deoptimized method may have called methods with synchronization
+          // where the thread-local object is bias locked to the current thread.
+          assert(mark->is_biased_anonymously() ||
+                 mark->biased_locker() == thread, "should be locked to current thread");
+          // Reset mark word to unbiased prototype.
+          markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
+          obj->set_mark(unbiased_prototype);
+        }
+        BasicLock* lock = mon_info->lock();
+        ObjectSynchronizer::slow_enter(obj, lock, thread);
+        assert(mon_info->owner()->is_locked(), "object must be locked now");
       }
-      BasicLock* lock = mon_info->lock();
-      ObjectSynchronizer::slow_enter(obj, lock, thread);
     }
-    assert(mon_info->owner()->is_locked(), "object must be locked now");
   }
 }
 
 
 #ifndef PRODUCT
 // print information about reallocated objects
-void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects) {
+void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
   fieldDescriptor fd;
 
   for (int i = 0; i < objects->length(); i++) {
@@ -1073,10 +1097,15 @@
 
     tty->print("     object <" INTPTR_FORMAT "> of type ", (void *)sv->value()());
     k->print_value();
-    tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
+    assert(obj.not_null() || realloc_failures, "reallocation was missed");
+    if (obj.is_null()) {
+      tty->print(" allocation failed");
+    } else {
+      tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
+    }
     tty->cr();
 
-    if (Verbose) {
+    if (Verbose && !obj.is_null()) {
       k->oop_print_on(obj(), tty);
     }
   }
@@ -1084,7 +1113,7 @@
 #endif
 #endif // COMPILER2 || GRAAL
 
-vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk) {
+vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
   Events::log(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, fr.pc(), fr.sp());
 
 #ifndef PRODUCT
@@ -1127,7 +1156,7 @@
   // Since the Java thread being deoptimized will eventually adjust it's own stack,
   // the vframeArray containing the unpacking information is allocated in the C heap.
   // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
-  vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr);
+  vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures);
 
   // Compare the vframeArray to the collected vframes
   assert(array->structural_compare(thread, chunk), "just checking");
@@ -1142,6 +1171,33 @@
   return array;
 }
 
+#if defined(COMPILER2) || defined(GRAAL)
+void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array) {
+  // Reallocation of some scalar replaced objects failed. Record
+  // that we need to pop all the interpreter frames for the
+  // deoptimized compiled frame.
+  assert(thread->frames_to_pop_failed_realloc() == 0, "missed frames to pop?");
+  thread->set_frames_to_pop_failed_realloc(array->frames());
+  // Unlock all monitors here, otherwise the interpreter will see a
+  // mix of locked and unlocked monitors (because of failed
+  // reallocations of synchronized objects) and be confused.
+  for (int i = 0; i < array->frames(); i++) {
+    MonitorChunk* monitors = array->element(i)->monitors();
+    if (monitors != NULL) {
+      for (int j = 0; j < monitors->number_of_monitors(); j++) {
+        BasicObjectLock* src = monitors->at(j);
+        if (src->obj() != NULL) {
+          ObjectSynchronizer::fast_exit(src->obj(), src->lock(), thread);
+        }
+      }
+      array->element(i)->free_monitors(thread);
+#ifdef ASSERT
+      array->element(i)->set_removed_monitors();
+#endif
+    }
+  }
+}
+#endif
 
 static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects_to_revoke) {
   GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
@@ -2085,6 +2141,7 @@
   "loop_limit_check",
   "speculate_class_check",
   "rtm_state_change",
+  "unstable_if"
 #ifdef GRAAL
   "aliasing",
   "transfer_to_interpreter",
--- a/src/share/vm/runtime/deoptimization.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/deoptimization.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -74,6 +74,7 @@
     Reason_loop_limit_check,      // compiler generated loop limits check failed
     Reason_speculate_class_check, // saw unexpected object class from type speculation
     Reason_rtm_state_change,      // rtm state change detected
+    Reason_unstable_if,           // a branch predicted always false was taken
 #ifdef GRAAL
     Reason_aliasing,              // optimistic assumption about aliasing failed
     Reason_transfer_to_interpreter, // explicit transferToInterpreter()
@@ -82,7 +83,6 @@
     Reason_jsr_mismatch,
 #endif
     Reason_LIMIT,
-
     // Note:  Keep this enum in sync. with _trap_reason_name.
     Reason_RECORDED_LIMIT = Reason_bimorphic  // some are not recorded per bc
     // Note:  Reason_RECORDED_LIMIT should be < 8 to fit into 3 bits of
@@ -145,13 +145,14 @@
   static bool realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS);
   static void reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type);
   static void reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj);
-  static void reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects);
-  static void relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread);
-  NOT_PRODUCT(static void print_objects(GrowableArray<ScopeValue*>* objects);)
+  static void reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures);
+  static void relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread, bool realloc_failures);
+  static void pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array);
+  NOT_PRODUCT(static void print_objects(GrowableArray<ScopeValue*>* objects, bool realloc_failures);)
 #endif // COMPILER2 || GRAAL
 
   public:
-  static vframeArray* create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk);
+  static vframeArray* create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures);
 
   // Interface used for unpacking deoptimized frames
 
@@ -264,7 +265,7 @@
   // @argument id.         id of frame that should be deoptimized.
   static void deoptimize_frame_internal(JavaThread* thread, intptr_t* id, DeoptReason reason);
 
-  // if thread is not the current thread then execute
+  // If thread is not the current thread then execute
   // VM_DeoptimizeFrame otherwise deoptimize directly.
   static void deoptimize_frame(JavaThread* thread, intptr_t* id, DeoptReason reason);
   static void deoptimize_frame(JavaThread* thread, intptr_t* id);
@@ -354,6 +355,8 @@
       return Reason_null_check;           // recorded per BCI as a null check
     else if (reason == Reason_speculate_class_check)
       return Reason_class_check;
+    else if (reason == Reason_unstable_if)
+      return Reason_intrinsic;
     else
       return Reason_none;
   }
--- a/src/share/vm/runtime/fprofiler.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/fprofiler.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -38,6 +38,7 @@
 #include "runtime/stubCodeGenerator.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/task.hpp"
+#include "runtime/thread.inline.hpp"
 #include "runtime/vframe.hpp"
 #include "utilities/macros.hpp"
 
--- a/src/share/vm/runtime/frame.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/frame.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -900,7 +900,7 @@
 }
 
 
-void frame::oops_interpreted_do(OopClosure* f, CLDToOopClosure* cld_f,
+void frame::oops_interpreted_do(OopClosure* f, CLDClosure* cld_f,
     const RegisterMap* map, bool query_oop_map_cache) {
   assert(is_interpreted_frame(), "Not an interpreted frame");
   assert(map != NULL, "map must be set");
@@ -1140,7 +1140,7 @@
 }
 
 
-void frame::oops_do_internal(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) {
+void frame::oops_do_internal(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) {
 #ifndef PRODUCT
   // simulate GC crash here to dump java thread in error report
   if (CrashGCForDumpingJavaThread) {
--- a/src/share/vm/runtime/frame.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/frame.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -419,19 +419,19 @@
 
   // Oops-do's
   void oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix, const RegisterMap* reg_map, OopClosure* f);
-  void oops_interpreted_do(OopClosure* f, CLDToOopClosure* cld_f, const RegisterMap* map, bool query_oop_map_cache = true);
+  void oops_interpreted_do(OopClosure* f, CLDClosure* cld_f, const RegisterMap* map, bool query_oop_map_cache = true);
 
  private:
   void oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f);
 
   // Iteration of oops
-  void oops_do_internal(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache);
+  void oops_do_internal(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache);
   void oops_entry_do(OopClosure* f, const RegisterMap* map);
   void oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map);
   int adjust_offset(Method* method, int index); // helper for above fn
  public:
   // Memory management
-  void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map) { oops_do_internal(f, cld_f, cf, map, true); }
+  void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map) { oops_do_internal(f, cld_f, cf, map, true); }
   void nmethods_do(CodeBlobClosure* cf);
 
   // RedefineClasses support for finding live interpreted methods on the stack
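
Because the parameter type is widened from CLDToOopClosure to the more general CLDClosure, frame::oops_do and oops_interpreted_do now accept any ClassLoaderData visitor, not only the oop-forwarding one. A minimal hypothetical closure that becomes acceptable (illustrative only; nothing like it is added by this patch):

    class CountCLDsClosure : public CLDClosure {
     public:
      int _count;
      CountCLDsClosure() : _count(0) { }
      void do_cld(ClassLoaderData* cld) { _count++; }  // just count the CLDs visited
    };
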
--- a/src/share/vm/runtime/globals.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/globals.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -246,6 +246,11 @@
   return is_unlocked_ext();
 }
 
+void Flag::unlock_diagnostic() {
+  assert(is_diagnostic(), "sanity");
+  _flags = Flags(_flags & ~KIND_DIAGNOSTIC);
+}
+
 // Get custom message for this locked flag, or return NULL if
 // none is available.
 void Flag::get_locked_message(char* buf, int buflen) const {
@@ -624,7 +629,7 @@
   e.commit();
 }
 
-bool CommandLineFlags::boolAt(char* name, size_t len, bool* value) {
+bool CommandLineFlags::boolAt(const char* name, size_t len, bool* value) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
   if (!result->is_bool()) return false;
@@ -632,7 +637,7 @@
   return true;
 }
 
-bool CommandLineFlags::boolAtPut(char* name, size_t len, bool* value, Flag::Flags origin) {
+bool CommandLineFlags::boolAtPut(const char* name, size_t len, bool* value, Flag::Flags origin) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
   if (!result->is_bool()) return false;
@@ -652,7 +657,7 @@
   faddr->set_origin(origin);
 }
 
-bool CommandLineFlags::intxAt(char* name, size_t len, intx* value) {
+bool CommandLineFlags::intxAt(const char* name, size_t len, intx* value) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
   if (!result->is_intx()) return false;
@@ -660,7 +665,7 @@
   return true;
 }
 
-bool CommandLineFlags::intxAtPut(char* name, size_t len, intx* value, Flag::Flags origin) {
+bool CommandLineFlags::intxAtPut(const char* name, size_t len, intx* value, Flag::Flags origin) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
   if (!result->is_intx()) return false;
@@ -680,7 +685,7 @@
   faddr->set_origin(origin);
 }
 
-bool CommandLineFlags::uintxAt(char* name, size_t len, uintx* value) {
+bool CommandLineFlags::uintxAt(const char* name, size_t len, uintx* value) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
   if (!result->is_uintx()) return false;
@@ -688,7 +693,7 @@
   return true;
 }
 
-bool CommandLineFlags::uintxAtPut(char* name, size_t len, uintx* value, Flag::Flags origin) {
+bool CommandLineFlags::uintxAtPut(const char* name, size_t len, uintx* value, Flag::Flags origin) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
   if (!result->is_uintx()) return false;
@@ -708,7 +713,7 @@
   faddr->set_origin(origin);
 }
 
-bool CommandLineFlags::uint64_tAt(char* name, size_t len, uint64_t* value) {
+bool CommandLineFlags::uint64_tAt(const char* name, size_t len, uint64_t* value) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
   if (!result->is_uint64_t()) return false;
@@ -716,7 +721,7 @@
   return true;
 }
 
-bool CommandLineFlags::uint64_tAtPut(char* name, size_t len, uint64_t* value, Flag::Flags origin) {
+bool CommandLineFlags::uint64_tAtPut(const char* name, size_t len, uint64_t* value, Flag::Flags origin) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
   if (!result->is_uint64_t()) return false;
@@ -736,7 +741,7 @@
   faddr->set_origin(origin);
 }
 
-bool CommandLineFlags::doubleAt(char* name, size_t len, double* value) {
+bool CommandLineFlags::doubleAt(const char* name, size_t len, double* value) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
   if (!result->is_double()) return false;
@@ -744,7 +749,7 @@
   return true;
 }
 
-bool CommandLineFlags::doubleAtPut(char* name, size_t len, double* value, Flag::Flags origin) {
+bool CommandLineFlags::doubleAtPut(const char* name, size_t len, double* value, Flag::Flags origin) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
   if (!result->is_double()) return false;
@@ -764,7 +769,7 @@
   faddr->set_origin(origin);
 }
 
-bool CommandLineFlags::ccstrAt(char* name, size_t len, ccstr* value) {
+bool CommandLineFlags::ccstrAt(const char* name, size_t len, ccstr* value) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
   if (!result->is_ccstr()) return false;
@@ -772,9 +777,7 @@
   return true;
 }
 
-// Contract:  Flag will make private copy of the incoming value.
-// Outgoing value is always malloc-ed, and caller MUST call free.
-bool CommandLineFlags::ccstrAtPut(char* name, size_t len, ccstr* value, Flag::Flags origin) {
+bool CommandLineFlags::ccstrAtPut(const char* name, size_t len, ccstr* value, Flag::Flags origin) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
   if (!result->is_ccstr()) return false;
@@ -797,7 +800,6 @@
   return true;
 }
 
-// Contract:  Flag will make private copy of the incoming value.
 void CommandLineFlagsEx::ccstrAtPut(CommandLineFlagWithType flag, ccstr value, Flag::Flags origin) {
   Flag* faddr = address_of_flag(flag);
   guarantee(faddr != NULL && faddr->is_ccstr(), "wrong flag type");
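
Two usage notes on the changes above, shown as hedged sketches rather than code taken from this patch. First, the const char* overloads let a flag be looked up with a string literal and no const_cast; second, the new Flag::unlock_diagnostic() clears the KIND_DIAGNOSTIC bit so a diagnostic flag can be written programmatically without -XX:+UnlockDiagnosticVMOptions:

    bool verbose = false;
    if (CommandLineFlags::boolAt("PrintGCDetails", &verbose)) {
      // verbose now holds the current value of PrintGCDetails
    }

    // Given a Flag* f for a diagnostic option, obtained via Flag::find_flag
    // (locked-flag lookup details elided; hypothetical caller, not in the patch):
    if (f != NULL && f->is_diagnostic()) {
      f->unlock_diagnostic();   // now writable like an ordinary product flag
    }
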
--- a/src/share/vm/runtime/globals.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/globals.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -210,7 +210,7 @@
 #define CI_COMPILER_COUNT 0
 #else
 
-#ifdef COMPILER2
+#if defined(COMPILER2) || defined(COMPILERGRAAL)
 #define CI_COMPILER_COUNT 2
 #else
 #define CI_COMPILER_COUNT 1
@@ -328,6 +328,8 @@
   bool is_writeable_ext() const;
   bool is_external_ext() const;
 
+  void unlock_diagnostic();
+
   void get_locked_message(char*, int) const;
   void get_locked_message_ext(char*, int) const;
 
@@ -378,35 +380,37 @@
 
 class CommandLineFlags {
  public:
-  static bool boolAt(char* name, size_t len, bool* value);
-  static bool boolAt(char* name, bool* value)      { return boolAt(name, strlen(name), value); }
-  static bool boolAtPut(char* name, size_t len, bool* value, Flag::Flags origin);
-  static bool boolAtPut(char* name, bool* value, Flag::Flags origin)   { return boolAtPut(name, strlen(name), value, origin); }
+  static bool boolAt(const char* name, size_t len, bool* value);
+  static bool boolAt(const char* name, bool* value)      { return boolAt(name, strlen(name), value); }
+  static bool boolAtPut(const char* name, size_t len, bool* value, Flag::Flags origin);
+  static bool boolAtPut(const char* name, bool* value, Flag::Flags origin)   { return boolAtPut(name, strlen(name), value, origin); }
 
-  static bool intxAt(char* name, size_t len, intx* value);
-  static bool intxAt(char* name, intx* value)      { return intxAt(name, strlen(name), value); }
-  static bool intxAtPut(char* name, size_t len, intx* value, Flag::Flags origin);
-  static bool intxAtPut(char* name, intx* value, Flag::Flags origin)   { return intxAtPut(name, strlen(name), value, origin); }
+  static bool intxAt(const char* name, size_t len, intx* value);
+  static bool intxAt(const char* name, intx* value)      { return intxAt(name, strlen(name), value); }
+  static bool intxAtPut(const char* name, size_t len, intx* value, Flag::Flags origin);
+  static bool intxAtPut(const char* name, intx* value, Flag::Flags origin)   { return intxAtPut(name, strlen(name), value, origin); }
 
-  static bool uintxAt(char* name, size_t len, uintx* value);
-  static bool uintxAt(char* name, uintx* value)    { return uintxAt(name, strlen(name), value); }
-  static bool uintxAtPut(char* name, size_t len, uintx* value, Flag::Flags origin);
-  static bool uintxAtPut(char* name, uintx* value, Flag::Flags origin) { return uintxAtPut(name, strlen(name), value, origin); }
+  static bool uintxAt(const char* name, size_t len, uintx* value);
+  static bool uintxAt(const char* name, uintx* value)    { return uintxAt(name, strlen(name), value); }
+  static bool uintxAtPut(const char* name, size_t len, uintx* value, Flag::Flags origin);
+  static bool uintxAtPut(const char* name, uintx* value, Flag::Flags origin) { return uintxAtPut(name, strlen(name), value, origin); }
 
-  static bool uint64_tAt(char* name, size_t len, uint64_t* value);
-  static bool uint64_tAt(char* name, uint64_t* value) { return uint64_tAt(name, strlen(name), value); }
-  static bool uint64_tAtPut(char* name, size_t len, uint64_t* value, Flag::Flags origin);
-  static bool uint64_tAtPut(char* name, uint64_t* value, Flag::Flags origin) { return uint64_tAtPut(name, strlen(name), value, origin); }
+  static bool uint64_tAt(const char* name, size_t len, uint64_t* value);
+  static bool uint64_tAt(const char* name, uint64_t* value) { return uint64_tAt(name, strlen(name), value); }
+  static bool uint64_tAtPut(const char* name, size_t len, uint64_t* value, Flag::Flags origin);
+  static bool uint64_tAtPut(const char* name, uint64_t* value, Flag::Flags origin) { return uint64_tAtPut(name, strlen(name), value, origin); }
 
-  static bool doubleAt(char* name, size_t len, double* value);
-  static bool doubleAt(char* name, double* value)    { return doubleAt(name, strlen(name), value); }
-  static bool doubleAtPut(char* name, size_t len, double* value, Flag::Flags origin);
-  static bool doubleAtPut(char* name, double* value, Flag::Flags origin) { return doubleAtPut(name, strlen(name), value, origin); }
+  static bool doubleAt(const char* name, size_t len, double* value);
+  static bool doubleAt(const char* name, double* value)    { return doubleAt(name, strlen(name), value); }
+  static bool doubleAtPut(const char* name, size_t len, double* value, Flag::Flags origin);
+  static bool doubleAtPut(const char* name, double* value, Flag::Flags origin) { return doubleAtPut(name, strlen(name), value, origin); }
 
-  static bool ccstrAt(char* name, size_t len, ccstr* value);
-  static bool ccstrAt(char* name, ccstr* value)    { return ccstrAt(name, strlen(name), value); }
-  static bool ccstrAtPut(char* name, size_t len, ccstr* value, Flag::Flags origin);
-  static bool ccstrAtPut(char* name, ccstr* value, Flag::Flags origin) { return ccstrAtPut(name, strlen(name), value, origin); }
+  static bool ccstrAt(const char* name, size_t len, ccstr* value);
+  static bool ccstrAt(const char* name, ccstr* value)    { return ccstrAt(name, strlen(name), value); }
+  // Contract:  Flag will make private copy of the incoming value.
+  // Outgoing value is always malloc-ed, and caller MUST call free.
+  static bool ccstrAtPut(const char* name, size_t len, ccstr* value, Flag::Flags origin);
+  static bool ccstrAtPut(const char* name, ccstr* value, Flag::Flags origin) { return ccstrAtPut(name, strlen(name), value, origin); }
 
   // Returns false if name is not a command line flag.
   static bool wasSetOnCmdline(const char* name, bool* value);
@@ -610,6 +614,9 @@
   product(bool, UseAES, false,                                              \
           "Control whether AES instructions can be used on x86/x64")        \
                                                                             \
+  product(bool, UseSHA, false,                                              \
+          "Control whether SHA instructions can be used on SPARC")          \
+                                                                            \
   product(uintx, LargePageSizeInBytes, 0,                                   \
           "Large page size (0 to let VM choose the page size)")             \
                                                                             \
@@ -716,6 +723,15 @@
   product(bool, UseAESIntrinsics, false,                                    \
           "Use intrinsics for AES versions of crypto")                      \
                                                                             \
+  product(bool, UseSHA1Intrinsics, false,                                   \
+          "Use intrinsics for SHA-1 crypto hash function")                  \
+                                                                            \
+  product(bool, UseSHA256Intrinsics, false,                                 \
+          "Use intrinsics for SHA-224 and SHA-256 crypto hash functions")   \
+                                                                            \
+  product(bool, UseSHA512Intrinsics, false,                                 \
+          "Use intrinsics for SHA-384 and SHA-512 crypto hash functions")   \
+                                                                            \
   product(bool, UseCRC32Intrinsics, false,                                  \
           "use intrinsics for java.util.zip.CRC32")                         \
                                                                             \
@@ -944,11 +960,6 @@
   diagnostic(bool, PrintNMTStatistics, false,                               \
           "Print native memory tracking summary data if it is on")          \
                                                                             \
-  diagnostic(bool, AutoShutdownNMT, true,                                   \
-          "Automatically shutdown native memory tracking under stress "     \
-          "situations. When set to false, native memory tracking tries to " \
-          "stay alive at the expense of JVM performance")                   \
-                                                                            \
   diagnostic(bool, LogCompilation, false,                                   \
           "Log compilation activity in detail to LogFile")                  \
                                                                             \
@@ -1086,6 +1097,9 @@
   product(bool, ClassUnloading, true,                                       \
           "Do unloading of classes")                                        \
                                                                             \
+  product(bool, ClassUnloadingWithConcurrentMark, true,                     \
+          "Do unloading of classes with a concurrent marking cycle")        \
+                                                                            \
   develop(bool, DisableStartThread, false,                                  \
           "Disable starting of additional Java threads "                    \
           "(for debugging only)")                                           \
@@ -1220,9 +1234,17 @@
   product(bool, CheckJNICalls, false,                                       \
           "Verify all arguments to JNI calls")                              \
                                                                             \
+  product(bool, CheckEndorsedAndExtDirs, false,                             \
+          "Verify the endorsed and extension directories are not used")     \
+                                                                            \
   product(bool, UseFastJNIAccessors, true,                                  \
           "Use optimized versions of Get<Primitive>Field")                  \
                                                                             \
+  product(intx, MaxJNILocalCapacity, 65536,                                 \
+          "Maximum allowable local JNI handle capacity to "                 \
+          "EnsureLocalCapacity() and PushLocalFrame(), "                    \
+          "where <= 0 is unlimited, default: 65536")                        \
+                                                                            \
   product(bool, EagerXrunInit, false,                                       \
           "Eagerly initialize -Xrun libraries; allows startup profiling, "  \
           "but not all -Xrun libraries may support the state of the VM "    \
@@ -1947,6 +1969,10 @@
           "not just one of the generations (e.g., G1). A value of 0 "       \
           "denotes 'do constant GC cycles'.")                               \
                                                                             \
+  manageable(intx, CMSTriggerInterval, -1,                                  \
+          "Commence a CMS collection cycle (at least) every so many "       \
+          "milliseconds (0 permanently, -1 disabled)")                      \
+                                                                            \
   product(bool, UseCMSInitiatingOccupancyOnly, false,                       \
           "Only use occupancy as a criterion for starting a CMS collection")\
                                                                             \
@@ -2311,6 +2337,9 @@
   manageable(bool, PrintGCTimeStamps, false,                                \
           "Print timestamps at garbage collection")                         \
                                                                             \
+  manageable(bool, PrintGCID, false,                                        \
+          "Print an identifier for each garbage collection")                \
+                                                                            \
   product(bool, PrintGCTaskTimeStamps, false,                               \
           "Print timestamps for individual gc worker thread tasks")         \
                                                                             \
@@ -2334,6 +2363,12 @@
   notproduct(bool, TraceScavenge, false,                                    \
           "Trace scavenge")                                                 \
                                                                             \
+  product(bool, IgnoreEmptyClassPaths, false,                               \
+          "Ignore empty path elements in -classpath")                       \
+                                                                            \
+  product(bool, TraceClassPaths, false,                                     \
+          "Trace processing of class paths")                                \
+                                                                            \
   product_rw(bool, TraceClassLoading, false,                                \
           "Trace all classes loaded")                                       \
                                                                             \
@@ -3794,6 +3829,13 @@
   product(bool, PrintSharedSpaces, false,                                   \
           "Print usage of shared spaces")                                   \
                                                                             \
+  product(bool, PrintSharedArchiveAndExit, false,                           \
+          "Print shared archive file contents")                             \
+                                                                            \
+  product(bool, PrintSharedDictionary, false,                               \
+          "If PrintSharedArchiveAndExit is true, also print the shared "    \
+          "dictionary")                                                     \
+                                                                            \
   product(uintx, SharedReadWriteSize,  NOT_LP64(12*M) LP64_ONLY(16*M),      \
           "Size of read-write space for metadata (in bytes)")               \
                                                                             \
@@ -3814,6 +3856,10 @@
           "support JSR 292 (method handles, invokedynamic, "                \
           "anonymous classes")                                              \
                                                                             \
+  diagnostic(bool, IgnoreUnverifiableClassesDuringDump, false,              \
+          "Do not quit -Xshare:dump even if we encounter unverifiable "     \
+          "classes. Just exclude them from the shared dictionary.")         \
+                                                                            \
   diagnostic(bool, PrintMethodHandleStubs, false,                           \
           "Print generated stub code for method handles")                   \
                                                                             \
@@ -3903,12 +3949,25 @@
   product(bool, PrintGCCause, true,                                         \
           "Include GC cause in GC logging")                                 \
                                                                             \
+  experimental(intx, SurvivorAlignmentInBytes, 0,                           \
+           "Default survivor space alignment in bytes")                     \
+                                                                            \
   product(bool , AllowNonVirtualCalls, false,                               \
           "Obey the ACC_SUPER flag and allow invokenonvirtual calls")       \
                                                                             \
+  product(ccstr, DumpLoadedClassList, NULL,                                 \
+          "Dump the names all loaded classes, that could be stored into "   \
+          "the CDS archive, in the specified file")                         \
+                                                                            \
+  product(ccstr, SharedClassListFile, NULL,                                 \
+          "Override the default CDS class list")                            \
+                                                                            \
   diagnostic(ccstr, SharedArchiveFile, NULL,                                \
           "Override the default location of the CDS archive file")          \
                                                                             \
+  product(ccstr, ExtraSharedClassListFile, NULL,                            \
+          "Extra classlist for building the CDS archive file")              \
+                                                                            \
   experimental(uintx, ArrayAllocatorMallocLimit,                            \
           SOLARIS_ONLY(64*K) NOT_SOLARIS(max_uintx),                        \
           "Allocation less than this value will be allocated "              \
--- a/src/share/vm/runtime/globals_extension.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/globals_extension.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -223,6 +223,7 @@
   static void uintxAtPut(CommandLineFlagWithType flag, uintx value, Flag::Flags origin);
   static void uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, Flag::Flags origin);
   static void doubleAtPut(CommandLineFlagWithType flag, double value, Flag::Flags origin);
+  // Contract:  Flag will make private copy of the incoming value
   static void ccstrAtPut(CommandLineFlagWithType flag, ccstr value, Flag::Flags origin);
 
   static bool is_default(CommandLineFlag flag);
--- a/src/share/vm/runtime/handles.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/handles.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -227,7 +227,7 @@
   HandleArea* _prev;          // link to outer (older) area
  public:
   // Constructor
-  HandleArea(HandleArea* prev) : Arena(Chunk::tiny_size) {
+  HandleArea(HandleArea* prev) : Arena(mtThread, Chunk::tiny_size) {
     debug_only(_handle_mark_nesting    = 0);
     debug_only(_no_handle_mark_nesting = 0);
     _prev = prev;
--- a/src/share/vm/runtime/init.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/init.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,8 +34,10 @@
 #include "runtime/init.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/sharedRuntime.hpp"
+#include "services/memTracker.hpp"
 #include "utilities/macros.hpp"
 
+
 // Initialization done by VM thread in vm_init_globals()
 void check_ThreadShadow();
 void eventlog_init();
@@ -131,6 +133,12 @@
   javaClasses_init();   // must happen after vtable initialization
   stubRoutines_init2(); // note: StubRoutines need 2-phase init
 
+#if INCLUDE_NMT
+  // Solaris stack is walkable only after stubRoutines are set up.
+  // On other platforms, the stack is always walkable.
+  NMT_stack_walkable = true;
+#endif // INCLUDE_NMT
+
   // All the flags that get adjusted by VM_Version_init and os::init_2
   // have been set so dump the flags now.
   if (PrintFlagsFinal) {
--- a/src/share/vm/runtime/interfaceSupport.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/interfaceSupport.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -30,6 +30,7 @@
 #include "memory/resourceArea.hpp"
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/threadLocalStorage.hpp"
 #include "runtime/vframe.hpp"
 #include "utilities/preserveException.hpp"
@@ -84,7 +85,7 @@
   // Short-circuit any possible re-entrant gc-a-lot attempt
   if (thread->skip_gcalot()) return;
 
-  if (is_init_completed()) {
+  if (Threads::is_vm_complete()) {
 
     if (++_fullgc_alot_invocation < FullGCALotStart) {
       return;
--- a/src/share/vm/runtime/java.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/java.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -61,7 +61,6 @@
 #include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
 #include "runtime/vm_operations.hpp"
-#include "services/memReporter.hpp"
 #include "services/memTracker.hpp"
 #include "trace/tracing.hpp"
 #include "utilities/dtrace.hpp"
@@ -380,12 +379,7 @@
 #endif // ENABLE_ZAP_DEAD_LOCALS
   // Native memory tracking data
   if (PrintNMTStatistics) {
-    if (MemTracker::is_on()) {
-      BaselineTTYOutputer outputer(tty);
-      MemTracker::print_memory_usage(outputer, K, false);
-    } else {
-      tty->print_cr("%s", MemTracker::reason());
-    }
+    MemTracker::final_report(tty);
   }
 }
 
@@ -420,12 +414,7 @@
 
   // Native memory tracking data
   if (PrintNMTStatistics) {
-    if (MemTracker::is_on()) {
-      BaselineTTYOutputer outputer(tty);
-      MemTracker::print_memory_usage(outputer, K, false);
-    } else {
-      tty->print_cr("%s", MemTracker::reason());
-    }
+    MemTracker::final_report(tty);
   }
 }
 
@@ -578,10 +567,6 @@
     BeforeExit_lock->notify_all();
   }
 
-  // Shutdown NMT before exit. Otherwise,
-  // it will run into trouble when system destroys static variables.
-  MemTracker::shutdown(MemTracker::NMT_normal);
-
   if (VerifyStringTableAtExit) {
     int fail_cnt = 0;
     {
--- a/src/share/vm/runtime/javaCalls.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/javaCalls.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -329,6 +329,10 @@
 }
 
 void JavaCalls::call_helper(JavaValue* result, methodHandle* m, JavaCallArguments* args, TRAPS) {
+  // During dumping, the Java execution environment is not fully initialized. Also, Java execution
+  // may cause undesirable side-effects in the class metadata.
+  assert(!DumpSharedSpaces, "must not execute Java bytecodes when dumping");
+
   methodHandle method = *m;
   JavaThread* thread = (JavaThread*)THREAD;
   assert(thread->is_Java_thread(), "must be called by a java thread");
--- a/src/share/vm/runtime/javaFrameAnchor.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/javaFrameAnchor.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,39 +26,7 @@
 #define SHARE_VM_RUNTIME_JAVAFRAMEANCHOR_HPP
 
 #include "utilities/globalDefinitions.hpp"
-#ifdef TARGET_OS_ARCH_linux_x86
-# include "orderAccess_linux_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_sparc
-# include "orderAccess_linux_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_zero
-# include "orderAccess_linux_zero.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_x86
-# include "orderAccess_solaris_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_sparc
-# include "orderAccess_solaris_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_windows_x86
-# include "orderAccess_windows_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_arm
-# include "orderAccess_linux_arm.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_ppc
-# include "orderAccess_linux_ppc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_aix_ppc
-# include "orderAccess_aix_ppc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_x86
-# include "orderAccess_bsd_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_zero
-# include "orderAccess_bsd_zero.inline.hpp"
-#endif
+#include "runtime/orderAccess.inline.hpp"
 
 //
 // An object for encapsulating the machine/os dependent part of a JavaThread frame state
--- a/src/share/vm/runtime/mutex.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/mutex.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -25,6 +25,7 @@
 
 #include "precompiled.hpp"
 #include "runtime/mutex.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/thread.inline.hpp"
 #include "utilities/events.hpp"
--- a/src/share/vm/runtime/mutexLocker.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/mutexLocker.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -69,7 +69,7 @@
 Monitor* SerializePage_lock           = NULL;
 Monitor* Threads_lock                 = NULL;
 Monitor* CGC_lock                     = NULL;
-Mutex*   STS_init_lock                = NULL;
+Monitor* STS_lock                     = NULL;
 Monitor* SLT_lock                     = NULL;
 Monitor* iCMS_lock                    = NULL;
 Monitor* FullGCCount_lock             = NULL;
@@ -173,7 +173,7 @@
   def(tty_lock                     , Mutex  , event,       true ); // allow to lock in VM
 
   def(CGC_lock                   , Monitor, special,     true ); // coordinate between fore- and background GC
-  def(STS_init_lock              , Mutex,   leaf,        true );
+  def(STS_lock                   , Monitor, leaf,        true );
   if (UseConcMarkSweepGC) {
     def(iCMS_lock                  , Monitor, special,     true ); // CMS incremental mode start/stop notification
   }
--- a/src/share/vm/runtime/mutexLocker.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/mutexLocker.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -79,7 +79,7 @@
                                                  // (also used by Safepoints too to block threads creation/destruction)
 extern Monitor* CGC_lock;                        // used for coordination between
                                                  // fore- & background GC threads.
-extern Mutex*   STS_init_lock;                   // coordinate initialization of SuspendibleThreadSets.
+extern Monitor* STS_lock;                        // used for joining/leaving SuspendibleThreadSet.
 extern Monitor* SLT_lock;                        // used in CMS GC for acquiring PLL
 extern Monitor* iCMS_lock;                       // CMS incremental mode start/stop notification
 extern Monitor* FullGCCount_lock;                // in support of "concurrent" full gc
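
STS_init_lock only guarded one-time initialization, so a plain Mutex sufficed; STS_lock is a Monitor because joining and leaving a SuspendibleThreadSet needs wait/notify. A rough sketch of the pattern this enables, with hypothetical fields _suspend_all and _nthreads (the real SuspendibleThreadSet code lives in shared GC code and is not part of these hunks):

    void join() {
      MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
      while (_suspend_all) {
        ml.wait(Mutex::_no_safepoint_check_flag);  // block while a suspension is requested
      }
      _nthreads++;
    }

    void leave() {
      MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
      _nthreads--;
      ml.notify_all();                             // wake waiters once the set drains
    }
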
--- a/src/share/vm/runtime/objectMonitor.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/objectMonitor.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -32,6 +32,7 @@
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
 #include "runtime/objectMonitor.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/runtime/orderAccess.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2014 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP
+#define SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP
+
+#include "runtime/orderAccess.hpp"
+
+// Linux
+#ifdef TARGET_OS_ARCH_linux_x86
+# include "orderAccess_linux_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_sparc
+# include "orderAccess_linux_sparc.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_zero
+# include "orderAccess_linux_zero.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_arm
+# include "orderAccess_linux_arm.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_ppc
+# include "orderAccess_linux_ppc.inline.hpp"
+#endif
+
+// Solaris
+#ifdef TARGET_OS_ARCH_solaris_x86
+# include "orderAccess_solaris_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_solaris_sparc
+# include "orderAccess_solaris_sparc.inline.hpp"
+#endif
+
+// Windows
+#ifdef TARGET_OS_ARCH_windows_x86
+# include "orderAccess_windows_x86.inline.hpp"
+#endif
+
+// AIX
+#ifdef TARGET_OS_ARCH_aix_ppc
+# include "orderAccess_aix_ppc.inline.hpp"
+#endif
+
+// BSD
+#ifdef TARGET_OS_ARCH_bsd_x86
+# include "orderAccess_bsd_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_bsd_zero
+# include "orderAccess_bsd_zero.inline.hpp"
+#endif
+
+#endif // SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP
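
With this umbrella header, shared code no longer repeats the TARGET_OS_ARCH dispatch (compare the javaFrameAnchor.hpp hunk above); it includes runtime/orderAccess.inline.hpp and uses the OrderAccess API directly. A minimal usage sketch (illustrative; not part of this patch):

    #include "runtime/orderAccess.inline.hpp"

    static volatile jint _published = 0;

    void publish() {
      OrderAccess::release_store(&_published, 1);          // prior stores become visible first
    }

    bool is_published() {
      return OrderAccess::load_acquire(&_published) != 0;  // pairs with the release store
    }
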
--- a/src/share/vm/runtime/os.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/os.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -32,6 +32,9 @@
 #include "gc_implementation/shared/vmGCOperations.hpp"
 #include "interpreter/interpreter.hpp"
 #include "memory/allocation.inline.hpp"
+#ifdef ASSERT
+#include "memory/guardedMemory.hpp"
+#endif
 #include "oops/oop.inline.hpp"
 #include "prims/jvm.h"
 #include "prims/jvm_misc.hpp"
@@ -46,6 +49,8 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
 #include "services/attachListener.hpp"
+#include "services/nmtCommon.hpp"
+#include "services/mallocTracker.hpp"
 #include "services/memTracker.hpp"
 #include "services/threadService.hpp"
 #include "utilities/defaultStream.hpp"
@@ -524,118 +529,16 @@
 
 
 
-#ifdef ASSERT
-#define space_before             (MallocCushion + sizeof(double))
-#define space_after              MallocCushion
-#define size_addr_from_base(p)   (size_t*)(p + space_before - sizeof(size_t))
-#define size_addr_from_obj(p)    ((size_t*)p - 1)
-// MallocCushion: size of extra cushion allocated around objects with +UseMallocOnly
-// NB: cannot be debug variable, because these aren't set from the command line until
-// *after* the first few allocs already happened
-#define MallocCushion            16
-#else
-#define space_before             0
-#define space_after              0
-#define size_addr_from_base(p)   should not use w/o ASSERT
-#define size_addr_from_obj(p)    should not use w/o ASSERT
-#define MallocCushion            0
-#endif
 #define paranoid                 0  /* only set to 1 if you suspect checking code has bug */
 
 #ifdef ASSERT
-inline size_t get_size(void* obj) {
-  size_t size = *size_addr_from_obj(obj);
-  if (size < 0) {
-    fatal(err_msg("free: size field of object #" PTR_FORMAT " was overwritten ("
-                  SIZE_FORMAT ")", obj, size));
-  }
-  return size;
-}
-
-u_char* find_cushion_backwards(u_char* start) {
-  u_char* p = start;
-  while (p[ 0] != badResourceValue || p[-1] != badResourceValue ||
-         p[-2] != badResourceValue || p[-3] != badResourceValue) p--;
-  // ok, we have four consecutive marker bytes; find start
-  u_char* q = p - 4;
-  while (*q == badResourceValue) q--;
-  return q + 1;
-}
-
-u_char* find_cushion_forwards(u_char* start) {
-  u_char* p = start;
-  while (p[0] != badResourceValue || p[1] != badResourceValue ||
-         p[2] != badResourceValue || p[3] != badResourceValue) p++;
-  // ok, we have four consecutive marker bytes; find end of cushion
-  u_char* q = p + 4;
-  while (*q == badResourceValue) q++;
-  return q - MallocCushion;
-}
-
-void print_neighbor_blocks(void* ptr) {
-  // find block allocated before ptr (not entirely crash-proof)
-  if (MallocCushion < 4) {
-    tty->print_cr("### cannot find previous block (MallocCushion < 4)");
-    return;
-  }
-  u_char* start_of_this_block = (u_char*)ptr - space_before;
-  u_char* end_of_prev_block_data = start_of_this_block - space_after -1;
-  // look for cushion in front of prev. block
-  u_char* start_of_prev_block = find_cushion_backwards(end_of_prev_block_data);
-  ptrdiff_t size = *size_addr_from_base(start_of_prev_block);
-  u_char* obj = start_of_prev_block + space_before;
-  if (size <= 0 ) {
-    // start is bad; mayhave been confused by OS data inbetween objects
-    // search one more backwards
-    start_of_prev_block = find_cushion_backwards(start_of_prev_block);
-    size = *size_addr_from_base(start_of_prev_block);
-    obj = start_of_prev_block + space_before;
-  }
-
-  if (start_of_prev_block + space_before + size + space_after == start_of_this_block) {
-    tty->print_cr("### previous object: " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", obj, size);
-  } else {
-    tty->print_cr("### previous object (not sure if correct): " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", obj, size);
-  }
-
-  // now find successor block
-  u_char* start_of_next_block = (u_char*)ptr + *size_addr_from_obj(ptr) + space_after;
-  start_of_next_block = find_cushion_forwards(start_of_next_block);
-  u_char* next_obj = start_of_next_block + space_before;
-  ptrdiff_t next_size = *size_addr_from_base(start_of_next_block);
-  if (start_of_next_block[0] == badResourceValue &&
-      start_of_next_block[1] == badResourceValue &&
-      start_of_next_block[2] == badResourceValue &&
-      start_of_next_block[3] == badResourceValue) {
-    tty->print_cr("### next object: " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", next_obj, next_size);
-  } else {
-    tty->print_cr("### next object (not sure if correct): " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", next_obj, next_size);
-  }
-}
-
-
-void report_heap_error(void* memblock, void* bad, const char* where) {
-  tty->print_cr("## nof_mallocs = " UINT64_FORMAT ", nof_frees = " UINT64_FORMAT, os::num_mallocs, os::num_frees);
-  tty->print_cr("## memory stomp: byte at " PTR_FORMAT " %s object " PTR_FORMAT, bad, where, memblock);
-  print_neighbor_blocks(memblock);
-  fatal("memory stomping error");
-}
-
-void verify_block(void* memblock) {
-  size_t size = get_size(memblock);
-  if (MallocCushion) {
-    u_char* ptr = (u_char*)memblock - space_before;
-    for (int i = 0; i < MallocCushion; i++) {
-      if (ptr[i] != badResourceValue) {
-        report_heap_error(memblock, ptr+i, "in front of");
-      }
-    }
-    u_char* end = (u_char*)memblock + size + space_after;
-    for (int j = -MallocCushion; j < 0; j++) {
-      if (end[j] != badResourceValue) {
-        report_heap_error(memblock, end+j, "after");
-      }
-    }
+static void verify_memory(void* ptr) {
+  GuardedMemory guarded(ptr);
+  if (!guarded.verify_guards()) {
+    tty->print_cr("## nof_mallocs = " UINT64_FORMAT ", nof_frees = " UINT64_FORMAT, os::num_mallocs, os::num_frees);
+    tty->print_cr("## memory stomp:");
+    guarded.print_on(tty);
+    fatal("memory stomping error");
   }
 }
 #endif
@@ -660,7 +563,11 @@
   return ptr;
 }
 
-void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
+void* os::malloc(size_t size, MEMFLAGS flags) {
+  return os::malloc(size, flags, CALLER_PC);
+}
+
+void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
   NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
   NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
 
@@ -686,16 +593,22 @@
     size = 1;
   }
 
-  const size_t alloc_size = size + space_before + space_after;
+  // NMT support
+  NMT_TrackingLevel level = MemTracker::tracking_level();
+  size_t            nmt_header_size = MemTracker::malloc_header_size(level);
 
-  if (size > alloc_size) { // Check for rollover.
+#ifndef ASSERT
+  const size_t alloc_size = size + nmt_header_size;
+#else
+  const size_t alloc_size = GuardedMemory::get_total_size(size + nmt_header_size);
+  if (size + nmt_header_size > alloc_size) { // Check for rollover.
     return NULL;
   }
+#endif
 
   NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
 
   u_char* ptr;
-
   if (MallocMaxTestWords > 0) {
     ptr = testMalloc(alloc_size);
   } else {
@@ -703,67 +616,73 @@
   }
 
 #ifdef ASSERT
-  if (ptr == NULL) return NULL;
-  if (MallocCushion) {
-    for (u_char* p = ptr; p < ptr + MallocCushion; p++) *p = (u_char)badResourceValue;
-    u_char* end = ptr + space_before + size;
-    for (u_char* pq = ptr+MallocCushion; pq < end; pq++) *pq = (u_char)uninitBlockPad;
-    for (u_char* q = end; q < end + MallocCushion; q++) *q = (u_char)badResourceValue;
+  if (ptr == NULL) {
+    return NULL;
   }
-  // put size just before data
-  *size_addr_from_base(ptr) = size;
+  // Wrap memory with guard
+  GuardedMemory guarded(ptr, size + nmt_header_size);
+  ptr = guarded.get_user_ptr();
 #endif
-  u_char* memblock = ptr + space_before;
-  if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
-    tty->print_cr("os::malloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, memblock);
+  if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
+    tty->print_cr("os::malloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, ptr);
     breakpoint();
   }
-  debug_only(if (paranoid) verify_block(memblock));
-  if (PrintMalloc && tty != NULL) tty->print_cr("os::malloc " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, memblock);
+  debug_only(if (paranoid) verify_memory(ptr));
+  if (PrintMalloc && tty != NULL) {
+    tty->print_cr("os::malloc " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, ptr);
+  }
 
-  // we do not track MallocCushion memory
-    MemTracker::record_malloc((address)memblock, size, memflags, caller == 0 ? CALLER_PC : caller);
-
-  return memblock;
+  // we do not track guard memory
+  return MemTracker::record_malloc((address)ptr, size, memflags, stack, level);
 }
 
+void* os::realloc(void *memblock, size_t size, MEMFLAGS flags) {
+  return os::realloc(memblock, size, flags, CALLER_PC);
+}
 
-void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, address caller) {
+void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
+
 #ifndef ASSERT
   NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
   NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
-  MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
-  void* ptr = ::realloc(memblock, size);
-  if (ptr != NULL) {
-    tkr.record((address)memblock, (address)ptr, size, memflags,
-     caller == 0 ? CALLER_PC : caller);
-  } else {
-    tkr.discard();
-  }
-  return ptr;
+  // NMT support
+  void* membase = MemTracker::record_free(memblock);
+  NMT_TrackingLevel level = MemTracker::tracking_level();
+  size_t  nmt_header_size = MemTracker::malloc_header_size(level);
+  void* ptr = ::realloc(membase, size + nmt_header_size);
+  return MemTracker::record_malloc(ptr, size, memflags, stack, level);
 #else
   if (memblock == NULL) {
-    return malloc(size, memflags, (caller == 0 ? CALLER_PC : caller));
+    return os::malloc(size, memflags, stack);
   }
   if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
     tty->print_cr("os::realloc caught " PTR_FORMAT, memblock);
     breakpoint();
   }
-  verify_block(memblock);
+  // NMT support
+  void* membase = MemTracker::malloc_base(memblock);
+  verify_memory(membase);
   NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
-  if (size == 0) return NULL;
+  if (size == 0) {
+    return NULL;
+  }
   // always move the block
-  void* ptr = malloc(size, memflags, caller == 0 ? CALLER_PC : caller);
-  if (PrintMalloc) tty->print_cr("os::remalloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, memblock, ptr);
+  void* ptr = os::malloc(size, memflags, stack);
+  if (PrintMalloc) {
+    tty->print_cr("os::remalloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, memblock, ptr);
+  }
   // Copy to new memory if malloc didn't fail
   if ( ptr != NULL ) {
-    memcpy(ptr, memblock, MIN2(size, get_size(memblock)));
-    if (paranoid) verify_block(ptr);
+    GuardedMemory guarded(MemTracker::malloc_base(memblock));
+    // Guard's user data contains NMT header
+    size_t memblock_size = guarded.get_user_size() - MemTracker::malloc_header_size(memblock);
+    memcpy(ptr, memblock, MIN2(size, memblock_size));
+    if (paranoid) verify_memory(MemTracker::malloc_base(ptr));
     if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
       tty->print_cr("os::realloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, ptr);
       breakpoint();
     }
-    free(memblock);
+    os::free(memblock);
   }
   return ptr;
 #endif
@@ -778,34 +697,22 @@
     if (tty != NULL) tty->print_cr("os::free caught " PTR_FORMAT, memblock);
     breakpoint();
   }
-  verify_block(memblock);
+  void* membase = MemTracker::record_free(memblock);
+  verify_memory(membase);
   NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
-  // Added by detlefs.
-  if (MallocCushion) {
-    u_char* ptr = (u_char*)memblock - space_before;
-    for (u_char* p = ptr; p < ptr + MallocCushion; p++) {
-      guarantee(*p == badResourceValue,
-                "Thing freed should be malloc result.");
-      *p = (u_char)freeBlockPad;
-    }
-    size_t size = get_size(memblock);
-    inc_stat_counter(&free_bytes, size);
-    u_char* end = ptr + space_before + size;
-    for (u_char* q = end; q < end + MallocCushion; q++) {
-      guarantee(*q == badResourceValue,
-                "Thing freed should be malloc result.");
-      *q = (u_char)freeBlockPad;
-    }
-    if (PrintMalloc && tty != NULL)
-      fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, (uintptr_t)memblock);
-  } else if (PrintMalloc && tty != NULL) {
-    // tty->print_cr("os::free %p", memblock);
-    fprintf(stderr, "os::free " PTR_FORMAT "\n", (uintptr_t)memblock);
+
+  GuardedMemory guarded(membase);
+  size_t size = guarded.get_user_size();
+  inc_stat_counter(&free_bytes, size);
+  membase = guarded.release_for_freeing();
+  if (PrintMalloc && tty != NULL) {
+      fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, (uintptr_t)membase);
   }
+  ::free(membase);
+#else
+  void* membase = MemTracker::record_free(memblock);
+  ::free(membase);
 #endif
-  MemTracker::record_free((address)memblock, memflags);
-
-  ::free((char*)memblock - space_before);
 }
 
 void os::init_random(long initval) {
@@ -1525,7 +1432,7 @@
 char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
   char* result = pd_reserve_memory(bytes, addr, alignment_hint);
   if (result != NULL) {
-    MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
+    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
   }
 
   return result;
@@ -1535,7 +1442,7 @@
    MEMFLAGS flags) {
   char* result = pd_reserve_memory(bytes, addr, alignment_hint);
   if (result != NULL) {
-    MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
+    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
     MemTracker::record_virtual_memory_type((address)result, flags);
   }
 
@@ -1545,7 +1452,7 @@
 char* os::attempt_reserve_memory_at(size_t bytes, char* addr) {
   char* result = pd_attempt_reserve_memory_at(bytes, addr);
   if (result != NULL) {
-    MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
+    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
   }
   return result;
 }
@@ -1585,23 +1492,29 @@
 }
 
 bool os::uncommit_memory(char* addr, size_t bytes) {
-  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
-  bool res = pd_uncommit_memory(addr, bytes);
-  if (res) {
-    tkr.record((address)addr, bytes);
+  bool res;
+  if (MemTracker::tracking_level() > NMT_minimal) {
+    Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
+    res = pd_uncommit_memory(addr, bytes);
+    if (res) {
+      tkr.record((address)addr, bytes);
+    }
   } else {
-    tkr.discard();
+    res = pd_uncommit_memory(addr, bytes);
   }
   return res;
 }
 
 bool os::release_memory(char* addr, size_t bytes) {
-  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
-  bool res = pd_release_memory(addr, bytes);
-  if (res) {
-    tkr.record((address)addr, bytes);
+  bool res;
+  if (MemTracker::tracking_level() > NMT_minimal) {
+    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
+    res = pd_release_memory(addr, bytes);
+    if (res) {
+      tkr.record((address)addr, bytes);
+    }
   } else {
-    tkr.discard();
+    res = pd_release_memory(addr, bytes);
   }
   return res;
 }
@@ -1612,7 +1525,7 @@
                            bool allow_exec) {
   char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
   if (result != NULL) {
-    MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, mtNone, CALLER_PC);
+    MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
   }
   return result;
 }
@@ -1625,12 +1538,15 @@
 }
 
 bool os::unmap_memory(char *addr, size_t bytes) {
-  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
-  bool result = pd_unmap_memory(addr, bytes);
-  if (result) {
-    tkr.record((address)addr, bytes);
+  bool result;
+  if (MemTracker::tracking_level() > NMT_minimal) {
+    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
+    result = pd_unmap_memory(addr, bytes);
+    if (result) {
+      tkr.record((address)addr, bytes);
+    }
   } else {
-    tkr.discard();
+    result = pd_unmap_memory(addr, bytes);
   }
   return result;
 }
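
The three virtual-memory paths above (uncommit, release, unmap) now share one shape: a tracker object is only obtained when NMT runs above the minimal level, and a record is emitted only if the platform operation succeeds. A minimal standalone sketch of that shape follows; TrackingLevel, Tracker and pd_release here are illustrative placeholders, not the HotSpot types.

#include <cstddef>
#include <cstdio>

enum TrackingLevel { TL_off = 0, TL_minimal = 1, TL_summary = 2, TL_detail = 3 };

static TrackingLevel g_level = TL_summary;

struct Tracker {
  // In the real code the tracker is obtained from MemTracker before the
  // operation so the record carries a consistent "before" state.
  void record(void* addr, size_t bytes) {
    std::printf("released %zu bytes at %p\n", bytes, addr);
  }
};

static bool pd_release(void* addr, size_t bytes) {
  (void)addr; (void)bytes;
  return true;  // pretend the underlying OS call succeeded
}

static bool release_memory(void* addr, size_t bytes) {
  bool res;
  if (g_level > TL_minimal) {
    Tracker tkr;                     // prepared before the operation
    res = pd_release(addr, bytes);
    if (res) {
      tkr.record(addr, bytes);       // record only successful releases
    }
  } else {
    res = pd_release(addr, bytes);   // fast path: no tracker at all
  }
  return res;
}

int main() {
  char buf[16];
  return release_memory(buf, sizeof(buf)) ? 0 : 1;
}
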
--- a/src/share/vm/runtime/os.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/os.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -66,6 +66,8 @@
 class Event;
 class DLL;
 class FileHandle;
+class NativeCallStack;
+
 template<class E> class GrowableArray;
 
 // %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. -- Rose
@@ -97,8 +99,11 @@
 // Typedef for structured exception handling support
 typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
 
+class MallocTracker;
+
 class os: AllStatic {
   friend class VMStructs;
+  friend class MallocTracker;
 #ifdef GRAAL
   friend class Arguments; // need access to format_boot_path
 #endif
@@ -164,7 +169,10 @@
   // Override me as needed
   static int    file_name_strcmp(const char* s1, const char* s2);
 
+  // get/unset environment variable
   static bool getenv(const char* name, char* buffer, int len);
+  static bool unsetenv(const char* name);
+
   static bool have_special_privileges();
 
   static jlong  javaTimeMillis();
@@ -210,8 +218,14 @@
 
   // Interface for detecting multiprocessor system
   static inline bool is_MP() {
-    assert(_processor_count > 0, "invalid processor count");
-    return _processor_count > 1 || AssumeMP;
+    // During bootstrap if _processor_count is not yet initialized
+    // we claim to be MP as that is safest. If any platform has a
+    // stub generator that might be triggered in this phase, and for
+    // which being declared MP when in fact not is a problem, then
+    // the bootstrap routine for the stub generator needs to check
+    // the processor count directly and leave the bootstrap routine
+    // in place until called after initialization has occurred.
+    return (_processor_count != 1) || AssumeMP;
   }
   static julong available_memory();
   static julong physical_memory();
@@ -657,12 +671,20 @@
   static void* thread_local_storage_at(int index);
   static void  free_thread_local_storage(int index);
 
-  // Stack walk
-  static address get_caller_pc(int n = 0);
+  // Retrieve native stack frames.
+  // Parameters:
+  //   stack:  an array in which to store the frame addresses.
+  //   size:   capacity of the above array (maximum number of frames).
+  //   toSkip: number of stack frames to skip at the beginning.
+  // Return: number of stack frames captured.
+  static int get_native_stack(address* stack, int size, int toSkip = 0);
 
   // General allocation (must be MT-safe)
-  static void* malloc  (size_t size, MEMFLAGS flags, address caller_pc = 0);
-  static void* realloc (void *memblock, size_t size, MEMFLAGS flags, address caller_pc = 0);
+  static void* malloc  (size_t size, MEMFLAGS flags, const NativeCallStack& stack);
+  static void* malloc  (size_t size, MEMFLAGS flags);
+  static void* realloc (void *memblock, size_t size, MEMFLAGS flag, const NativeCallStack& stack);
+  static void* realloc (void *memblock, size_t size, MEMFLAGS flag);
+
   static void  free    (void *memblock, MEMFLAGS flags = mtNone);
   static bool  check_heap(bool force = false);      // verify C heap integrity
   static char* strdup(const char *, MEMFLAGS flags = mtInternal);  // Like strdup
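
The new malloc/realloc overloads accept a NativeCallStack so each block can be attributed to the call site that requested it. A hedged, self-contained sketch of that idea; CallStack, capture_stack and record_malloc are placeholders, not the NMT API.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

typedef unsigned char* address;

struct CallStack {                   // stand-in for NativeCallStack
  static const int max_depth = 4;
  address frames[max_depth];
  int     depth;
};

static CallStack capture_stack() {   // stand-in for real stack walking
  CallStack cs;
  cs.depth = 0;                      // a real implementation would fill frames[]
  return cs;
}

static void record_malloc(void* p, size_t size, const CallStack& stack) {
  std::printf("alloc %zu bytes at %p (%d frames recorded)\n", size, p, stack.depth);
}

static void* tracked_malloc(size_t size, const CallStack& stack) {
  void* p = std::malloc(size);
  if (p != NULL) {
    record_malloc(p, size, stack);   // attribute the block to its call site
  }
  return p;
}

int main() {
  void* p = tracked_malloc(32, capture_stack());
  std::free(p);
  return p == NULL ? 1 : 0;
}
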
--- a/src/share/vm/runtime/perfMemory.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/perfMemory.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -28,6 +28,7 @@
 #include "runtime/java.hpp"
 #include "runtime/mutex.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 #include "runtime/perfData.hpp"
 #include "runtime/perfMemory.hpp"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/runtime/prefetch.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_PREFETCH_INLINE_HPP
+#define SHARE_VM_RUNTIME_PREFETCH_INLINE_HPP
+
+#include "runtime/prefetch.hpp"
+
+// Linux
+#ifdef TARGET_OS_ARCH_linux_x86
+# include "prefetch_linux_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_sparc
+# include "prefetch_linux_sparc.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_zero
+# include "prefetch_linux_zero.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_arm
+# include "prefetch_linux_arm.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_linux_ppc
+# include "prefetch_linux_ppc.inline.hpp"
+#endif
+
+// Solaris
+#ifdef TARGET_OS_ARCH_solaris_x86
+# include "prefetch_solaris_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_solaris_sparc
+# include "prefetch_solaris_sparc.inline.hpp"
+#endif
+
+// Windows
+#ifdef TARGET_OS_ARCH_windows_x86
+# include "prefetch_windows_x86.inline.hpp"
+#endif
+
+// AIX
+#ifdef TARGET_OS_ARCH_aix_ppc
+# include "prefetch_aix_ppc.inline.hpp"
+#endif
+
+// BSD
+#ifdef TARGET_OS_ARCH_bsd_x86
+# include "prefetch_bsd_x86.inline.hpp"
+#endif
+#ifdef TARGET_OS_ARCH_bsd_zero
+# include "prefetch_bsd_zero.inline.hpp"
+#endif
+
+#endif // SHARE_VM_RUNTIME_PREFETCH_INLINE_HPP
--- a/src/share/vm/runtime/safepoint.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/safepoint.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -41,6 +41,7 @@
 #include "runtime/frame.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/signature.hpp"
@@ -49,7 +50,6 @@
 #include "runtime/sweeper.hpp"
 #include "runtime/synchronizer.hpp"
 #include "runtime/thread.inline.hpp"
-#include "services/memTracker.hpp"
 #include "services/runtimeService.hpp"
 #include "utilities/events.hpp"
 #include "utilities/macros.hpp"
@@ -75,7 +75,7 @@
 #endif
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
-#include "gc_implementation/shared/concurrentGCThread.hpp"
+#include "gc_implementation/shared/suspendibleThreadSet.hpp"
 #endif // INCLUDE_ALL_GCS
 #ifdef COMPILER1
 #include "c1/c1_globals.hpp"
@@ -112,7 +112,7 @@
     // more-general mechanism below.  DLD (01/05).
     ConcurrentMarkSweepThread::synchronize(false);
   } else if (UseG1GC) {
-    ConcurrentGCThread::safepoint_synchronize();
+    SuspendibleThreadSet::synchronize();
   }
 #endif // INCLUDE_ALL_GCS
 
@@ -488,7 +488,7 @@
   if (UseConcMarkSweepGC) {
     ConcurrentMarkSweepThread::desynchronize(false);
   } else if (UseG1GC) {
-    ConcurrentGCThread::safepoint_desynchronize();
+    SuspendibleThreadSet::desynchronize();
   }
 #endif // INCLUDE_ALL_GCS
   // record this time so VMThread can keep track of how much time has elapsed
@@ -546,10 +546,6 @@
     TraceTime t7("purging class loader data graph", TraceSafepointCleanupTime);
     ClassLoaderDataGraph::purge_if_needed();
   }
-
-  if (MemTracker::is_on()) {
-    MemTracker::sync();
-  }
 }
 
 
--- a/src/share/vm/runtime/serviceThread.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/serviceThread.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -28,6 +28,7 @@
 #include "runtime/serviceThread.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "prims/jvmtiImpl.hpp"
+#include "services/allocationContextService.hpp"
 #include "services/gcNotifier.hpp"
 #include "services/diagnosticArgument.hpp"
 #include "services/diagnosticFramework.hpp"
@@ -86,6 +87,7 @@
     bool has_jvmti_events = false;
     bool has_gc_notification_event = false;
     bool has_dcmd_notification_event = false;
+    bool acs_notify = false;
     JvmtiDeferredEvent jvmti_event;
     {
       // Need state transition ThreadBlockInVM so that this thread
@@ -102,7 +104,8 @@
       while (!(sensors_changed = LowMemoryDetector::has_pending_requests()) &&
              !(has_jvmti_events = JvmtiDeferredEventQueue::has_events()) &&
               !(has_gc_notification_event = GCNotifier::has_event()) &&
-              !(has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification())) {
+              !(has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification()) &&
+             !(acs_notify = AllocationContextService::should_notify())) {
         // wait until one of the sensors has pending requests, or there is a
         // pending JVMTI event or JMX GC notification to post
         Service_lock->wait(Mutex::_no_safepoint_check_flag);
@@ -128,6 +131,10 @@
     if(has_dcmd_notification_event) {
       DCmdFactory::send_notification(CHECK);
     }
+
+    if (acs_notify) {
+      AllocationContextService::notify(CHECK);
+    }
   }
 }
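
The service thread's wait loop above keeps sleeping until at least one kind of work is pending; the new allocation-context notification is simply one more disjunct in the predicate. A minimal standalone sketch of that loop shape using C++11 primitives (the flags and handlers are placeholders, not the HotSpot ones):

#include <condition_variable>
#include <mutex>
#include <cstdio>

static std::mutex              lock;
static std::condition_variable cv;
static bool sensors_changed = false;
static bool has_gc_event    = false;
static bool stop_requested  = false;

static void handle_sensors()  { std::puts("sensors"); }
static void handle_gc_event() { std::puts("gc notification"); }

static void service_loop() {
  for (;;) {
    bool do_sensors, do_gc;
    {
      std::unique_lock<std::mutex> ml(lock);
      // Sleep until some producer posted work (or shutdown was requested).
      cv.wait(ml, [] { return sensors_changed || has_gc_event || stop_requested; });
      if (stop_requested) return;
      // Snapshot and clear the flags while still holding the lock.
      do_sensors = sensors_changed;  sensors_changed = false;
      do_gc      = has_gc_event;     has_gc_event    = false;
    }
    // Handle each kind of work outside the lock, as the real loop does.
    if (do_sensors) handle_sensors();
    if (do_gc)      handle_gc_event();
  }
}

int main() {
  { std::lock_guard<std::mutex> g(lock); stop_requested = true; }
  cv.notify_one();
  service_loop();   // returns immediately: stop was already requested
  return 0;
}
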
 
--- a/src/share/vm/runtime/sharedRuntime.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -482,6 +482,7 @@
 
 address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) {
   assert(frame::verify_return_pc(return_address), err_msg("must be a return address: " INTPTR_FORMAT, return_address));
+  assert(thread->frames_to_pop_failed_realloc() == 0 || Interpreter::contains(return_address), "missed frames to pop?");
 
   // Reset method handle flag.
   thread->set_is_method_handle_return(false);
@@ -1280,10 +1281,7 @@
          (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
          ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
 
-  // We do not patch the call site if the caller nmethod has been made non-entrant.
-  if (!caller_nm->is_in_use()) {
-    return callee_method;
-  }
+  assert(caller_nm->is_alive(), "It should be alive");
 
 #ifndef PRODUCT
   // tracing/debugging/statistics
@@ -1353,13 +1351,11 @@
 
     // Now that we are ready to patch if the Method* was redefined then
     // don't update call site and let the caller retry.
-    // Don't update call site if caller nmethod has been made non-entrant
-    // as it is a waste of time.
     // Don't update call site if callee nmethod was unloaded or deoptimized.
     // Don't update call site if callee nmethod was replaced by another nmethod,
     // which may happen once multiple alive nmethods per method (tiered
     // compilation) are supported.
-    if (!callee_method->is_old() && caller_nm->is_in_use() &&
+    if (!callee_method->is_old() &&
         (callee_nm == NULL || callee_nm->is_in_use() && (callee_method->code() == callee_nm))) {
 #ifdef ASSERT
       // We must not try to patch to jump to an already unloaded method.
@@ -1560,14 +1556,12 @@
   // out of scope.
   JvmtiDynamicCodeEventCollector event_collector;
 
-  // Update inline cache to megamorphic. Skip update if caller has been
-  // made non-entrant or we are called from interpreted.
+  // Update inline cache to megamorphic. Skip update if we are called from interpreted.
   { MutexLocker ml_patch (CompiledIC_lock);
     RegisterMap reg_map(thread, false);
     frame caller_frame = thread->last_frame().sender(&reg_map);
     CodeBlob* cb = caller_frame.cb();
-    if (cb->is_nmethod() && ((nmethod*)cb)->is_in_use()) {
-      // Not a non-entrant nmethod, so find inline_cache
+    if (cb->is_nmethod()) {
       CompiledIC* inline_cache = CompiledIC_before(((nmethod*)cb), caller_frame.pc());
       bool should_be_mono = false;
       if (inline_cache->is_optimized()) {
@@ -1710,19 +1704,13 @@
       // resolve is only done once.
 
       MutexLocker ml(CompiledIC_lock);
-      //
-      // We do not patch the call site if the nmethod has been made non-entrant
-      // as it is a waste of time
-      //
-      if (caller_nm->is_in_use()) {
-        if (is_static_call) {
-          CompiledStaticCall* ssc= compiledStaticCall_at(call_addr);
-          ssc->set_to_clean();
-        } else {
-          // compiled, dispatched call (which used to call an interpreted method)
-          CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
-          inline_cache->set_to_clean();
-        }
+      if (is_static_call) {
+        CompiledStaticCall* ssc= compiledStaticCall_at(call_addr);
+        ssc->set_to_clean();
+      } else {
+        // compiled, dispatched call (which used to call an interpreted method)
+        CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
+        inline_cache->set_to_clean();
       }
     }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/runtime/sharedRuntimeMath.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_SHAREDRUNTIMEMATH_HPP
+#define SHARE_VM_RUNTIME_SHAREDRUNTIMEMATH_HPP
+
+#include <math.h>
+
+// Used to access the lower/higher 32 bits of a double
+typedef union {
+    double d;
+    struct {
+#ifdef VM_LITTLE_ENDIAN
+      int lo;
+      int hi;
+#else
+      int hi;
+      int lo;
+#endif
+    } split;
+} DoubleIntConv;
+
+static inline int high(double d) {
+  DoubleIntConv x;
+  x.d = d;
+  return x.split.hi;
+}
+
+static inline int low(double d) {
+  DoubleIntConv x;
+  x.d = d;
+  return x.split.lo;
+}
+
+static inline void set_high(double* d, int high) {
+  DoubleIntConv conv;
+  conv.d = *d;
+  conv.split.hi = high;
+  *d = conv.d;
+}
+
+static inline void set_low(double* d, int low) {
+  DoubleIntConv conv;
+  conv.d = *d;
+  conv.split.lo = low;
+  *d = conv.d;
+}
+
+static double copysignA(double x, double y) {
+  DoubleIntConv convX;
+  convX.d = x;
+  convX.split.hi = (convX.split.hi & 0x7fffffff) | (high(y) & 0x80000000);
+  return convX.d;
+}
+
+/*
+ * ====================================================
+ * Copyright (c) 1998 Oracle and/or its affiliates. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * scalbn (double x, int n)
+ * scalbn(x,n) returns x* 2**n  computed by  exponent
+ * manipulation rather than by actually performing an
+ * exponentiation or a multiplication.
+ */
+
+static const double
+two54   =  1.80143985094819840000e+16, /* 0x43500000, 0x00000000 */
+twom54  =  5.55111512312578270212e-17, /* 0x3C900000, 0x00000000 */
+hugeX  = 1.0e+300,
+tiny   = 1.0e-300;
+
+static double scalbnA(double x, int n) {
+  int  k,hx,lx;
+  hx = high(x);
+  lx = low(x);
+  k = (hx&0x7ff00000)>>20;              /* extract exponent */
+  if (k==0) {                           /* 0 or subnormal x */
+    if ((lx|(hx&0x7fffffff))==0) return x; /* +-0 */
+    x *= two54;
+    hx = high(x);
+    k = ((hx&0x7ff00000)>>20) - 54;
+    if (n< -50000) return tiny*x;       /*underflow*/
+  }
+  if (k==0x7ff) return x+x;             /* NaN or Inf */
+  k = k+n;
+  if (k > 0x7fe) return hugeX*copysignA(hugeX,x); /* overflow  */
+  if (k > 0) {                          /* normal result */
+    set_high(&x, (hx&0x800fffff)|(k<<20));
+    return x;
+  }
+  if (k <= -54) {
+    if (n > 50000)      /* in case integer overflow in n+k */
+      return hugeX*copysignA(hugeX,x);  /*overflow*/
+    else return tiny*copysignA(tiny,x); /*underflow*/
+  }
+  k += 54;                              /* subnormal result */
+  set_high(&x, (hx&0x800fffff)|(k<<20));
+  return x*twom54;
+}
+
+#endif // SHARE_VM_RUNTIME_SHAREDRUNTIMEMATH_HPP
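
The DoubleIntConv union above replaces the old __HI/__LO pointer-casting macros with a single, explicit access path to the two 32-bit halves of a double. A standalone sketch of the same word-splitting technique follows, assuming the little-endian layout that VM_LITTLE_ENDIAN would select; it scales a normal double by 2**n exactly the way scalbnA's normal-result branch does (no subnormal or overflow handling).

#include <cstdio>

// Same layout trick as DoubleIntConv above, little-endian variant.
typedef union {
  double d;
  struct { int lo; int hi; } split;
} DoubleWords;

static int high_word(double d) { DoubleWords w; w.d = d; return w.split.hi; }

static void set_high_word(double* d, int hi) {
  DoubleWords w; w.d = *d; w.split.hi = hi; *d = w.d;
}

// Normal-case scalbn: add n to the 11-bit biased exponent held in bits
// 20..30 of the high word (unlike scalbnA, no subnormal/overflow handling).
static double scale_by_pow2(double x, int n) {
  int hx = high_word(x);
  int k  = (hx & 0x7ff00000) >> 20;            // biased exponent
  set_high_word(&x, (hx & 0x800fffff) | ((k + n) << 20));
  return x;
}

int main() {
  std::printf("%g\n", scale_by_pow2(1.5, 4));   // prints 24
  std::printf("%g\n", scale_by_pow2(24.0, -3)); // prints 3
  return 0;
}
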
--- a/src/share/vm/runtime/sharedRuntimeTrans.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/sharedRuntimeTrans.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,81 +40,11 @@
 // generated; can not figure out how to turn down optimization for one
 // file in the IDE on Windows
 #ifdef WIN32
+# pragma warning( disable: 4748 ) // /GS can not protect parameters and local variables from local buffer overrun because optimizations are disabled in function
 # pragma optimize ( "", off )
 #endif
 
-#include <math.h>
-
-// VM_LITTLE_ENDIAN is #defined appropriately in the Makefiles
-// [jk] this is not 100% correct because the float word order may different
-// from the byte order (e.g. on ARM)
-#ifdef VM_LITTLE_ENDIAN
-# define __HI(x) *(1+(int*)&x)
-# define __LO(x) *(int*)&x
-#else
-# define __HI(x) *(int*)&x
-# define __LO(x) *(1+(int*)&x)
-#endif
-
-#if !defined(AIX)
-double copysign(double x, double y) {
-  __HI(x) = (__HI(x)&0x7fffffff)|(__HI(y)&0x80000000);
-  return x;
-}
-#endif
-
-/*
- * ====================================================
- * Copyright (c) 1998 Oracle and/or its affiliates. All rights reserved.
- *
- * Developed at SunSoft, a Sun Microsystems, Inc. business.
- * Permission to use, copy, modify, and distribute this
- * software is freely granted, provided that this notice
- * is preserved.
- * ====================================================
- */
-
-/*
- * scalbn (double x, int n)
- * scalbn(x,n) returns x* 2**n  computed by  exponent
- * manipulation rather than by actually performing an
- * exponentiation or a multiplication.
- */
-
-static const double
-two54   =  1.80143985094819840000e+16, /* 0x43500000, 0x00000000 */
-  twom54  =  5.55111512312578270212e-17, /* 0x3C900000, 0x00000000 */
-  hugeX   = 1.0e+300,
-  tiny   = 1.0e-300;
-
-#if !defined(AIX)
-double scalbn (double x, int n) {
-  int  k,hx,lx;
-  hx = __HI(x);
-  lx = __LO(x);
-  k = (hx&0x7ff00000)>>20;              /* extract exponent */
-  if (k==0) {                           /* 0 or subnormal x */
-    if ((lx|(hx&0x7fffffff))==0) return x; /* +-0 */
-    x *= two54;
-    hx = __HI(x);
-    k = ((hx&0x7ff00000)>>20) - 54;
-    if (n< -50000) return tiny*x;       /*underflow*/
-  }
-  if (k==0x7ff) return x+x;             /* NaN or Inf */
-  k = k+n;
-  if (k >  0x7fe) return hugeX*copysign(hugeX,x); /* overflow  */
-  if (k > 0)                            /* normal result */
-    {__HI(x) = (hx&0x800fffff)|(k<<20); return x;}
-  if (k <= -54) {
-    if (n > 50000)      /* in case integer overflow in n+k */
-      return hugeX*copysign(hugeX,x);   /*overflow*/
-    else return tiny*copysign(tiny,x);  /*underflow*/
-  }
-  k += 54;                              /* subnormal result */
-  __HI(x) = (hx&0x800fffff)|(k<<20);
-  return x*twom54;
-}
-#endif
+#include "runtime/sharedRuntimeMath.hpp"
 
 /* __ieee754_log(x)
  * Return the logarithm of x
@@ -185,8 +115,8 @@
   int k,hx,i,j;
   unsigned lx;
 
-  hx = __HI(x);               /* high word of x */
-  lx = __LO(x);               /* low  word of x */
+  hx = high(x);               /* high word of x */
+  lx = low(x);                /* low  word of x */
 
   k=0;
   if (hx < 0x00100000) {                   /* x < 2**-1022  */
@@ -194,13 +124,13 @@
       return -two54/zero;             /* log(+-0)=-inf */
     if (hx<0) return (x-x)/zero;   /* log(-#) = NaN */
     k -= 54; x *= two54; /* subnormal number, scale up x */
-    hx = __HI(x);             /* high word of x */
+    hx = high(x);             /* high word of x */
   }
   if (hx >= 0x7ff00000) return x+x;
   k += (hx>>20)-1023;
   hx &= 0x000fffff;
   i = (hx+0x95f64)&0x100000;
-  __HI(x) = hx|(i^0x3ff00000);        /* normalize x or x/2 */
+  set_high(&x, hx|(i^0x3ff00000)); /* normalize x or x/2 */
   k += (i>>20);
   f = x-1.0;
   if((0x000fffff&(2+hx))<3) {  /* |f| < 2**-20 */
@@ -279,8 +209,8 @@
   int i,k,hx;
   unsigned lx;
 
-  hx = __HI(x);       /* high word of x */
-  lx = __LO(x);       /* low word of x */
+  hx = high(x);       /* high word of x */
+  lx = low(x);        /* low word of x */
 
   k=0;
   if (hx < 0x00100000) {                  /* x < 2**-1022  */
@@ -288,14 +218,14 @@
       return -two54/zero;             /* log(+-0)=-inf */
     if (hx<0) return (x-x)/zero;        /* log(-#) = NaN */
     k -= 54; x *= two54; /* subnormal number, scale up x */
-    hx = __HI(x);                /* high word of x */
+    hx = high(x);                /* high word of x */
   }
   if (hx >= 0x7ff00000) return x+x;
   k += (hx>>20)-1023;
   i  = ((unsigned)k&0x80000000)>>31;
   hx = (hx&0x000fffff)|((0x3ff-i)<<20);
   y  = (double)(k+i);
-  __HI(x) = hx;
+  set_high(&x, hx);
   z  = y*log10_2lo + ivln10*__ieee754_log(x);
   return  z+y*log10_2hi;
 }
@@ -390,14 +320,14 @@
   int k=0,xsb;
   unsigned hx;
 
-  hx  = __HI(x);        /* high word of x */
+  hx  = high(x);                /* high word of x */
   xsb = (hx>>31)&1;             /* sign bit of x */
   hx &= 0x7fffffff;             /* high word of |x| */
 
   /* filter out non-finite argument */
   if(hx >= 0x40862E42) {                        /* if |x|>=709.78... */
     if(hx>=0x7ff00000) {
-      if(((hx&0xfffff)|__LO(x))!=0)
+      if(((hx&0xfffff)|low(x))!=0)
         return x+x;             /* NaN */
       else return (xsb==0)? x:0.0;      /* exp(+-inf)={inf,0} */
     }
@@ -428,10 +358,10 @@
   if(k==0)      return one-((x*c)/(c-2.0)-x);
   else          y = one-((lo-(x*c)/(2.0-c))-hi);
   if(k >= -1021) {
-    __HI(y) += (k<<20); /* add k to y's exponent */
+    set_high(&y, high(y) + (k<<20)); /* add k to y's exponent */
     return y;
   } else {
-    __HI(y) += ((k+1000)<<20);/* add k to y's exponent */
+    set_high(&y, high(y) + ((k+1000)<<20)); /* add k to y's exponent */
     return y*twom1000;
   }
 }
@@ -518,8 +448,8 @@
   unsigned lx,ly;
 
   i0 = ((*(int*)&one)>>29)^1; i1=1-i0;
-  hx = __HI(x); lx = __LO(x);
-  hy = __HI(y); ly = __LO(y);
+  hx = high(x); lx = low(x);
+  hy = high(y); ly = low(y);
   ix = hx&0x7fffffff;  iy = hy&0x7fffffff;
 
   /* y==zero: x**0 = 1 */
@@ -619,14 +549,14 @@
     u = ivln2_h*t;      /* ivln2_h has 21 sig. bits */
     v = t*ivln2_l-w*ivln2;
     t1 = u+v;
-    __LO(t1) = 0;
+    set_low(&t1, 0);
     t2 = v-(t1-u);
   } else {
     double ss,s2,s_h,s_l,t_h,t_l;
     n = 0;
     /* take care subnormal number */
     if(ix<0x00100000)
-      {ax *= two53; n -= 53; ix = __HI(ax); }
+      {ax *= two53; n -= 53; ix = high(ax); }
     n  += ((ix)>>20)-0x3ff;
     j  = ix&0x000fffff;
     /* determine interval */
@@ -634,17 +564,17 @@
     if(j<=0x3988E) k=0;         /* |x|<sqrt(3/2) */
     else if(j<0xBB67A) k=1;     /* |x|<sqrt(3)   */
     else {k=0;n+=1;ix -= 0x00100000;}
-    __HI(ax) = ix;
+    set_high(&ax, ix);
 
     /* compute ss = s_h+s_l = (x-1)/(x+1) or (x-1.5)/(x+1.5) */
     u = ax-bp[k];               /* bp[0]=1.0, bp[1]=1.5 */
     v = one/(ax+bp[k]);
     ss = u*v;
     s_h = ss;
-    __LO(s_h) = 0;
+    set_low(&s_h, 0);
     /* t_h=ax+bp[k] High */
     t_h = zeroX;
-    __HI(t_h)=((ix>>1)|0x20000000)+0x00080000+(k<<18);
+    set_high(&t_h, ((ix>>1)|0x20000000)+0x00080000+(k<<18));
     t_l = ax - (t_h-bp[k]);
     s_l = v*((u-s_h*t_h)-s_h*t_l);
     /* compute log(ax) */
@@ -653,32 +583,32 @@
     r += s_l*(s_h+ss);
     s2  = s_h*s_h;
     t_h = 3.0+s2+r;
-    __LO(t_h) = 0;
+    set_low(&t_h, 0);
     t_l = r-((t_h-3.0)-s2);
     /* u+v = ss*(1+...) */
     u = s_h*t_h;
     v = s_l*t_h+t_l*ss;
     /* 2/(3log2)*(ss+...) */
     p_h = u+v;
-    __LO(p_h) = 0;
+    set_low(&p_h, 0);
     p_l = v-(p_h-u);
     z_h = cp_h*p_h;             /* cp_h+cp_l = 2/(3*log2) */
     z_l = cp_l*p_h+p_l*cp+dp_l[k];
     /* log2(ax) = (ss+..)*2/(3*log2) = n + dp_h + z_h + z_l */
     t = (double)n;
     t1 = (((z_h+z_l)+dp_h[k])+t);
-    __LO(t1) = 0;
+    set_low(&t1, 0);
     t2 = z_l-(((t1-t)-dp_h[k])-z_h);
   }
 
   /* split up y into y1+y2 and compute (y1+y2)*(t1+t2) */
   y1  = y;
-  __LO(y1) = 0;
+  set_low(&y1, 0);
   p_l = (y-y1)*t1+y*t2;
   p_h = y1*t1;
   z = p_l+p_h;
-  j = __HI(z);
-  i = __LO(z);
+  j = high(z);
+  i = low(z);
   if (j>=0x40900000) {                          /* z >= 1024 */
     if(((j-0x40900000)|i)!=0)                   /* if z > 1024 */
       return s*hugeX*hugeX;                     /* overflow */
@@ -702,13 +632,13 @@
     n = j+(0x00100000>>(k+1));
     k = ((n&0x7fffffff)>>20)-0x3ff;     /* new k for n */
     t = zeroX;
-    __HI(t) = (n&~(0x000fffff>>k));
+    set_high(&t, (n&~(0x000fffff>>k)));
     n = ((n&0x000fffff)|0x00100000)>>(20-k);
     if(j<0) n = -n;
     p_h -= t;
   }
   t = p_l+p_h;
-  __LO(t) = 0;
+  set_low(&t, 0);
   u = t*lg2_h;
   v = (p_l-(t-p_h))*lg2+t*lg2_l;
   z = u+v;
@@ -717,10 +647,10 @@
   t1  = z - t*(P1+t*(P2+t*(P3+t*(P4+t*P5))));
   r  = (z*t1)/(t1-two)-(w+z*w);
   z  = one-(r-z);
-  j  = __HI(z);
+  j  = high(z);
   j += (n<<20);
-  if((j>>20)<=0) z = scalbn(z,n);       /* subnormal output */
-  else __HI(z) += (n<<20);
+  if((j>>20)<=0) z = scalbnA(z,n);       /* subnormal output */
+  else set_high(&z, high(z) + (n<<20));
   return s*z;
 }
 
--- a/src/share/vm/runtime/sharedRuntimeTrig.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/sharedRuntimeTrig.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -63,63 +63,7 @@
 #define SAFEBUF
 #endif
 
-#include <math.h>
-
-// VM_LITTLE_ENDIAN is #defined appropriately in the Makefiles
-// [jk] this is not 100% correct because the float word order may different
-// from the byte order (e.g. on ARM)
-#ifdef VM_LITTLE_ENDIAN
-# define __HI(x) *(1+(int*)&x)
-# define __LO(x) *(int*)&x
-#else
-# define __HI(x) *(int*)&x
-# define __LO(x) *(1+(int*)&x)
-#endif
-
-static double copysignA(double x, double y) {
-  __HI(x) = (__HI(x)&0x7fffffff)|(__HI(y)&0x80000000);
-  return x;
-}
-
-/*
- * scalbn (double x, int n)
- * scalbn(x,n) returns x* 2**n  computed by  exponent
- * manipulation rather than by actually performing an
- * exponentiation or a multiplication.
- */
-
-static const double
-two54   =  1.80143985094819840000e+16, /* 0x43500000, 0x00000000 */
-twom54  =  5.55111512312578270212e-17, /* 0x3C900000, 0x00000000 */
-hugeX  = 1.0e+300,
-tiny   = 1.0e-300;
-
-static double scalbnA (double x, int n) {
-  int  k,hx,lx;
-  hx = __HI(x);
-  lx = __LO(x);
-  k = (hx&0x7ff00000)>>20;              /* extract exponent */
-  if (k==0) {                           /* 0 or subnormal x */
-    if ((lx|(hx&0x7fffffff))==0) return x; /* +-0 */
-    x *= two54;
-    hx = __HI(x);
-    k = ((hx&0x7ff00000)>>20) - 54;
-    if (n< -50000) return tiny*x;       /*underflow*/
-  }
-  if (k==0x7ff) return x+x;             /* NaN or Inf */
-  k = k+n;
-  if (k >  0x7fe) return hugeX*copysignA(hugeX,x); /* overflow  */
-  if (k > 0)                            /* normal result */
-    {__HI(x) = (hx&0x800fffff)|(k<<20); return x;}
-  if (k <= -54) {
-    if (n > 50000)      /* in case integer overflow in n+k */
-      return hugeX*copysignA(hugeX,x);  /*overflow*/
-    else return tiny*copysignA(tiny,x); /*underflow*/
-  }
-  k += 54;                              /* subnormal result */
-  __HI(x) = (hx&0x800fffff)|(k<<20);
-  return x*twom54;
-}
+#include "runtime/sharedRuntimeMath.hpp"
 
 /*
  * __kernel_rem_pio2(x,y,e0,nx,prec,ipio2)
@@ -603,7 +547,7 @@
 {
         double z,r,v;
         int ix;
-        ix = __HI(x)&0x7fffffff;        /* high word of x */
+        ix = high(x)&0x7fffffff;                /* high word of x */
         if(ix<0x3e400000)                       /* |x| < 2**-27 */
            {if((int)x==0) return x;}            /* generate inexact */
         z       =  x*x;
@@ -658,9 +602,9 @@
 
 static double __kernel_cos(double x, double y)
 {
-  double a,h,z,r,qx;
+  double a,h,z,r,qx=0;
   int ix;
-  ix = __HI(x)&0x7fffffff;      /* ix = |x|'s high word*/
+  ix = high(x)&0x7fffffff;              /* ix = |x|'s high word*/
   if(ix<0x3e400000) {                   /* if x < 2**27 */
     if(((int)x)==0) return one;         /* generate inexact */
   }
@@ -672,8 +616,8 @@
     if(ix > 0x3fe90000) {               /* x > 0.78125 */
       qx = 0.28125;
     } else {
-      __HI(qx) = ix-0x00200000; /* x/4 */
-      __LO(qx) = 0;
+      set_high(&qx, ix-0x00200000); /* x/4 */
+      set_low(&qx, 0);
     }
     h = 0.5*z-qx;
     a = one-qx;
@@ -738,11 +682,11 @@
 {
   double z,r,v,w,s;
   int ix,hx;
-  hx = __HI(x);   /* high word of x */
+  hx = high(x);           /* high word of x */
   ix = hx&0x7fffffff;     /* high word of |x| */
   if(ix<0x3e300000) {                     /* x < 2**-28 */
     if((int)x==0) {                       /* generate inexact */
-      if (((ix | __LO(x)) | (iy + 1)) == 0)
+      if (((ix | low(x)) | (iy + 1)) == 0)
         return one / fabsd(x);
       else {
         if (iy == 1)
@@ -751,10 +695,10 @@
           double a, t;
 
           z = w = x + y;
-          __LO(z) = 0;
+          set_low(&z, 0);
           v = y - (z - x);
           t = a = -one / w;
-          __LO(t) = 0;
+          set_low(&t, 0);
           s = one + t * z;
           return t + a * (s + t * v);
         }
@@ -789,10 +733,10 @@
     /*  compute -1.0/(x+r) accurately */
     double a,t;
     z  = w;
-    __LO(z) = 0;
+    set_low(&z, 0);
     v  = r-(z - x);     /* z+v = r+x */
     t = a  = -1.0/w;    /* a = -1.0/w */
-    __LO(t) = 0;
+    set_low(&t, 0);
     s  = 1.0+t*z;
     return t+a*(s+t*v);
   }
@@ -841,7 +785,7 @@
   int n, ix;
 
   /* High word of x. */
-  ix = __HI(x);
+  ix = high(x);
 
   /* |x| ~< pi/4 */
   ix &= 0x7fffffff;
@@ -899,7 +843,7 @@
   int n, ix;
 
   /* High word of x. */
-  ix = __HI(x);
+  ix = high(x);
 
   /* |x| ~< pi/4 */
   ix &= 0x7fffffff;
@@ -956,7 +900,7 @@
   int n, ix;
 
   /* High word of x. */
-  ix = __HI(x);
+  ix = high(x);
 
   /* |x| ~< pi/4 */
   ix &= 0x7fffffff;
--- a/src/share/vm/runtime/simpleThresholdPolicy.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/simpleThresholdPolicy.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -196,7 +196,6 @@
     // Don't trigger other compiles in testing mode
     return NULL;
   }
-  nmethod *osr_nm = NULL;
 
   handle_counter_overflow(method());
   if (method() != inlinee()) {
@@ -210,14 +209,16 @@
   if (bci == InvocationEntryBci) {
     method_invocation_event(method, inlinee, comp_level, nm, thread);
   } else {
+    // method == inlinee if the event originated in the main method
     method_back_branch_event(method, inlinee, bci, comp_level, nm, thread);
-    // method == inlinee if the event originated in the main method
-    int highest_level = inlinee->highest_osr_comp_level();
-    if (highest_level > comp_level) {
-      osr_nm = inlinee->lookup_osr_nmethod_for(bci, highest_level, false);
+    // Check if event led to a higher level OSR compilation
+    nmethod* osr_nm = inlinee->lookup_osr_nmethod_for(bci, comp_level, false);
+    if (osr_nm != NULL && osr_nm->comp_level() > comp_level) {
+      // Perform OSR with new nmethod
+      return osr_nm;
     }
   }
-  return osr_nm;
+  return NULL;
 }
 
 // Check if the method can be compiled, change level if necessary
@@ -239,7 +240,7 @@
   if (bci != InvocationEntryBci && mh->is_not_osr_compilable(level)) {
     return;
   }
-  if (!CompileBroker::compilation_is_in_queue(mh, bci)) {
+  if (!CompileBroker::compilation_is_in_queue(mh)) {
     if (PrintTieredEvents) {
       print_event(COMPILE, mh, mh, bci, level);
     }
@@ -378,7 +379,7 @@
 // Handle the invocation event.
 void SimpleThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
                                               CompLevel level, nmethod* nm, JavaThread* thread) {
-  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
+  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
     CompLevel next_level = call_event(mh(), level);
     if (next_level != level) {
       compile(mh, InvocationEntryBci, next_level, thread);
@@ -391,8 +392,8 @@
 void SimpleThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
                                                      int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
   // If the method is already compiling, quickly bail out.
-  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, bci)) {
-    // Use loop event as an opportinity to also check there's been
+  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
+    // Use loop event as an opportunity to also check there's been
     // enough calls.
     CompLevel cur_level = comp_level(mh());
     CompLevel next_level = call_event(mh(), cur_level);
--- a/src/share/vm/runtime/simpleThresholdPolicy.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/simpleThresholdPolicy.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -54,7 +54,10 @@
 // Simple methods are as good compiled with C1 as with C2.
 // Determine if a given method is such a case.
 bool SimpleThresholdPolicy::is_trivial(Method* method) {
-  if (method->is_accessor()) return true;
+  if (method->is_accessor() ||
+      method->is_constant_getter()) {
+    return true;
+  }
 #ifdef COMPILERGRAAL
   if (TieredCompilation && GraalCompileWithC1Only &&
       SystemDictionary::graal_loader() != NULL &&
@@ -62,12 +65,13 @@
     return true;
   }
 #endif
-  if (method->code() != NULL) {
-    MethodData* mdo = method->method_data();
-    if (mdo != NULL && mdo->num_loops() == 0 &&
-        (method->code_size() < 5  || (mdo->num_blocks() < 4) && (method->code_size() < 15))) {
-      return !mdo->would_profile();
-    }
+  if (method->has_loops() || method->code_size() >= 15) {
+    return false;
+  }
+  MethodData* mdo = method->method_data();
+  if (mdo != NULL && !mdo->would_profile() &&
+      (method->code_size() < 5  || (mdo->num_blocks() < 4))) {
+    return true;
   }
   return false;
 }
--- a/src/share/vm/runtime/stubRoutines.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/stubRoutines.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -125,9 +125,18 @@
 address StubRoutines::_cipherBlockChaining_encryptAESCrypt = NULL;
 address StubRoutines::_cipherBlockChaining_decryptAESCrypt = NULL;
 
+address StubRoutines::_sha1_implCompress     = NULL;
+address StubRoutines::_sha1_implCompressMB   = NULL;
+address StubRoutines::_sha256_implCompress   = NULL;
+address StubRoutines::_sha256_implCompressMB = NULL;
+address StubRoutines::_sha512_implCompress   = NULL;
+address StubRoutines::_sha512_implCompressMB = NULL;
+
 address StubRoutines::_updateBytesCRC32 = NULL;
 address StubRoutines::_crc_table_adr = NULL;
 
+address StubRoutines::_multiplyToLen = NULL;
+
 double (* StubRoutines::_intrinsic_log   )(double) = NULL;
 double (* StubRoutines::_intrinsic_log10 )(double) = NULL;
 double (* StubRoutines::_intrinsic_exp   )(double) = NULL;
--- a/src/share/vm/runtime/stubRoutines.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/stubRoutines.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -207,9 +207,18 @@
   static address _cipherBlockChaining_encryptAESCrypt;
   static address _cipherBlockChaining_decryptAESCrypt;
 
+  static address _sha1_implCompress;
+  static address _sha1_implCompressMB;
+  static address _sha256_implCompress;
+  static address _sha256_implCompressMB;
+  static address _sha512_implCompress;
+  static address _sha512_implCompressMB;
+
   static address _updateBytesCRC32;
   static address _crc_table_adr;
 
+  static address _multiplyToLen;
+
   // These are versions of the java.lang.Math methods which perform
   // the same operations as the intrinsic version.  They are used for
   // constant folding in the compiler to ensure equivalence.  If the
@@ -356,9 +365,18 @@
   static address cipherBlockChaining_encryptAESCrypt()  { return _cipherBlockChaining_encryptAESCrypt; }
   static address cipherBlockChaining_decryptAESCrypt()  { return _cipherBlockChaining_decryptAESCrypt; }
 
+  static address sha1_implCompress()     { return _sha1_implCompress; }
+  static address sha1_implCompressMB()   { return _sha1_implCompressMB; }
+  static address sha256_implCompress()   { return _sha256_implCompress; }
+  static address sha256_implCompressMB() { return _sha256_implCompressMB; }
+  static address sha512_implCompress()   { return _sha512_implCompress; }
+  static address sha512_implCompressMB() { return _sha512_implCompressMB; }
+
   static address updateBytesCRC32()    { return _updateBytesCRC32; }
   static address crc_table_addr()      { return _crc_table_adr; }
 
+  static address multiplyToLen()       {return _multiplyToLen; }
+
   static address select_fill_function(BasicType t, bool aligned, const char* &name);
 
   static address zero_aligned_words()   { return _zero_aligned_words; }
--- a/src/share/vm/runtime/sweeper.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/sweeper.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -33,8 +33,10 @@
 #include "runtime/atomic.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
 #include "runtime/sweeper.hpp"
+#include "runtime/thread.inline.hpp"
 #include "runtime/vm_operations.hpp"
 #include "trace/tracing.hpp"
 #include "utilities/events.hpp"
--- a/src/share/vm/runtime/thread.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/thread.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -62,6 +62,7 @@
 #include "runtime/memprofiler.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -237,6 +238,8 @@
   // This initial value ==> never claimed.
   _oops_do_parity = 0;
 
+  _metadata_on_stack_buffer = NULL;
+
   // the handle mark links itself to last_handle_mark
   new HandleMark(this);
 
@@ -334,8 +337,7 @@
 #if INCLUDE_NMT
   // record thread's native stack, stack grows downward
   address stack_low_addr = stack_base() - stack_size();
-  MemTracker::record_thread_stack(stack_low_addr, stack_size(), this,
-      CURRENT_PC);
+  MemTracker::record_thread_stack(stack_low_addr, stack_size());
 #endif // INCLUDE_NMT
 }
 
@@ -353,7 +355,7 @@
 #if INCLUDE_NMT
   if (_stack_base != NULL) {
     address low_stack_addr = stack_base() - stack_size();
-    MemTracker::release_thread_stack(low_stack_addr, stack_size(), this);
+    MemTracker::release_thread_stack(low_stack_addr, stack_size());
 #ifdef ASSERT
     set_stack_base(NULL);
 #endif
@@ -837,7 +839,7 @@
   return false;
 }
 
-void Thread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
+void Thread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
   active_handles()->oops_do(f);
   // Do oop for ThreadShadow
   f->do_oop((oop*)&_pending_exception);
@@ -867,6 +869,7 @@
       st->print("os_prio=%d ", os_prio);
     }
     st->print("tid=" INTPTR_FORMAT " ", this);
+    ext().print_on(st);
     osthread()->print_on(st);
   }
   debug_only(if (WizardMode) print_owned_locks_on(st);)
@@ -946,7 +949,7 @@
               cur != VMOperationRequest_lock &&
               cur != VMOperationQueue_lock) ||
               cur->rank() == Mutex::special) {
-          warning("Thread holding lock at safepoint that vm can block on: %s", cur->name());
+          fatal(err_msg("Thread holding lock at safepoint that vm can block on: %s", cur->name()));
         }
       }
     }
@@ -1476,9 +1479,6 @@
   set_monitor_chunks(NULL);
   set_next(NULL);
   set_thread_state(_thread_new);
-#if INCLUDE_NMT
-  set_recorder(NULL);
-#endif
   _terminated = _not_terminated;
   _privileged_stack_top = NULL;
   _array_for_gc = NULL;
@@ -1541,6 +1541,7 @@
   _popframe_condition = popframe_inactive;
   _popframe_preserved_args = NULL;
   _popframe_preserved_args_size = 0;
+  _frames_to_pop_failed_realloc = 0;
 
   pd_initialize();
 }
@@ -1564,7 +1565,6 @@
     _jni_attach_state = _not_attaching_via_jni;
   }
   assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
-  _safepoint_visible = false;
 }
 
 bool JavaThread::reguard_stack(address cur_sp) {
@@ -1627,7 +1627,6 @@
   thr_type = entry_point == &compiler_thread_entry ? os::compiler_thread :
                                                      os::java_thread;
   os::create_thread(this, thr_type, stack_sz);
-  _safepoint_visible = false;
   // The _osthread may be NULL here because we ran out of memory (too many threads active).
   // We need to throw an OutOfMemoryError - however we cannot do this here because the caller
   // may hold a lock and all locks must be unlocked before throwing the exception (throwing
@@ -1645,13 +1644,6 @@
       tty->print_cr("terminate thread %p", this);
   }
 
-  // By now, this thread should already be invisible to safepoint,
-  // and its per-thread recorder also collected.
-  assert(!is_safepoint_visible(), "wrong state");
-#if INCLUDE_NMT
-  assert(get_recorder() == NULL, "Already collected");
-#endif // INCLUDE_NMT
-
   // JSR166 -- return the parker to the free list
   Parker::Release(_parker);
   _parker = NULL ;
@@ -2780,7 +2772,7 @@
   }
 };
 
-void JavaThread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
+void JavaThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
   // Verify that the deferred card marks have been flushed.
   assert(deferred_card_mark().is_empty(), "Should be empty during GC");
 
@@ -3091,6 +3083,8 @@
   // Push the Java priority down to the native thread; needs Threads_lock
   Thread::set_priority(this, prio);
 
+  prepare_ext();
+
   // Add the new thread to the Threads list and set it in motion.
   // We must have threads lock in order to call Threads::add.
   // It is crucial that we do not block before the thread is
@@ -3316,7 +3310,7 @@
 }
 #endif
 
-void CompilerThread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
+void CompilerThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
   JavaThread::oops_do(f, cld_f, cf);
   if (_scanned_nmethod != NULL && cf != NULL) {
     // Safepoints can occur when the sweeper is scanning an nmethod so
@@ -3433,11 +3427,6 @@
   // initialize TLS
   ThreadLocalStorage::init();
 
-  // Bootstrap native memory tracking, so it can start recording memory
-  // activities before worker thread is started. This is the first phase
-  // of bootstrapping, VM is currently running in single-thread mode.
-  MemTracker::bootstrap_single_thread();
-
   // Initialize output stream logging
   ostream_init_log();
 
@@ -3497,9 +3486,6 @@
   // Initialize Java-Level synchronization subsystem
   ObjectMonitor::Initialize() ;
 
-  // Second phase of bootstrapping, VM is about entering multi-thread mode
-  MemTracker::bootstrap_multi_thread();
-
   // Initialize global modules
   jint status = init_globals();
   if (status != JNI_OK) {
@@ -3521,9 +3507,6 @@
   // real raw monitor. VM is setup enough here for raw monitor enter.
   JvmtiExport::transition_pending_onload_raw_monitors();
 
-  // Fully start NMT
-  MemTracker::start();
-
   // Create the VMThread
   { TraceTime timer("Start VMThread", TraceStartupTime);
     VMThread::create();
@@ -3979,6 +3962,24 @@
   }
 }
 
+JavaThread* Threads::find_java_thread_from_java_tid(jlong java_tid) {
+  assert(Threads_lock->owned_by_self(), "Must hold Threads_lock");
+
+  JavaThread* java_thread = NULL;
+  // Sequential search for now.  Need to do better optimization later.
+  for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
+    oop tobj = thread->threadObj();
+    if (!thread->is_exiting() &&
+        tobj != NULL &&
+        java_tid == java_lang_Thread::thread_id(tobj)) {
+      java_thread = thread;
+      break;
+    }
+  }
+  return java_thread;
+}
+
+
 // Last thread running calls java.lang.Shutdown.shutdown()
 void JavaThread::invoke_shutdown_hooks() {
   HandleMark hm(this);
@@ -4175,8 +4176,6 @@
     daemon = false;
   }
 
-  p->set_safepoint_visible(true);
-
   ThreadService::add_thread(p, daemon);
 
   // Possible GC point.
@@ -4222,13 +4221,6 @@
     // to do callbacks into the safepoint code. However, the safepoint code is not aware
     // of this thread since it is removed from the queue.
     p->set_terminated_value();
-
-    // Now, this thread is not visible to safepoint
-    p->set_safepoint_visible(false);
-    // once the thread becomes safepoint invisible, we can not use its per-thread
-    // recorder. And Threads::do_threads() no longer walks this thread, so we have
-    // to release its per-thread recorder here.
-    MemTracker::thread_exiting(p);
   } // unlock Threads_lock
 
   // Since Events::log uses a lock, we grab it outside the Threads_lock
@@ -4253,22 +4245,22 @@
 // uses the Threads_lock to guarantee this property. It also makes sure that
 // all threads get blocked when exiting or starting).
 
-void Threads::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
+void Threads::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
   ALL_JAVA_THREADS(p) {
     p->oops_do(f, cld_f, cf);
   }
   VMThread::vm_thread()->oops_do(f, cld_f, cf);
 }
 
-void Threads::possibly_parallel_oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
+void Threads::possibly_parallel_oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
   // Introduce a mechanism allowing parallel threads to claim threads as
   // root groups.  Overhead should be small enough to use all the time,
   // even in sequential code.
   SharedHeap* sh = SharedHeap::heap();
   // Cannot yet substitute active_workers for n_par_threads
   // because of G1CollectedHeap::verify() use of
-  // SharedHeap::process_strong_roots().  n_par_threads == 0 will
-  // turn off parallelism in process_strong_roots while active_workers
+  // SharedHeap::process_roots().  n_par_threads == 0 will
+  // turn off parallelism in process_roots while active_workers
   // is being used for parallelism elsewhere.
   bool is_par = sh->n_par_threads() > 0;
   assert(!is_par ||
--- a/src/share/vm/runtime/thread.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/thread.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -40,16 +40,12 @@
 #include "runtime/safepoint.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/threadLocalStorage.hpp"
+#include "runtime/thread_ext.hpp"
 #include "runtime/unhandledOops.hpp"
-#include "utilities/macros.hpp"
-
-#if INCLUDE_NMT
-#include "services/memRecorder.hpp"
-#endif // INCLUDE_NMT
-
 #include "trace/traceBackend.hpp"
 #include "trace/traceMacros.hpp"
 #include "utilities/exceptions.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/top.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/dirtyCardQueue.hpp"
@@ -86,6 +82,10 @@
 class ThreadClosure;
 class IdealGraphPrinter;
 
+class Metadata;
+template <class T, MEMFLAGS F> class ChunkedList;
+typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer;
+
 DEBUG_ONLY(class ResourceMark;)
 
 class WorkerThread;
@@ -259,8 +259,13 @@
   jlong _allocated_bytes;                       // Cumulative number of bytes allocated on
                                                 // the Java heap
 
+  // Thread-local buffer used by MetadataOnStackMark.
+  MetadataOnStackBuffer* _metadata_on_stack_buffer;
+
   TRACE_DATA _trace_data;                       // Thread-local data for tracing
 
+  ThreadExt _ext;
+
   int   _vm_operation_started_count;            // VM_Operation support
   int   _vm_operation_completed_count;          // VM_Operation support
 
@@ -439,21 +444,13 @@
   jlong allocated_bytes()               { return _allocated_bytes; }
   void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
   void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
-  jlong cooked_allocated_bytes() {
-    jlong allocated_bytes = OrderAccess::load_acquire(&_allocated_bytes);
-    if (UseTLAB) {
-      size_t used_bytes = tlab().used_bytes();
-      if ((ssize_t)used_bytes > 0) {
-        // More-or-less valid tlab.  The load_acquire above should ensure
-        // that the result of the add is <= the instantaneous value
-        return allocated_bytes + used_bytes;
-      }
-    }
-    return allocated_bytes;
-  }
+  inline jlong cooked_allocated_bytes();
 
   TRACE_DATA* trace_data()              { return &_trace_data; }
 
+  const ThreadExt& ext() const          { return _ext; }
+  ThreadExt& ext()                      { return _ext; }
+
   // VM operation support
   int vm_operation_ticket()                      { return ++_vm_operation_started_count; }
   int vm_operation_completed_count()             { return _vm_operation_completed_count; }
@@ -486,13 +483,13 @@
   // Apply "cld_f->do_cld" to CLDs that are otherwise not kept alive.
   //   Used by JavaThread::oops_do.
   // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
-  virtual void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
+  virtual void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
 
   // Handles the parallel case for the method below.
 private:
   bool claim_oops_do_par_case(int collection_parity);
 public:
-  // Requires that "collection_parity" is that of the current strong roots
+  // Requires that "collection_parity" is that of the current roots
   // iteration.  If "is_par" is false, sets the parity of "this" to
   // "collection_parity", and returns "true".  If "is_par" is true,
   // uses an atomic instruction to set the current threads parity to
@@ -529,7 +526,10 @@
   // creation fails due to lack of memory, too many threads etc.
   bool set_as_starting_thread();
 
- protected:
+  void set_metadata_on_stack_buffer(MetadataOnStackBuffer* buffer) { _metadata_on_stack_buffer = buffer; }
+  MetadataOnStackBuffer* metadata_on_stack_buffer() const          { return _metadata_on_stack_buffer; }
+
+protected:
   // OS data associated with the thread
   OSThread* _osthread;  // Platform-specific thread information
 
@@ -955,6 +955,12 @@
   // This is set to popframe_pending to signal that top Java frame should be popped immediately
   int _popframe_condition;
 
+  // If reallocation of scalar replaced objects fails, we throw OOM
+  // and during exception propagation, pop the top
+  // _frames_to_pop_failed_realloc frames, the ones that reference
+  // failed reallocations.
+  int _frames_to_pop_failed_realloc;
+
 #ifndef PRODUCT
   int _jmp_ring_index;
   struct {
@@ -1049,6 +1055,7 @@
   // not specified, use the priority of the thread object. Threads_lock
   // must be held while this function is called.
   void prepare(jobject jni_thread, ThreadPriority prio=NoPriority);
+  void prepare_ext();
 
   void set_saved_exception_pc(address pc)        { _saved_exception_pc = pc; }
   address saved_exception_pc()                   { return _saved_exception_pc; }
@@ -1078,12 +1085,8 @@
 #else
   // Use membars when accessing volatile _thread_state. See
   // Threads::create_vm() for size checks.
-  JavaThreadState thread_state() const           {
-    return (JavaThreadState) OrderAccess::load_acquire((volatile jint*)&_thread_state);
-  }
-  void set_thread_state(JavaThreadState s)       {
-    OrderAccess::release_store((volatile jint*)&_thread_state, (jint)s);
-  }
+  inline JavaThreadState thread_state() const;
+  inline void set_thread_state(JavaThreadState s);
 #endif
   ThreadSafepointState *safepoint_state() const  { return _safepoint_state; }
   void set_safepoint_state(ThreadSafepointState *state) { _safepoint_state = state; }
@@ -1106,16 +1109,6 @@
   bool do_not_unlock_if_synchronized()             { return _do_not_unlock_if_synchronized; }
   void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }
 
-#if INCLUDE_NMT
-  // native memory tracking
-  inline MemRecorder* get_recorder() const          { return (MemRecorder*)_recorder; }
-  inline void         set_recorder(MemRecorder* rc) { _recorder = rc; }
-
- private:
-  // per-thread memory recorder
-  MemRecorder* volatile _recorder;
-#endif // INCLUDE_NMT
-
   // Suspend/resume support for JavaThread
  private:
   void set_ext_suspended()       { set_suspend_flag (_ext_suspended);  }
@@ -1486,7 +1479,7 @@
   void frames_do(void f(frame*, const RegisterMap*));
 
   // Memory operations
-  void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
+  void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
 
   // Sweeper operations
   void nmethods_do(CodeBlobClosure* cf);
@@ -1568,19 +1561,6 @@
      return result;
    }
 
- // NMT (Native memory tracking) support.
- // This flag helps NMT to determine if this JavaThread will be blocked
- // at safepoint. If not, ThreadCritical is needed for writing memory records.
- // JavaThread is only safepoint visible when it is in Threads' thread list,
- // it is not visible until it is added to the list and becomes invisible
- // once it is removed from the list.
- public:
-  bool is_safepoint_visible() const { return _safepoint_visible; }
-  void set_safepoint_visible(bool visible) { _safepoint_visible = visible; }
- private:
-  bool _safepoint_visible;
-
-  // Static operations
  public:
   // Returns the running thread as a JavaThread
   static inline JavaThread* current();
@@ -1653,6 +1633,10 @@
   void clr_pop_frame_in_process(void)                 { _popframe_condition &= ~popframe_processing_bit; }
 #endif
 
+  int frames_to_pop_failed_realloc() const            { return _frames_to_pop_failed_realloc; }
+  void set_frames_to_pop_failed_realloc(int nb)       { _frames_to_pop_failed_realloc = nb; }
+  void dec_frames_to_pop_failed_realloc()             { _frames_to_pop_failed_realloc--; }
+
  private:
   // Saved incoming arguments to popped frame.
   // Used only when popped interpreted frame returns to deoptimized frame.
@@ -1817,7 +1801,7 @@
   // clearing/querying jni attach status
   bool is_attaching_via_jni() const { return _jni_attach_state == _attaching_via_jni; }
   bool has_attached_via_jni() const { return is_attaching_via_jni() || _jni_attach_state == _attached_via_jni; }
-  void set_done_attaching_via_jni() { _jni_attach_state = _attached_via_jni; OrderAccess::fence(); }
+  inline void set_done_attaching_via_jni();
 private:
   // This field is used to determine if a thread has claimed
   // a par_id: it is UINT_MAX if the thread has not claimed a par_id;
@@ -1924,7 +1908,7 @@
   // GC support
   // Apply "f->do_oop" to all root oops in "this".
   // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
-  void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
+  void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
 
 #ifndef PRODUCT
 private:
@@ -1991,9 +1975,9 @@
 
   // Apply "f->do_oop" to all root oops in all threads.
   // This version may only be called by sequential code.
-  static void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
+  static void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
   // This version may be called by sequential or parallel code.
-  static void possibly_parallel_oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
+  static void possibly_parallel_oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
   // This creates a list of GCTasks, one per thread.
   static void create_thread_roots_tasks(GCTaskQueue* q);
   // This creates a list of GCTasks, one per thread, for marking objects.
@@ -2047,6 +2031,8 @@
   // Deoptimizes all frames tied to marked nmethods
   static void deoptimized_wrt_marked_nmethods();
 
+  static JavaThread* find_java_thread_from_java_tid(jlong java_tid);
+
 };
 
 
--- a/src/share/vm/runtime/thread.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/thread.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -46,4 +46,32 @@
 
 #undef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE
 
+inline jlong Thread::cooked_allocated_bytes() {
+  jlong allocated_bytes = OrderAccess::load_acquire(&_allocated_bytes);
+  if (UseTLAB) {
+    size_t used_bytes = tlab().used_bytes();
+    if ((ssize_t)used_bytes > 0) {
+      // More-or-less valid tlab. The load_acquire above should ensure
+      // that the result of the add is <= the instantaneous value.
+      return allocated_bytes + used_bytes;
+    }
+  }
+  return allocated_bytes;
+}
+
+#ifdef PPC64
+inline JavaThreadState JavaThread::thread_state() const    {
+  return (JavaThreadState) OrderAccess::load_acquire((volatile jint*)&_thread_state);
+}
+
+inline void JavaThread::set_thread_state(JavaThreadState s) {
+  OrderAccess::release_store((volatile jint*)&_thread_state, (jint)s);
+}
+#endif
+
+inline void JavaThread::set_done_attaching_via_jni() {
+  _jni_attach_state = _attached_via_jni;
+  OrderAccess::fence();
+}
+
 #endif // SHARE_VM_RUNTIME_THREAD_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/runtime/thread_ext.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/thread.hpp"
+#include "runtime/thread_ext.hpp"
+
+void JavaThread::prepare_ext() {
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/runtime/thread_ext.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_THREAD_EXT_HPP
+#define SHARE_VM_RUNTIME_THREAD_EXT_HPP
+
+#include "memory/allocation.hpp"
+
+class ThreadExt VALUE_OBJ_CLASS_SPEC {
+public:
+  void print_on(outputStream* st) const {};
+};
+
+#endif // SHARE_VM_RUNTIME_THREAD_EXT_HPP
--- a/src/share/vm/runtime/vframe.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/vframe.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -260,65 +260,156 @@
   return fr().interpreter_frame_method();
 }
 
-StackValueCollection* interpretedVFrame::locals() const {
-  int length = method()->max_locals();
+static StackValue* create_stack_value_from_oop_map(const InterpreterOopMap& oop_mask,
+                                                   int index,
+                                                   const intptr_t* const addr) {
+
+  assert(index >= 0 &&
+         index < oop_mask.number_of_entries(), "invariant");
 
-  if (method()->is_native()) {
-    // If the method is native, max_locals is not telling the truth.
-    // maxlocals then equals the size of parameters
-    length = method()->size_of_parameters();
+  // categorize using oop_mask
+  if (oop_mask.is_oop(index)) {
+    // reference (oop) "r"
+    Handle h(addr != NULL ? (*(oop*)addr) : (oop)NULL);
+    return new StackValue(h);
+  }
+  // value (integer) "v"
+  return new StackValue(addr != NULL ? *addr : 0);
+}
+
+static bool is_in_expression_stack(const frame& fr, const intptr_t* const addr) {
+  assert(addr != NULL, "invariant");
+
+  // Ensure the address is 'inside' the expression stack (i.e., addr >= sp for Intel).
+  // In case of exceptions, the expression stack is invalid and the sp
+  // will be reset to express this condition.
+  if (frame::interpreter_frame_expression_stack_direction() > 0) {
+    return addr <= fr.interpreter_frame_tos_address();
   }
 
-  StackValueCollection* result = new StackValueCollection(length);
+  return addr >= fr.interpreter_frame_tos_address();
+}
+
+static void stack_locals(StackValueCollection* result,
+                         int length,
+                         const InterpreterOopMap& oop_mask,
+                         const frame& fr) {
+
+  assert(result != NULL, "invariant");
+
+  for (int i = 0; i < length; ++i) {
+    const intptr_t* const addr = fr.interpreter_frame_local_at(i);
+    assert(addr != NULL, "invariant");
+    assert(addr >= fr.sp(), "must be inside the frame");
+
+    StackValue* const sv = create_stack_value_from_oop_map(oop_mask, i, addr);
+    assert(sv != NULL, "sanity check");
+
+    result->add(sv);
+  }
+}
+
+static void stack_expressions(StackValueCollection* result,
+                              int length,
+                              int max_locals,
+                              const InterpreterOopMap& oop_mask,
+                              const frame& fr) {
+
+  assert(result != NULL, "invariant");
 
-  // Get oopmap describing oops and int for current bci
+  for (int i = 0; i < length; ++i) {
+    const intptr_t* addr = fr.interpreter_frame_expression_stack_at(i);
+    assert(addr != NULL, "invariant");
+    if (!is_in_expression_stack(fr, addr)) {
+      // Need to ensure no bogus escapes.
+      addr = NULL;
+    }
+
+    StackValue* const sv = create_stack_value_from_oop_map(oop_mask,
+                                                           i + max_locals,
+                                                           addr);
+    assert(sv != NULL, "sanity check");
+
+    result->add(sv);
+  }
+}
+
+StackValueCollection* interpretedVFrame::locals() const {
+  return stack_data(false);
+}
+
+StackValueCollection* interpretedVFrame::expressions() const {
+  return stack_data(true);
+}
+
+/*
+ * Worker routine for fetching references and/or values
+ * for a particular bci in the interpretedVFrame.
+ *
+ * Returns data for either "locals" or "expressions",
+ * using bci-relative oop_map (oop_mask) information.
+ *
+ * @param expressions  bool switch controlling what data to return
+ *                     (false == locals / true == expressions)
+ *
+ */
+StackValueCollection* interpretedVFrame::stack_data(bool expressions) const {
+
   InterpreterOopMap oop_mask;
+  // oopmap for current bci
   if ((TraceDeoptimization && Verbose) GRAAL_ONLY( || PrintDeoptimizationDetails)) {
     methodHandle m_h(Thread::current(), method());
     OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
   } else {
     method()->mask_for(bci(), &oop_mask);
   }
-  // handle locals
-  for(int i=0; i < length; i++) {
-    // Find stack location
-    intptr_t *addr = locals_addr_at(i);
+
+  const int mask_len = oop_mask.number_of_entries();
+
+  // If the method is native, method()->max_locals() is not telling the truth.
+  // For our purposes, max locals instead equals the size of parameters.
+  const int max_locals = method()->is_native() ?
+    method()->size_of_parameters() : method()->max_locals();
+
+  assert(mask_len >= max_locals, "invariant");
+
+  const int length = expressions ? mask_len - max_locals : max_locals;
+  assert(length >= 0, "invariant");
 
-    // Depending on oop/int put it in the right package
-    StackValue *sv;
-    if (oop_mask.is_oop(i)) {
-      // oop value
-      Handle h(*(oop *)addr);
-      sv = new StackValue(h);
-    } else {
-      // integer
-      sv = new StackValue(*addr);
-    }
-    assert(sv != NULL, "sanity check");
-    result->add(sv);
+  StackValueCollection* const result = new StackValueCollection(length);
+
+  if (0 == length) {
+    return result;
   }
+
+  if (expressions) {
+    stack_expressions(result, length, max_locals, oop_mask, fr());
+  } else {
+    stack_locals(result, length, oop_mask, fr());
+  }
+
+  assert(length == result->size(), "invariant");
+
   return result;
 }
 
 void interpretedVFrame::set_locals(StackValueCollection* values) const {
   if (values == NULL || values->size() == 0) return;
 
-  int length = method()->max_locals();
-  if (method()->is_native()) {
-    // If the method is native, max_locals is not telling the truth.
-    // maxlocals then equals the size of parameters
-    length = method()->size_of_parameters();
-  }
+  // If the method is native, max_locals is not telling the truth.
+  // max_locals then equals the size of the parameters.
+  const int max_locals = method()->is_native() ?
+    method()->size_of_parameters() : method()->max_locals();
 
-  assert(length == values->size(), "Mismatch between actual stack format and supplied data");
+  assert(max_locals == values->size(), "Mismatch between actual stack format and supplied data");
 
   // handle locals
-  for (int i = 0; i < length; i++) {
+  for (int i = 0; i < max_locals; i++) {
     // Find stack location
     intptr_t *addr = locals_addr_at(i);
 
     // Depending on oop/int put it in the right package
-    StackValue *sv = values->at(i);
+    const StackValue* const sv = values->at(i);
     assert(sv != NULL, "sanity check");
     if (sv->type() == T_OBJECT) {
       *(oop *) addr = (sv->get_obj())();
@@ -328,46 +419,6 @@
   }
 }
 
-StackValueCollection*  interpretedVFrame::expressions() const {
-  int length = fr().interpreter_frame_expression_stack_size();
-  if (method()->is_native()) {
-    // If the method is native, there is no expression stack
-    length = 0;
-  }
-
-  int nof_locals = method()->max_locals();
-  StackValueCollection* result = new StackValueCollection(length);
-
-  InterpreterOopMap oop_mask;
-  // Get oopmap describing oops and int for current bci
-  if ((TraceDeoptimization && Verbose) GRAAL_ONLY( || PrintDeoptimizationDetails)) {
-    methodHandle m_h(method());
-    OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
-  } else {
-    method()->mask_for(bci(), &oop_mask);
-  }
-  // handle expressions
-  for(int i=0; i < length; i++) {
-    // Find stack location
-    intptr_t *addr = fr().interpreter_frame_expression_stack_at(i);
-
-    // Depending on oop/int put it in the right package
-    StackValue *sv;
-    if (oop_mask.is_oop(i + nof_locals)) {
-      // oop value
-      Handle h(*(oop *)addr);
-      sv = new StackValue(h);
-    } else {
-      // integer
-      sv = new StackValue(*addr);
-    }
-    assert(sv != NULL, "sanity check");
-    result->add(sv);
-  }
-  return result;
-}
-
-
 // ------------- cChunk --------------
 
 entryVFrame::entryVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread)
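
For orientation, a minimal sketch of the length selection performed by interpretedVFrame::stack_data() above (illustrative only, not part of the changeset; the numbers are hypothetical). The oop mask lists the locals first and then the expression-stack slots for the current bci, so locals() and expressions() are simply two slices of the same mask:

    // Mirrors the arithmetic in stack_data(); names are illustrative.
    static int stack_data_length(bool expressions, int mask_len, int max_locals) {
      return expressions ? mask_len - max_locals   // expression-stack slots
                         : max_locals;             // locals
    }
    // e.g. mask_len == 5 and max_locals == 3 yields 3 locals and 2 expression slots.
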
--- a/src/share/vm/runtime/vframe.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/vframe.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -186,7 +186,7 @@
  private:
   static const int bcp_offset;
   intptr_t* locals_addr_at(int offset) const;
-
+  StackValueCollection* stack_data(bool expressions) const;
   // returns where the parameters starts relative to the frame pointer
   int start_of_parameters() const;
 
--- a/src/share/vm/runtime/vframeArray.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/vframeArray.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -56,7 +56,7 @@
   }
 }
 
-void vframeArrayElement::fill_in(compiledVFrame* vf) {
+void vframeArrayElement::fill_in(compiledVFrame* vf, bool realloc_failures) {
 
 // Copy the information from the compiled vframe to the
 // interpreter frame we will be creating to replace vf
@@ -64,6 +64,9 @@
   _method = vf->method();
   _bci    = vf->raw_bci();
   _reexecute = vf->should_reexecute();
+#ifdef ASSERT
+  _removed_monitors = false;
+#endif
 
   int index;
 
@@ -81,11 +84,15 @@
     // Migrate the BasicLocks from the stack to the monitor chunk
     for (index = 0; index < list->length(); index++) {
       MonitorInfo* monitor = list->at(index);
-      assert(!monitor->owner_is_scalar_replaced(), "object should be reallocated already");
-      assert(monitor->owner() == NULL || (!monitor->owner()->is_unlocked() && !monitor->owner()->has_bias_pattern()), "object must be null or locked, and unbiased");
+      assert(!monitor->owner_is_scalar_replaced() || realloc_failures, "object should be reallocated already");
       BasicObjectLock* dest = _monitors->at(index);
-      dest->set_obj(monitor->owner());
-      monitor->lock()->move_to(monitor->owner(), dest->lock());
+      if (monitor->owner_is_scalar_replaced()) {
+        dest->set_obj(NULL);
+      } else {
+        assert(monitor->owner() == NULL || (!monitor->owner()->is_unlocked() && !monitor->owner()->has_bias_pattern()), "object must be null or locked, and unbiased");
+        dest->set_obj(monitor->owner());
+        monitor->lock()->move_to(monitor->owner(), dest->lock());
+      }
     }
   }
 
@@ -110,7 +117,7 @@
     StackValue* value = locs->at(index);
     switch(value->type()) {
       case T_OBJECT:
-        assert(!value->obj_is_scalar_replaced(), "object should be reallocated already");
+        assert(!value->obj_is_scalar_replaced() || realloc_failures, "object should be reallocated already");
         // preserve object type
         _locals->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT ));
         break;
@@ -135,7 +142,7 @@
     StackValue* value = exprs->at(index);
     switch(value->type()) {
       case T_OBJECT:
-        assert(!value->obj_is_scalar_replaced(), "object should be reallocated already");
+        assert(!value->obj_is_scalar_replaced() || realloc_failures, "object should be reallocated already");
         // preserve object type
         _expressions->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT ));
         break;
@@ -286,7 +293,7 @@
 
   _frame.patch_pc(thread, pc);
 
-  assert (!method()->is_synchronized() || locks > 0 || raw_bci() == SynchronizationEntryBCI, "synchronized methods must have monitors");
+  assert (!method()->is_synchronized() || locks > 0 || _removed_monitors || raw_bci() == SynchronizationEntryBCI, "synchronized methods must have monitors");
 
   BasicObjectLock* top = iframe()->interpreter_frame_monitor_begin();
   for (int index = 0; index < locks; index++) {
@@ -474,7 +481,8 @@
 
 
 vframeArray* vframeArray::allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk,
-                                   RegisterMap *reg_map, frame sender, frame caller, frame self) {
+                                   RegisterMap *reg_map, frame sender, frame caller, frame self,
+                                   bool realloc_failures) {
 
   // Allocate the vframeArray
   vframeArray * result = (vframeArray*) AllocateHeap(sizeof(vframeArray) + // fixed part
@@ -486,19 +494,20 @@
   result->_caller = caller;
   result->_original = self;
   result->set_unroll_block(NULL); // initialize it
-  result->fill_in(thread, frame_size, chunk, reg_map);
+  result->fill_in(thread, frame_size, chunk, reg_map, realloc_failures);
   return result;
 }
 
 void vframeArray::fill_in(JavaThread* thread,
                           int frame_size,
                           GrowableArray<compiledVFrame*>* chunk,
-                          const RegisterMap *reg_map) {
+                          const RegisterMap *reg_map,
+                          bool realloc_failures) {
   // Set owner first, it is used when adding monitor chunks
 
   _frame_size = frame_size;
   for(int i = 0; i < chunk->length(); i++) {
-    element(i)->fill_in(chunk->at(i));
+    element(i)->fill_in(chunk->at(i), realloc_failures);
   }
 
   // Copy registers for callee-saved registers
--- a/src/share/vm/runtime/vframeArray.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/vframeArray.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -58,6 +58,9 @@
     MonitorChunk* _monitors;                                     // active monitors for this vframe
     StackValueCollection* _locals;
     StackValueCollection* _expressions;
+#ifdef ASSERT
+    bool _removed_monitors;
+#endif
 
   public:
 
@@ -78,7 +81,7 @@
 
   StackValueCollection* expressions(void) const        { return _expressions; }
 
-  void fill_in(compiledVFrame* vf);
+  void fill_in(compiledVFrame* vf, bool realloc_failures);
 
   // Formerly part of deoptimizedVFrame
 
@@ -99,6 +102,12 @@
                        bool is_bottom_frame,
                        int exec_mode);
 
+#ifdef ASSERT
+  void set_removed_monitors() {
+    _removed_monitors = true;
+  }
+#endif
+
 #ifndef PRODUCT
   void print(outputStream* st);
 #endif /* PRODUCT */
@@ -160,13 +169,14 @@
   int frames() const                            { return _frames;   }
 
   static vframeArray* allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk,
-                               RegisterMap* reg_map, frame sender, frame caller, frame self);
+                               RegisterMap* reg_map, frame sender, frame caller, frame self,
+                               bool realloc_failures);
 
 
   vframeArrayElement* element(int index)        { assert(is_within_bounds(index), "Bad index"); return &_elements[index]; }
 
   // Allocates a new vframe in the array and fills the array with vframe information in chunk
-  void fill_in(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk, const RegisterMap *reg_map);
+  void fill_in(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk, const RegisterMap *reg_map, bool realloc_failures);
 
   // Returns the owner of this vframeArray
   JavaThread* owner_thread() const           { return _owner_thread; }
--- a/src/share/vm/runtime/vmStructs.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/vmStructs.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -327,7 +327,7 @@
   nonstatic_field(InstanceKlass,               _jni_ids,                                      JNIid*)                                \
   nonstatic_field(InstanceKlass,               _osr_nmethods_head,                            nmethod*)                              \
   nonstatic_field(InstanceKlass,               _breakpoints,                                  BreakpointInfo*)                       \
-  nonstatic_field(InstanceKlass,               _generic_signature_index,                           u2)                               \
+  nonstatic_field(InstanceKlass,               _generic_signature_index,                      u2)                                    \
   nonstatic_field(InstanceKlass,               _methods_jmethod_ids,                          jmethodID*)                            \
   volatile_nonstatic_field(InstanceKlass,      _idnum_allocated_count,                        u2)                                    \
   nonstatic_field(InstanceKlass,               _annotations,                                  Annotations*)                          \
@@ -675,11 +675,13 @@
       static_field(SystemDictionary,            WK_KLASS(StackOverflowError_klass),            Klass*)                               \
       static_field(SystemDictionary,            WK_KLASS(ProtectionDomain_klass),              Klass*)                               \
       static_field(SystemDictionary,            WK_KLASS(AccessControlContext_klass),          Klass*)                               \
+      static_field(SystemDictionary,            WK_KLASS(SecureClassLoader_klass),             Klass*)                               \
       static_field(SystemDictionary,            WK_KLASS(Reference_klass),                     Klass*)                               \
       static_field(SystemDictionary,            WK_KLASS(SoftReference_klass),                 Klass*)                               \
       static_field(SystemDictionary,            WK_KLASS(WeakReference_klass),                 Klass*)                               \
       static_field(SystemDictionary,            WK_KLASS(FinalReference_klass),                Klass*)                               \
       static_field(SystemDictionary,            WK_KLASS(PhantomReference_klass),              Klass*)                               \
+      static_field(SystemDictionary,            WK_KLASS(Cleaner_klass),                       Klass*)                               \
       static_field(SystemDictionary,            WK_KLASS(Finalizer_klass),                     Klass*)                               \
       static_field(SystemDictionary,            WK_KLASS(Thread_klass),                        Klass*)                               \
       static_field(SystemDictionary,            WK_KLASS(ThreadGroup_klass),                   Klass*)                               \
@@ -823,6 +825,7 @@
      static_field(StubRoutines,                _cipherBlockChaining_decryptAESCrypt,          address)                               \
      static_field(StubRoutines,                _updateBytesCRC32,                             address)                               \
      static_field(StubRoutines,                _crc_table_adr,                                address)                               \
+     static_field(StubRoutines,                _multiplyToLen,                                address)                               \
      static_field(StubRoutines,                _jbyte_arraycopy,                              address)                               \
      static_field(StubRoutines,                _jshort_arraycopy,                             address)                               \
      static_field(StubRoutines,                _jint_arraycopy,                               address)                               \
@@ -2598,6 +2601,7 @@
   declare_constant(Deoptimization::Reason_age)                            \
   declare_constant(Deoptimization::Reason_predicate)                      \
   declare_constant(Deoptimization::Reason_loop_limit_check)               \
+  declare_constant(Deoptimization::Reason_unstable_if)                    \
   declare_constant(Deoptimization::Reason_LIMIT)                          \
   declare_constant(Deoptimization::Reason_RECORDED_LIMIT)                 \
                                                                           \
--- a/src/share/vm/runtime/vmThread.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/vmThread.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -682,7 +682,7 @@
 }
 
 
-void VMThread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
+void VMThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
   Thread::oops_do(f, cld_f, cf);
   _vm_queue->oops_do(f);
 }
--- a/src/share/vm/runtime/vmThread.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/vmThread.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -126,7 +126,7 @@
   static VMThread* vm_thread()                    { return _vm_thread; }
 
   // GC support
-  void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
+  void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
 
   // Debugging
   void print_on(outputStream* st) const;
--- a/src/share/vm/runtime/vm_operations.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/runtime/vm_operations.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -68,6 +68,7 @@
   template(G1CollectFull)                         \
   template(G1CollectForAllocation)                \
   template(G1IncCollectionPause)                  \
+  template(DestroyAllocationContext)              \
   template(EnableBiasedLocking)                   \
   template(RevokeBias)                            \
   template(BulkRevokeBias)                        \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/services/allocationContextService.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_ALLOCATION_CONTEXT_SERVICE_HPP
+#define SHARE_VM_SERVICES_ALLOCATION_CONTEXT_SERVICE_HPP
+
+#include "utilities/exceptions.hpp"
+
+class AllocationContextService: public AllStatic {
+public:
+  static inline bool should_notify();
+  static inline void notify(TRAPS);
+};
+
+bool AllocationContextService::should_notify() { return false; }
+void AllocationContextService::notify(TRAPS) { }
+
+#endif // SHARE_VM_SERVICES_ALLOCATION_CONTEXT_SERVICE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/services/allocationSite.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_ALLOCATION_SITE_HPP
+#define SHARE_VM_SERVICES_ALLOCATION_SITE_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/nativeCallStack.hpp"
+
+// An allocation site represents a code path that makes a memory
+// allocation
+template <class E> class AllocationSite VALUE_OBJ_CLASS_SPEC {
+ private:
+  NativeCallStack  _call_stack;
+  E                e;
+ public:
+  AllocationSite(const NativeCallStack& stack) : _call_stack(stack) { }
+  int hash() const { return _call_stack.hash(); }
+  bool equals(const NativeCallStack& stack) const {
+    return _call_stack.equals(stack);
+  }
+
+  bool equals(const AllocationSite<E>& other) const {
+    return other.equals(_call_stack);
+  }
+
+  const NativeCallStack* call_stack() const {
+    return &_call_stack;
+  }
+
+  // Information regarding this allocation
+  E* data()             { return &e; }
+  const E* peek() const { return &e; }
+};
+
+#endif  // SHARE_VM_SERVICES_ALLOCATION_SITE_HPP
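
A hedged usage sketch of the AllocationSite template above (illustrative only; ByteCounter and record_bytes are hypothetical names, not part of the changeset). The payload type E only needs to be default-constructible; data() returns a mutable view and peek() a read-only one:

    // Hypothetical payload pairing a call stack with a running byte count.
    struct ByteCounter {
      size_t bytes;
      ByteCounter() : bytes(0) { }
    };

    // Assumes a NativeCallStack captured elsewhere by the caller.
    static size_t record_bytes(const NativeCallStack& stack, size_t sz) {
      AllocationSite<ByteCounter> site(stack);
      site.data()->bytes += sz;        // mutate through data()
      return site.peek()->bytes;       // read back through peek()
    }
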
--- a/src/share/vm/services/attachListener.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/services/attachListener.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -162,10 +162,7 @@
     java_lang_Throwable::print(PENDING_EXCEPTION, out);
     out->cr();
     CLEAR_PENDING_EXCEPTION;
-    // The exception has been printed on the output stream
-    // If the JVM returns JNI_ERR, the attachAPI throws a generic I/O
-    // exception and the content of the output stream is not processed.
-    // By returning JNI_OK, the exception will be displayed on the client side
+    return JNI_ERR;
   }
   return JNI_OK;
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/services/mallocSiteTable.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+
+
+#include "memory/allocation.inline.hpp"
+#include "runtime/atomic.hpp"
+#include "services/mallocSiteTable.hpp"
+
+/*
+ * Early os::malloc() calls come from initializations of static variables, long before entering any
+ * VM code. Upon the arrival of the first os::malloc() call, the malloc site hashtable has to be
+ * initialized, along with the allocation site for the hashtable entries.
+ * To ensure that the malloc site hashtable can be initialized without triggering any additional
+ * os::malloc() call, the hashtable bucket array and the hashtable entry allocation site have to be static.
+ * This is not a problem for the hashtable buckets, since they form an array of pointer type: the C runtime
+ * just allocates a block of memory and zeroes it.
+ * But for the hashtable entry allocation site object, things get tricky. The C runtime not only allocates
+ * memory for it, but also calls its constructor at some later time. If we initialized the allocation site
+ * at the first os::malloc() call, the object would be reinitialized when its constructor is later run
+ * by the C runtime.
+ * To work around this issue, we declare a static size_t array with the size of the CallsiteHashtableEntry;
+ * that memory is used to instantiate the CallsiteHashtableEntry for the hashtable entry allocation site.
+ * Since it is a primitive-type array, the C runtime does nothing other than assign the memory block to the
+ * variable, which is exactly what we want.
+ * The same trick is also applied to create the NativeCallStack object for the CallsiteHashtableEntry
+ * memory allocation.
+ *
+ * Note: C++ objects are usually aligned to a particular boundary, depending on the compiler implementation,
+ * so we declare the memory as size_t arrays to ensure it is aligned to the native machine word.
+ */
+
+// Reserve enough memory for NativeCallStack and MallocSiteHashtableEntry objects
+size_t MallocSiteTable::_hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
+size_t MallocSiteTable::_hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];
+
+// Malloc site hashtable buckets
+MallocSiteHashtableEntry*  MallocSiteTable::_table[MallocSiteTable::table_size];
+
+// concurrent access counter
+volatile int MallocSiteTable::_access_count = 0;
+
+// Tracking hashtable contention
+NOT_PRODUCT(int MallocSiteTable::_peak_count = 0;)
+
+
+/*
+ * Initialize malloc site table.
+ * Hashtable entries are themselves malloc'd, which could cause infinite recursion.
+ * To avoid that problem, we pre-initialize a hash entry for
+ * this allocation site.
+ * The method is called during C runtime static variable initialization,
+ * so it runs in single-threaded mode from the JVM's perspective.
+ */
+bool MallocSiteTable::initialize() {
+  assert(sizeof(_hash_entry_allocation_stack) >= sizeof(NativeCallStack), "Sanity Check");
+  assert(sizeof(_hash_entry_allocation_site) >= sizeof(MallocSiteHashtableEntry),
+    "Sanity Check");
+  assert((size_t)table_size <= MAX_MALLOCSITE_TABLE_SIZE, "Hashtable overflow");
+
+  // Fake the call stack for hashtable entry allocation
+  assert(NMT_TrackingStackDepth > 1, "At least one tracking stack");
+
+  // Create pseudo call stack for hashtable entry allocation
+  address pc[3];
+  if (NMT_TrackingStackDepth >= 3) {
+    pc[2] = (address)MallocSiteTable::allocation_at;
+  }
+  if (NMT_TrackingStackDepth >= 2) {
+    pc[1] = (address)MallocSiteTable::lookup_or_add;
+  }
+  pc[0] = (address)MallocSiteTable::new_entry;
+
+  // Instantiate NativeCallStack object, have to use placement new operator. (see comments above)
+  NativeCallStack* stack = ::new ((void*)_hash_entry_allocation_stack)
+    NativeCallStack(pc, MIN2(((int)(sizeof(pc) / sizeof(address))), ((int)NMT_TrackingStackDepth)));
+
+  // Instantiate hash entry for hashtable entry allocation callsite
+  MallocSiteHashtableEntry* entry = ::new ((void*)_hash_entry_allocation_site)
+    MallocSiteHashtableEntry(*stack);
+
+  // Add the allocation site to hashtable.
+  int index = hash_to_index(stack->hash());
+  _table[index] = entry;
+
+  return true;
+}
+
+// Walks entries in the hashtable.
+// It stops the walk if the walker returns false.
+bool MallocSiteTable::walk(MallocSiteWalker* walker) {
+  MallocSiteHashtableEntry* head;
+  for (int index = 0; index < table_size; index ++) {
+    head = _table[index];
+    while (head != NULL) {
+      if (!walker->do_malloc_site(head->peek())) {
+        return false;
+      }
+      head = (MallocSiteHashtableEntry*)head->next();
+    }
+  }
+  return true;
+}
+
+/*
+ *  The hashtable has no deletion policy for individual entries,
+ *  and each linked-list node is inserted via compare-and-swap,
+ *  so each linked list is stable and contention only happens
+ *  at the end of a linked list.
+ *  This method should not return NULL under normal circumstances.
+ *  If NULL is returned, it indicates either:
+ *    1. Out of memory: a new hash entry cannot be allocated.
+ *    2. The hash bucket has overflowed.
+ *  In either case, the caller should handle the situation.
+ */
+MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* bucket_idx,
+  size_t* pos_idx) {
+  int index = hash_to_index(key.hash());
+  assert(index >= 0, "Negative index");
+  *bucket_idx = (size_t)index;
+  *pos_idx = 0;
+
+  // First entry for this hash bucket
+  if (_table[index] == NULL) {
+    MallocSiteHashtableEntry* entry = new_entry(key);
+    // OOM check
+    if (entry == NULL) return NULL;
+
+    // swap in the head
+    if (Atomic::cmpxchg_ptr((void*)entry, (volatile void *)&_table[index], NULL) == NULL) {
+      return entry->data();
+    }
+
+    delete entry;
+  }
+
+  MallocSiteHashtableEntry* head = _table[index];
+  while (head != NULL && (*pos_idx) <= MAX_BUCKET_LENGTH) {
+    MallocSite* site = head->data();
+    if (site->equals(key)) {
+      // found matched entry
+      return head->data();
+    }
+
+    if (head->next() == NULL && (*pos_idx) < MAX_BUCKET_LENGTH) {
+      MallocSiteHashtableEntry* entry = new_entry(key);
+      // OOM check
+      if (entry == NULL) return NULL;
+      if (head->atomic_insert(entry)) {
+        (*pos_idx) ++;
+        return entry->data();
+      }
+      // contended, other thread won
+      delete entry;
+    }
+    head = (MallocSiteHashtableEntry*)head->next();
+    (*pos_idx) ++;
+  }
+  return NULL;
+}
+
+// Access malloc site
+MallocSite* MallocSiteTable::malloc_site(size_t bucket_idx, size_t pos_idx) {
+  assert(bucket_idx < table_size, "Invalid bucket index");
+  MallocSiteHashtableEntry* head = _table[bucket_idx];
+  for (size_t index = 0; index < pos_idx && head != NULL;
+    index ++, head = (MallocSiteHashtableEntry*)head->next());
+  assert(head != NULL, "Invalid position index");
+  return head->data();
+}
+
+// Allocates a MallocSiteHashtableEntry object. A special call stack
+// (pre-installed allocation site) has to be used to avoid infinite
+// recursion.
+MallocSiteHashtableEntry* MallocSiteTable::new_entry(const NativeCallStack& key) {
+  void* p = AllocateHeap(sizeof(MallocSiteHashtableEntry), mtNMT,
+    *hash_entry_allocation_stack(), AllocFailStrategy::RETURN_NULL);
+  return ::new (p) MallocSiteHashtableEntry(key);
+}
+
+void MallocSiteTable::reset() {
+  for (int index = 0; index < table_size; index ++) {
+    MallocSiteHashtableEntry* head = _table[index];
+    _table[index] = NULL;
+    delete_linked_list(head);
+  }
+}
+
+void MallocSiteTable::delete_linked_list(MallocSiteHashtableEntry* head) {
+  MallocSiteHashtableEntry* p;
+  while (head != NULL) {
+    p = head;
+    head = (MallocSiteHashtableEntry*)head->next();
+    if (p != (MallocSiteHashtableEntry*)_hash_entry_allocation_site) {
+      delete p;
+    }
+  }
+}
+
+void MallocSiteTable::shutdown() {
+  AccessLock locker(&_access_count);
+  locker.exclusiveLock();
+  reset();
+}
+
+bool MallocSiteTable::walk_malloc_site(MallocSiteWalker* walker) {
+  assert(walker != NULL, "NULL walker");
+  AccessLock locker(&_access_count);
+  if (locker.sharedLock()) {
+    NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
+    return walk(walker);
+  }
+  return false;
+}
+
+
+void MallocSiteTable::AccessLock::exclusiveLock() {
+  jint target;
+  jint val;
+
+  assert(_lock_state != ExclusiveLock, "Can only call once");
+  assert(*_lock >= 0, "Can not contend exclusive lock");
+
+  // make counter negative to block out shared locks
+  do {
+    val = *_lock;
+    target = _MAGIC_ + *_lock;
+  } while (Atomic::cmpxchg(target, _lock, val) != val);
+
+  // wait for all readers to exit
+  while (*_lock != _MAGIC_) {
+#ifdef _WINDOWS
+    os::naked_short_sleep(1);
+#else
+    os::NakedYield();
+#endif
+  }
+  _lock_state = ExclusiveLock;
+}
+
+
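
The initialization described at the top of mallocSiteTable.cpp relies on a standard C++ pattern: reserve word-aligned storage as a primitive array (which the C runtime merely zero-fills) and construct the object into it with placement new. A self-contained sketch of that pattern under the same assumptions (Tracker and init_tracker are hypothetical names, not part of the changeset):

    #include <new>
    #include <cstddef>

    struct Tracker {
      int hits;
      Tracker() : hits(0) { }
    };

    // Zero-filled by the C runtime; no constructor ever runs on the array itself,
    // so the storage is usable before static constructors have executed.
    static size_t tracker_storage[(sizeof(Tracker) + sizeof(size_t) - 1) / sizeof(size_t)];

    static Tracker* init_tracker() {
      // Construct the object in the reserved storage (placement new); call exactly once.
      return ::new (static_cast<void*>(tracker_storage)) Tracker();
    }
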
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/services/mallocSiteTable.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
+#define SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
+
+#if INCLUDE_NMT
+
+#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
+#include "services/allocationSite.hpp"
+#include "services/mallocTracker.hpp"
+#include "services/nmtCommon.hpp"
+#include "utilities/nativeCallStack.hpp"
+
+// MallocSite represents a code path that eventually calls
+// os::malloc() to allocate memory
+class MallocSite : public AllocationSite<MemoryCounter> {
+ public:
+  MallocSite() :
+    AllocationSite<MemoryCounter>(NativeCallStack::EMPTY_STACK) { }
+
+  MallocSite(const NativeCallStack& stack) :
+    AllocationSite<MemoryCounter>(stack) { }
+
+  void allocate(size_t size)      { data()->allocate(size);   }
+  void deallocate(size_t size)    { data()->deallocate(size); }
+
+  // Memory allocated from this code path
+  size_t size()  const { return peek()->size(); }
+  // The number of calls that were made
+  size_t count() const { return peek()->count(); }
+};
+
+// Malloc site hashtable entry
+class MallocSiteHashtableEntry : public CHeapObj<mtNMT> {
+ private:
+  MallocSite                _malloc_site;
+  MallocSiteHashtableEntry* _next;
+
+ public:
+  MallocSiteHashtableEntry() : _next(NULL) { }
+
+  MallocSiteHashtableEntry(NativeCallStack stack):
+    _malloc_site(stack), _next(NULL) { }
+
+  inline const MallocSiteHashtableEntry* next() const {
+    return _next;
+  }
+
+  // Insert an entry atomically.
+  // Return true if the entry is inserted successfully.
+  // The operation can fail due to contention from other threads.
+  bool atomic_insert(const MallocSiteHashtableEntry* entry) {
+    return (Atomic::cmpxchg_ptr((void*)entry, (volatile void*)&_next,
+      NULL) == NULL);
+  }
+
+  void set_callsite(const MallocSite& site) {
+    _malloc_site = site;
+  }
+
+  inline const MallocSite* peek() const { return &_malloc_site; }
+  inline MallocSite* data()             { return &_malloc_site; }
+
+  inline long hash() const { return _malloc_site.hash(); }
+  inline bool equals(const NativeCallStack& stack) const {
+    return _malloc_site.equals(stack);
+  }
+  // Allocation/deallocation on this allocation site
+  inline void allocate(size_t size)   { _malloc_site.allocate(size);   }
+  inline void deallocate(size_t size) { _malloc_site.deallocate(size); }
+  // Memory counters
+  inline size_t size() const  { return _malloc_site.size();  }
+  inline size_t count() const { return _malloc_site.count(); }
+};
+
+// The walker visits every entry in the MallocSiteTable
+class MallocSiteWalker : public StackObj {
+ public:
+   virtual bool do_malloc_site(const MallocSite* e) { return false; }
+};
+
+/*
+ * Native memory tracking call site table.
+ * The table is only needed when detail tracking is enabled.
+ */
+class MallocSiteTable : AllStatic {
+ private:
+  // The number of hash buckets in this hashtable. The number should
+  // be tuned if malloc activity changes significantly.
+  // The statistics data can be obtained via jcmd:
+  // jcmd <pid> VM.native_memory statistics.
+
+  // Currently, the (number of buckets / number of entries) ratio is
+  // about 1 / 6.
+  enum {
+    table_base_size = 128,   // The base size is calculated from statistics to give
+                             // a table ratio of around 1:6
+    table_size = (table_base_size * NMT_TrackingStackDepth - 1)
+  };
+
+
+  // This is a very special lock that allows multiple shared accesses (sharedLock), but
+  // once exclusive access (exclusiveLock) is requested, all shared accesses are
+  // rejected forever.
+  class AccessLock : public StackObj {
+    enum LockState {
+      NoLock,
+      SharedLock,
+      ExclusiveLock
+    };
+
+   private:
+    // A very large negative number. The only possibility to "overflow"
+    // this number is when there are more than -min_jint threads in
+    // this process, which is not going to happen in the foreseeable future.
+    const static int _MAGIC_ = min_jint;
+
+    LockState      _lock_state;
+    volatile int*  _lock;
+   public:
+    AccessLock(volatile int* lock) :
+      _lock(lock), _lock_state(NoLock) {
+    }
+
+    ~AccessLock() {
+      if (_lock_state == SharedLock) {
+        Atomic::dec((volatile jint*)_lock);
+      }
+    }
+    // Acquire shared lock.
+    // Return true if shared access is granted.
+    inline bool sharedLock() {
+      jint res = Atomic::add(1, _lock);
+      if (res < 0) {
+        Atomic::add(-1, _lock);
+        return false;
+      }
+      _lock_state = SharedLock;
+      return true;
+    }
+    // Acquire exclusive lock
+    void exclusiveLock();
+ };
+
+ public:
+  static bool initialize();
+  static void shutdown();
+
+  NOT_PRODUCT(static int access_peak_count() { return _peak_count; })
+
+  // Number of hash buckets
+  static inline int hash_buckets()      { return (int)table_size; }
+
+  // Access and copy a call stack from this table. A shared lock should be
+  // acquired before accessing the entry.
+  static inline bool access_stack(NativeCallStack& stack, size_t bucket_idx,
+    size_t pos_idx) {
+    AccessLock locker(&_access_count);
+    if (locker.sharedLock()) {
+      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
+      MallocSite* site = malloc_site(bucket_idx, pos_idx);
+      if (site != NULL) {
+        stack = *site->call_stack();
+        return true;
+      }
+    }
+    return false;
+  }
+
+  // Record a new allocation from the specified call path.
+  // Returns true if the allocation is recorded successfully; bucket_idx
+  // and pos_idx are also updated to indicate the entry where the allocation
+  // information was recorded.
+  // Returns false only under rare scenarios:
+  //  1. out of memory
+  //  2. overflow of the hash bucket
+  static inline bool allocation_at(const NativeCallStack& stack, size_t size,
+    size_t* bucket_idx, size_t* pos_idx) {
+    AccessLock locker(&_access_count);
+    if (locker.sharedLock()) {
+      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
+      MallocSite* site = lookup_or_add(stack, bucket_idx, pos_idx);
+      if (site != NULL) site->allocate(size);
+      return site != NULL;
+    }
+    return false;
+  }
+
+  // Record memory deallocation. bucket_idx and pos_idx indicate where the allocation
+  // information was recorded.
+  static inline bool deallocation_at(size_t size, size_t bucket_idx, size_t pos_idx) {
+    AccessLock locker(&_access_count);
+    if (locker.sharedLock()) {
+      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
+      MallocSite* site = malloc_site(bucket_idx, pos_idx);
+      if (site != NULL) {
+        site->deallocate(size);
+        return true;
+      }
+    }
+    return false;
+  }
+
+  // Walk this table.
+  static bool walk_malloc_site(MallocSiteWalker* walker);
+
+ private:
+  static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key);
+  static void reset();
+
+  // Delete a bucket linked list
+  static void delete_linked_list(MallocSiteHashtableEntry* head);
+
+  static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx);
+  static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
+  static bool walk(MallocSiteWalker* walker);
+
+  static inline int hash_to_index(int  hash) {
+    hash = (hash > 0) ? hash : (-hash);
+    return (hash % table_size);
+  }
+
+  static inline const NativeCallStack* hash_entry_allocation_stack() {
+    return (NativeCallStack*)_hash_entry_allocation_stack;
+  }
+
+ private:
+  // Counter for counting concurrent access
+  static volatile int                _access_count;
+
+  // The callsite hashtable. It has to be a static table,
+  // since malloc calls can come from the C runtime linker.
+  static MallocSiteHashtableEntry*   _table[table_size];
+
+
+  // Reserve enough memory for placing the objects
+
+  // The memory for hashtable entry allocation stack object
+  static size_t _hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
+  // The memory for hashtable entry allocation callsite object
+  static size_t _hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];
+  NOT_PRODUCT(static int     _peak_count;)
+};
+
+#endif // INCLUDE_NMT
+#endif // SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
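
AccessLock above is a counter-based lock: sharedLock() increments the access counter and backs off if the value has gone negative, while the one-shot exclusiveLock() adds a very large negative constant so that newcomers are rejected, then waits for in-flight readers to drain. A self-contained sketch of the same idea using std::atomic (illustrative only, not the changeset's implementation):

    #include <atomic>
    #include <climits>
    #include <thread>

    static std::atomic<int> lock_word(0);
    static const int MAGIC = INT_MIN;      // plays the role of AccessLock::_MAGIC_

    static bool shared_lock() {
      if (lock_word.fetch_add(1) < 0) {    // a writer has already arrived
        lock_word.fetch_sub(1);
        return false;
      }
      return true;
    }

    static void shared_unlock() { lock_word.fetch_sub(1); }

    static void exclusive_lock() {         // callable once, like AccessLock::exclusiveLock()
      int val = lock_word.load();
      // Shift the counter negative: in-flight readers stay counted, newcomers back off.
      while (!lock_word.compare_exchange_weak(val, val + MAGIC)) { /* val reloaded, retry */ }
      while (lock_word.load() != MAGIC) {  // wait until all readers have released
        std::this_thread::yield();
      }
    }
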
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/services/mallocTracker.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+
+#include "runtime/atomic.hpp"
+#include "runtime/atomic.inline.hpp"
+#include "services/mallocSiteTable.hpp"
+#include "services/mallocTracker.hpp"
+#include "services/mallocTracker.inline.hpp"
+#include "services/memTracker.hpp"
+
+size_t MallocMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
+
+// Total malloc'd memory amount
+size_t MallocMemorySnapshot::total() const {
+  size_t amount = 0;
+  for (int index = 0; index < mt_number_of_types; index ++) {
+    amount += _malloc[index].malloc_size();
+  }
+  amount += _tracking_header.size() + total_arena();
+  return amount;
+}
+
+// Total malloc'd memory used by arenas
+size_t MallocMemorySnapshot::total_arena() const {
+  size_t amount = 0;
+  for (int index = 0; index < mt_number_of_types; index ++) {
+    amount += _malloc[index].arena_size();
+  }
+  return amount;
+}
+
+// Make adjustment by subtracting chunks used by arenas
+// from total chunks to get the total free chunk size
+void MallocMemorySnapshot::make_adjustment() {
+  size_t arena_size = total_arena();
+  int chunk_idx = NMTUtil::flag_to_index(mtChunk);
+  _malloc[chunk_idx].record_free(arena_size);
+}
+
+
+void MallocMemorySummary::initialize() {
+  assert(sizeof(_snapshot) >= sizeof(MallocMemorySnapshot), "Sanity Check");
+  // Uses placement new operator to initialize static area.
+  ::new ((void*)_snapshot)MallocMemorySnapshot();
+}
+
+void MallocHeader::release() const {
+  // Tracking has already been shut down; no housekeeping is needed anymore
+  if (MemTracker::tracking_level() <= NMT_minimal) return;
+
+  MallocMemorySummary::record_free(size(), flags());
+  MallocMemorySummary::record_free_malloc_header(sizeof(MallocHeader));
+  if (MemTracker::tracking_level() == NMT_detail) {
+    MallocSiteTable::deallocation_at(size(), _bucket_idx, _pos_idx);
+  }
+}
+
+bool MallocHeader::record_malloc_site(const NativeCallStack& stack, size_t size,
+  size_t* bucket_idx, size_t* pos_idx) const {
+  bool ret =  MallocSiteTable::allocation_at(stack, size, bucket_idx, pos_idx);
+
+  // Something went wrong: it could be OOM or an overflow of the malloc site table.
+  // We want to keep tracking data even under OOM circumstances, so transition to
+  // summary tracking.
+  if (!ret) {
+    MemTracker::transition_to(NMT_summary);
+  }
+  return ret;
+}
+
+bool MallocHeader::get_stack(NativeCallStack& stack) const {
+  return MallocSiteTable::access_stack(stack, _bucket_idx, _pos_idx);
+}
+
+bool MallocTracker::initialize(NMT_TrackingLevel level) {
+  if (level >= NMT_summary) {
+    MallocMemorySummary::initialize();
+  }
+
+  if (level == NMT_detail) {
+    return MallocSiteTable::initialize();
+  }
+  return true;
+}
+
+bool MallocTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
+  assert(from != NMT_off, "Can not transition from off state");
+  assert(to != NMT_off, "Can not transition to off state");
+  assert (from != NMT_minimal, "cannot transition from minimal state");
+
+  if (from == NMT_detail) {
+    assert(to == NMT_minimal || to == NMT_summary, "Just check");
+    MallocSiteTable::shutdown();
+  }
+  return true;
+}
+
+// Record a malloc memory allocation
+void* MallocTracker::record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
+  const NativeCallStack& stack, NMT_TrackingLevel level) {
+  void*         memblock;      // the address for user data
+  MallocHeader* header = NULL;
+
+  if (malloc_base == NULL) {
+    return NULL;
+  }
+
+  // Uses placement global new operator to initialize malloc header
+
+  if (level == NMT_off) {
+    return malloc_base;
+  }
+
+  header = ::new (malloc_base)MallocHeader(size, flags, stack, level);
+  memblock = (void*)((char*)malloc_base + sizeof(MallocHeader));
+
+  // The alignment check: 8-byte alignment for 32-bit systems,
+  //                      16-byte alignment for 64-bit systems.
+  assert(((size_t)memblock & (sizeof(size_t) * 2 - 1)) == 0, "Alignment check");
+
+#ifdef ASSERT
+  if (level > NMT_minimal) {
+    // Read back
+    assert(get_size(memblock) == size,   "Wrong size");
+    assert(get_flags(memblock) == flags, "Wrong flags");
+  }
+#endif
+
+  return memblock;
+}
+
+void* MallocTracker::record_free(void* memblock) {
+  // Never turned on
+  if (MemTracker::tracking_level() == NMT_off ||
+      memblock == NULL) {
+    return memblock;
+  }
+  MallocHeader* header = malloc_header(memblock);
+  header->release();
+
+  return (void*)header;
+}
+
+
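
MallocTracker::record_malloc() above lays a MallocHeader down at the start of the raw block and hands the caller the address immediately past it; record_free() walks back by the same offset to recover the header. A self-contained sketch of that header-prefix layout (Header, tracked_malloc and tracked_free are hypothetical names, not the changeset's API):

    #include <cstdlib>
    #include <cstddef>
    #include <new>

    struct Header {
      size_t size;
      size_t tag;     // stand-in for the flags/call-site bookkeeping kept by MallocHeader
      explicit Header(size_t sz) : size(sz), tag(0) { }
    };

    static void* tracked_malloc(size_t size) {
      void* base = std::malloc(size + sizeof(Header));
      if (base == NULL) return NULL;
      ::new (base) Header(size);                       // bookkeeping sits in front of the data
      // sizeof(Header) is 2 * sizeof(size_t), so on common platforms the user block keeps
      // the sizeof(size_t) * 2 alignment that the assert in record_malloc() checks.
      return (char*)base + sizeof(Header);
    }

    static void tracked_free(void* user) {
      if (user == NULL) return;
      Header* header = (Header*)((char*)user - sizeof(Header));
      std::free(header);                               // the header address is the malloc base
    }
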
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/services/mallocTracker.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,373 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
+#define SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
+
+#if INCLUDE_NMT
+
+#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
+#include "services/nmtCommon.hpp"
+#include "utilities/nativeCallStack.hpp"
+
+/*
+ * This counter class counts memory allocation and deallocation,
+ * records total memory allocation size and number of allocations.
+ * The counters are updated atomically.
+ */
+class MemoryCounter VALUE_OBJ_CLASS_SPEC {
+ private:
+  size_t   _count;
+  size_t   _size;
+
+  DEBUG_ONLY(size_t   _peak_count;)
+  DEBUG_ONLY(size_t   _peak_size; )
+
+ public:
+  MemoryCounter() : _count(0), _size(0) {
+    DEBUG_ONLY(_peak_count = 0;)
+    DEBUG_ONLY(_peak_size  = 0;)
+  }
+
+  inline void allocate(size_t sz) {
+    Atomic::add(1, (volatile MemoryCounterType*)&_count);
+    if (sz > 0) {
+      Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
+      DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size));
+    }
+    DEBUG_ONLY(_peak_count = MAX2(_peak_count, _count);)
+  }
+
+  inline void deallocate(size_t sz) {
+    assert(_count > 0, "Negative counter");
+    assert(_size >= sz, "Negative size");
+    Atomic::add(-1, (volatile MemoryCounterType*)&_count);
+    if (sz > 0) {
+      Atomic::add(-(MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
+    }
+  }
+
+  inline void resize(long sz) {
+    if (sz != 0) {
+      Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
+      DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
+    }
+  }
+
+  inline size_t count() const { return _count; }
+  inline size_t size()  const { return _size;  }
+  DEBUG_ONLY(inline size_t peak_count() const { return _peak_count; })
+  DEBUG_ONLY(inline size_t peak_size()  const { return _peak_size; })
+
+};
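A minimal usage sketch of the counter semantics described above. The helper name is hypothetical; in this changeset MemoryCounter is only manipulated through the MallocMemory and MallocMemorySummary wrappers that follow.

// Sketch only: count()/size() track net allocations.
static void memory_counter_example() {
  MemoryCounter counter;
  counter.allocate(100);    // count() == 1, size() == 100
  counter.allocate(50);     // count() == 2, size() == 150
  counter.deallocate(50);   // count() == 1, size() == 100
  counter.resize(28);       // size()  == 128, count unchanged (arena resize path)
  assert(counter.count() == 1 && counter.size() == 128, "net usage after the calls above");
}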
+
+/*
+ * Malloc memory used by a particular subsystem.
+ * It includes the memory acquired through os::malloc()
+ * calls and arenas' backing memory.
+ */
+class MallocMemory VALUE_OBJ_CLASS_SPEC {
+ private:
+  MemoryCounter _malloc;
+  MemoryCounter _arena;
+
+ public:
+  MallocMemory() { }
+
+  inline void record_malloc(size_t sz) {
+    _malloc.allocate(sz);
+  }
+
+  inline void record_free(size_t sz) {
+    _malloc.deallocate(sz);
+  }
+
+  inline void record_new_arena() {
+    _arena.allocate(0);
+  }
+
+  inline void record_arena_free() {
+    _arena.deallocate(0);
+  }
+
+  inline void record_arena_size_change(long sz) {
+    _arena.resize(sz);
+  }
+
+  inline size_t malloc_size()  const { return _malloc.size(); }
+  inline size_t malloc_count() const { return _malloc.count();}
+  inline size_t arena_size()   const { return _arena.size();  }
+  inline size_t arena_count()  const { return _arena.count(); }
+
+  DEBUG_ONLY(inline const MemoryCounter& malloc_counter() const { return _malloc; })
+  DEBUG_ONLY(inline const MemoryCounter& arena_counter()  const { return _arena;  })
+};
+
+class MallocMemorySummary;
+
+// A snapshot of malloc'd memory. It includes malloc memory
+// usage by type and the memory used by tracking itself.
+class MallocMemorySnapshot : public ResourceObj {
+  friend class MallocMemorySummary;
+
+ private:
+  MallocMemory      _malloc[mt_number_of_types];
+  MemoryCounter     _tracking_header;
+
+
+ public:
+  inline MallocMemory*  by_type(MEMFLAGS flags) {
+    int index = NMTUtil::flag_to_index(flags);
+    return &_malloc[index];
+  }
+
+  inline MallocMemory* by_index(int index) {
+    assert(index >= 0, "Index out of bound");
+    assert(index < mt_number_of_types, "Index out of bound");
+    return &_malloc[index];
+  }
+
+  inline MemoryCounter* malloc_overhead() {
+    return &_tracking_header;
+  }
+
+  // Total malloc'd memory amount
+  size_t total() const;
+  // Total malloc'd memory used by arenas
+  size_t total_arena() const;
+
+  inline size_t thread_count() const {
+    MallocMemorySnapshot* s = const_cast<MallocMemorySnapshot*>(this);
+    return s->by_type(mtThreadStack)->malloc_count();
+  }
+
+  void copy_to(MallocMemorySnapshot* s) {
+    s->_tracking_header = _tracking_header;
+    for (int index = 0; index < mt_number_of_types; index ++) {
+      s->_malloc[index] = _malloc[index];
+    }
+  }
+
+  // Make adjustment by subtracting chunks used by arenas
+  // from total chunks to get total free chunk size
+  void make_adjustment();
+};
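A small sketch of reading a snapshot once it has been filled in. The helper name is hypothetical; it only uses the accessors declared above.

// Sketch only: total malloc'd bytes summed over all memory types.
static size_t malloc_bytes_from_snapshot(MallocMemorySnapshot* s) {
  size_t amount = 0;
  for (int index = 0; index < mt_number_of_types; index++) {
    amount += s->by_index(index)->malloc_size();
  }
  return amount;   // per-type malloc sizes only; arena memory and tracking headers are counted separately
}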
+
+/*
+ * This class is for collecting malloc statistics at summary level
+ */
+class MallocMemorySummary : AllStatic {
+ private:
+  // Reserve memory for placement of MallocMemorySnapshot object
+  static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
+
+ public:
+   static void initialize();
+
+   static inline void record_malloc(size_t size, MEMFLAGS flag) {
+     as_snapshot()->by_type(flag)->record_malloc(size);
+   }
+
+   static inline void record_free(size_t size, MEMFLAGS flag) {
+     as_snapshot()->by_type(flag)->record_free(size);
+   }
+
+   static inline void record_new_arena(MEMFLAGS flag) {
+     as_snapshot()->by_type(flag)->record_new_arena();
+   }
+
+   static inline void record_arena_free(MEMFLAGS flag) {
+     as_snapshot()->by_type(flag)->record_arena_free();
+   }
+
+   static inline void record_arena_size_change(long size, MEMFLAGS flag) {
+     as_snapshot()->by_type(flag)->record_arena_size_change(size);
+   }
+
+   static void snapshot(MallocMemorySnapshot* s) {
+     as_snapshot()->copy_to(s);
+     s->make_adjustment();
+   }
+
+   // Record memory used by malloc tracking header
+   static inline void record_new_malloc_header(size_t sz) {
+     as_snapshot()->malloc_overhead()->allocate(sz);
+   }
+
+   static inline void record_free_malloc_header(size_t sz) {
+     as_snapshot()->malloc_overhead()->deallocate(sz);
+   }
+
+   // The memory used by malloc tracking headers
+   static inline size_t tracking_overhead() {
+     return as_snapshot()->malloc_overhead()->size();
+   }
+
+  static MallocMemorySnapshot* as_snapshot() {
+    return (MallocMemorySnapshot*)_snapshot;
+  }
+};
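As a usage sketch, summary data is read by copying the live snapshot out through MallocMemorySummary::snapshot() and then using the per-type accessors; the helper name and the choice of mtInternal are illustrative only.

// Sketch only (hypothetical helper): bytes currently malloc'd under mtInternal.
static size_t internal_malloc_bytes() {
  MallocMemorySnapshot snapshot;
  MallocMemorySummary::snapshot(&snapshot);            // copy_to() + make_adjustment()
  return snapshot.by_type(mtInternal)->malloc_size();
}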
+
+
+/*
+ * Malloc tracking header.
+ * To satisfy malloc alignment requirements, NMT uses 2 machine words for tracking purposes,
+ * which ensures 8-byte alignment on 32-bit systems and 16-byte alignment on 64-bit systems (product build).
+ */
+
+class MallocHeader VALUE_OBJ_CLASS_SPEC {
+#ifdef _LP64
+  size_t           _size      : 64;
+  size_t           _flags     : 8;
+  size_t           _pos_idx   : 16;
+  size_t           _bucket_idx: 40;
+#define MAX_MALLOCSITE_TABLE_SIZE right_n_bits(40)
+#define MAX_BUCKET_LENGTH         right_n_bits(16)
+#else
+  size_t           _size      : 32;
+  size_t           _flags     : 8;
+  size_t           _pos_idx   : 8;
+  size_t           _bucket_idx: 16;
+#define MAX_MALLOCSITE_TABLE_SIZE  right_n_bits(16)
+#define MAX_BUCKET_LENGTH          right_n_bits(8)
+#endif  // _LP64
+
+ public:
+  MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack, NMT_TrackingLevel level) {
+    assert(sizeof(MallocHeader) == sizeof(void*) * 2,
+      "Wrong header size");
+
+    if (level == NMT_minimal) {
+      return;
+    }
+
+    _flags = flags;
+    set_size(size);
+    if (level == NMT_detail) {
+      size_t bucket_idx;
+      size_t pos_idx;
+      if (record_malloc_site(stack, size, &bucket_idx, &pos_idx)) {
+        assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE, "Overflow bucket index");
+        assert(pos_idx <= MAX_BUCKET_LENGTH, "Overflow bucket position index");
+        _bucket_idx = bucket_idx;
+        _pos_idx = pos_idx;
+      }
+    }
+
+    MallocMemorySummary::record_malloc(size, flags);
+    MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
+  }
+
+  inline size_t   size()  const { return _size; }
+  inline MEMFLAGS flags() const { return (MEMFLAGS)_flags; }
+  bool get_stack(NativeCallStack& stack) const;
+
+  // Clean up tracking information before the memory is released.
+  void release() const;
+
+ private:
+  inline void set_size(size_t size) {
+    _size = size;
+  }
+  bool record_malloc_site(const NativeCallStack& stack, size_t size,
+    size_t* bucket_idx, size_t* pos_idx) const;
+};
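On an _LP64 build the bit-fields above occupy 64 + 8 + 16 + 40 = 128 bits, exactly the two machine words the constructor asserts (32 + 8 + 8 + 16 = 64 bits on 32-bit platforms), so every tracked allocation carries a 16-byte header on 64-bit and an 8-byte header on 32-bit. A purely illustrative check of that layout, with a hypothetical helper name:

// Sketch only: the header is exactly two machine words and sits in front of the user data.
static void malloc_header_layout_example() {
  void* two_words[2];
  assert(sizeof(MallocHeader) == sizeof(two_words), "header must stay two machine words");
  // A tracked block is laid out as:
  //   | MallocHeader | user data ...                                 |
  //   ^ malloc_base  ^ memblock = malloc_base + sizeof(MallocHeader)
}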
+
+
+// Main class called from MemTracker to track malloc activities
+class MallocTracker : AllStatic {
+ public:
+  // Initialize malloc tracker for specific tracking level
+  static bool initialize(NMT_TrackingLevel level);
+
+  static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);
+
+  // Malloc tracking header size for a specific tracking level
+  static inline size_t malloc_header_size(NMT_TrackingLevel level) {
+    return (level == NMT_off) ? 0 : sizeof(MallocHeader);
+  }
+
+  // Parameter name convention:
+  // memblock :   the beginning address for user data
+  // malloc_base: the beginning address that includes malloc tracking header
+  //
+  // The relationship:
+  // memblock = (char*)malloc_base + sizeof(nmt header)
+  //
+
+  // Record a malloc on the specified memory block
+  static void* record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
+    const NativeCallStack& stack, NMT_TrackingLevel level);
+
+  // Record free on specified memory block
+  static void* record_free(void* memblock);
+
+  // Offset memory address to header address
+  static inline void* get_base(void* memblock);
+  static inline void* get_base(void* memblock, NMT_TrackingLevel level) {
+    if (memblock == NULL || level == NMT_off) return memblock;
+    return (char*)memblock - malloc_header_size(level);
+  }
+
+  // Get memory size
+  static inline size_t get_size(void* memblock) {
+    MallocHeader* header = malloc_header(memblock);
+    return header->size();
+  }
+
+  // Get memory type
+  static inline MEMFLAGS get_flags(void* memblock) {
+    MallocHeader* header = malloc_header(memblock);
+    return header->flags();
+  }
+
+  // Get header size
+  static inline size_t get_header_size(void* memblock) {
+    return (memblock == NULL) ? 0 : sizeof(MallocHeader);
+  }
+
+  static inline void record_new_arena(MEMFLAGS flags) {
+    MallocMemorySummary::record_new_arena(flags);
+  }
+
+  static inline void record_arena_free(MEMFLAGS flags) {
+    MallocMemorySummary::record_arena_free(flags);
+  }
+
+  static inline void record_arena_size_change(int size, MEMFLAGS flags) {
+    MallocMemorySummary::record_arena_size_change(size, flags);
+  }
+ private:
+  static inline MallocHeader* malloc_header(void *memblock) {
+    assert(memblock != NULL, "NULL pointer");
+    MallocHeader* header = (MallocHeader*)((char*)memblock - sizeof(MallocHeader));
+    return header;
+  }
+};
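The memblock/malloc_base convention documented inside the class can be exercised with the helpers above. A hypothetical round-trip check, assuming record_malloc() has already written a header of the given size at malloc_base:

// Sketch only: the pointer arithmetic behind malloc_header_size()/get_base()/get_size().
static void malloc_tracker_round_trip(void* malloc_base, size_t user_size,
                                      NMT_TrackingLevel level) {
  void* memblock = (char*)malloc_base + MallocTracker::malloc_header_size(level);
  assert(MallocTracker::get_base(memblock, level) == malloc_base,
         "get_base must undo the header offset");
  if (level > NMT_minimal) {
    // Header fields are only filled in at summary/detail level.
    assert(MallocTracker::get_size(memblock) == user_size, "header records the user size");
  }
}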
+
+#endif // INCLUDE_NMT
+
+
+#endif //SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/services/mallocTracker.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_MALLOC_TRACKER_INLINE_HPP
+#define SHARE_VM_SERVICES_MALLOC_TRACKER_INLINE_HPP
+
+#include "services/mallocTracker.hpp"
+#include "services/memTracker.hpp"
+
+inline void* MallocTracker::get_base(void* memblock) {
+  return get_base(memblock, MemTracker::tracking_level());
+}
+
+#endif // SHARE_VM_SERVICES_MALLOC_TRACKER_INLINE_HPP
+
--- a/src/share/vm/services/management.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/services/management.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -39,6 +39,7 @@
 #include "runtime/jniHandles.hpp"
 #include "runtime/os.hpp"
 #include "runtime/serviceThread.hpp"
+#include "runtime/thread.inline.hpp"
 #include "services/classLoadingService.hpp"
 #include "services/diagnosticCommand.hpp"
 #include "services/diagnosticFramework.hpp"
@@ -388,23 +389,6 @@
   return (instanceOop) element();
 }
 
-// Helper functions
-static JavaThread* find_java_thread_from_id(jlong thread_id) {
-  assert(Threads_lock->owned_by_self(), "Must hold Threads_lock");
-
-  JavaThread* java_thread = NULL;
-  // Sequential search for now.  Need to do better optimization later.
-  for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
-    oop tobj = thread->threadObj();
-    if (!thread->is_exiting() &&
-        tobj != NULL &&
-        thread_id == java_lang_Thread::thread_id(tobj)) {
-      java_thread = thread;
-      break;
-    }
-  }
-  return java_thread;
-}
 
 static GCMemoryManager* get_gc_memory_manager_from_jobject(jobject mgr, TRAPS) {
   if (mgr == NULL) {
@@ -441,6 +425,8 @@
   return MemoryService::get_memory_pool(ph);
 }
 
+#endif // INCLUDE_MANAGEMENT
+
 static void validate_thread_id_array(typeArrayHandle ids_ah, TRAPS) {
   int num_threads = ids_ah->length();
 
@@ -456,6 +442,8 @@
   }
 }
 
+#if INCLUDE_MANAGEMENT
+
 static void validate_thread_info_array(objArrayHandle infoArray_h, TRAPS) {
   // check if the element of infoArray is of type ThreadInfo class
   Klass* threadinfo_klass = Management::java_lang_management_ThreadInfo_klass(CHECK);
@@ -819,45 +807,6 @@
   return prev;
 JVM_END
 
-// Gets an array containing the amount of memory allocated on the Java
-// heap for a set of threads (in bytes).  Each element of the array is
-// the amount of memory allocated for the thread ID specified in the
-// corresponding entry in the given array of thread IDs; or -1 if the
-// thread does not exist or has terminated.
-JVM_ENTRY(void, jmm_GetThreadAllocatedMemory(JNIEnv *env, jlongArray ids,
-                                             jlongArray sizeArray))
-  // Check if threads is null
-  if (ids == NULL || sizeArray == NULL) {
-    THROW(vmSymbols::java_lang_NullPointerException());
-  }
-
-  ResourceMark rm(THREAD);
-  typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(ids));
-  typeArrayHandle ids_ah(THREAD, ta);
-
-  typeArrayOop sa = typeArrayOop(JNIHandles::resolve_non_null(sizeArray));
-  typeArrayHandle sizeArray_h(THREAD, sa);
-
-  // validate the thread id array
-  validate_thread_id_array(ids_ah, CHECK);
-
-  // sizeArray must be of the same length as the given array of thread IDs
-  int num_threads = ids_ah->length();
-  if (num_threads != sizeArray_h->length()) {
-    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
-              "The length of the given long array does not match the length of "
-              "the given array of thread IDs");
-  }
-
-  MutexLockerEx ml(Threads_lock);
-  for (int i = 0; i < num_threads; i++) {
-    JavaThread* java_thread = find_java_thread_from_id(ids_ah->long_at(i));
-    if (java_thread != NULL) {
-      sizeArray_h->long_at_put(i, java_thread->cooked_allocated_bytes());
-    }
-  }
-JVM_END
-
 // Returns a java/lang/management/MemoryUsage object representing
 // the memory usage for the heap or non-heap memory.
 JVM_ENTRY(jobject, jmm_GetMemoryUsage(JNIEnv* env, jboolean heap))
@@ -1163,7 +1112,7 @@
     MutexLockerEx ml(Threads_lock);
     for (int i = 0; i < num_threads; i++) {
       jlong tid = ids_ah->long_at(i);
-      JavaThread* jt = find_java_thread_from_id(tid);
+      JavaThread* jt = Threads::find_java_thread_from_java_tid(tid);
       oop thread_obj = (jt != NULL ? jt->threadObj() : (oop)NULL);
       instanceHandle threadObj_h(THREAD, (instanceOop) thread_obj);
       thread_handle_array->append(threadObj_h);
@@ -1242,7 +1191,7 @@
       MutexLockerEx ml(Threads_lock);
       for (int i = 0; i < num_threads; i++) {
         jlong tid = ids_ah->long_at(i);
-        JavaThread* jt = find_java_thread_from_id(tid);
+        JavaThread* jt = Threads::find_java_thread_from_java_tid(tid);
         ThreadSnapshot* ts;
         if (jt == NULL) {
           // if the thread does not exist or now it is terminated,
@@ -1488,7 +1437,7 @@
         }
       } else {
         // reset contention statistics for a given thread
-        JavaThread* java_thread = find_java_thread_from_id(tid);
+        JavaThread* java_thread = Threads::find_java_thread_from_java_tid(tid);
         if (java_thread == NULL) {
           return false;
         }
@@ -1557,7 +1506,7 @@
     return os::current_thread_cpu_time();
   } else {
     MutexLockerEx ml(Threads_lock);
-    java_thread = find_java_thread_from_id(thread_id);
+    java_thread = Threads::find_java_thread_from_java_tid(thread_id);
     if (java_thread != NULL) {
       return os::thread_cpu_time((Thread*) java_thread);
     }
@@ -1565,78 +1514,6 @@
   return -1;
 JVM_END
 
-// Returns the CPU time consumed by a given thread (in nanoseconds).
-// If thread_id == 0, CPU time for the current thread is returned.
-// If user_sys_cpu_time = true, user level and system CPU time of
-// a given thread is returned; otherwise, only user level CPU time
-// is returned.
-JVM_ENTRY(jlong, jmm_GetThreadCpuTimeWithKind(JNIEnv *env, jlong thread_id, jboolean user_sys_cpu_time))
-  if (!os::is_thread_cpu_time_supported()) {
-    return -1;
-  }
-
-  if (thread_id < 0) {
-    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
-               "Invalid thread ID", -1);
-  }
-
-  JavaThread* java_thread = NULL;
-  if (thread_id == 0) {
-    // current thread
-    return os::current_thread_cpu_time(user_sys_cpu_time != 0);
-  } else {
-    MutexLockerEx ml(Threads_lock);
-    java_thread = find_java_thread_from_id(thread_id);
-    if (java_thread != NULL) {
-      return os::thread_cpu_time((Thread*) java_thread, user_sys_cpu_time != 0);
-    }
-  }
-  return -1;
-JVM_END
-
-// Gets an array containing the CPU times consumed by a set of threads
-// (in nanoseconds).  Each element of the array is the CPU time for the
-// thread ID specified in the corresponding entry in the given array
-// of thread IDs; or -1 if the thread does not exist or has terminated.
-// If user_sys_cpu_time = true, the sum of user level and system CPU time
-// for the given thread is returned; otherwise, only user level CPU time
-// is returned.
-JVM_ENTRY(void, jmm_GetThreadCpuTimesWithKind(JNIEnv *env, jlongArray ids,
-                                              jlongArray timeArray,
-                                              jboolean user_sys_cpu_time))
-  // Check if threads is null
-  if (ids == NULL || timeArray == NULL) {
-    THROW(vmSymbols::java_lang_NullPointerException());
-  }
-
-  ResourceMark rm(THREAD);
-  typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(ids));
-  typeArrayHandle ids_ah(THREAD, ta);
-
-  typeArrayOop tia = typeArrayOop(JNIHandles::resolve_non_null(timeArray));
-  typeArrayHandle timeArray_h(THREAD, tia);
-
-  // validate the thread id array
-  validate_thread_id_array(ids_ah, CHECK);
-
-  // timeArray must be of the same length as the given array of thread IDs
-  int num_threads = ids_ah->length();
-  if (num_threads != timeArray_h->length()) {
-    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
-              "The length of the given long array does not match the length of "
-              "the given array of thread IDs");
-  }
-
-  MutexLockerEx ml(Threads_lock);
-  for (int i = 0; i < num_threads; i++) {
-    JavaThread* java_thread = find_java_thread_from_id(ids_ah->long_at(i));
-    if (java_thread != NULL) {
-      timeArray_h->long_at_put(i, os::thread_cpu_time((Thread*)java_thread,
-                                                      user_sys_cpu_time != 0));
-    }
-  }
-JVM_END
-
 // Returns a String array of all VM global flag names
 JVM_ENTRY(jobjectArray, jmm_GetVMGlobalNames(JNIEnv *env))
   // last flag entry is always NULL, so subtract 1
@@ -1823,7 +1700,7 @@
               "This flag is not writeable.");
   }
 
-  bool succeed;
+  bool succeed = false;
   if (flag->is_bool()) {
     bool bvalue = (new_value.z == JNI_TRUE ? true : false);
     succeed = CommandLineFlags::boolAtPut(name, &bvalue, Flag::MANAGEMENT);
@@ -1855,6 +1732,9 @@
     }
     ccstr svalue = java_lang_String::as_utf8_string(str);
     succeed = CommandLineFlags::ccstrAtPut(name, &svalue, Flag::MANAGEMENT);
+    if (succeed) {
+      FREE_C_HEAP_ARRAY(char, svalue, mtInternal);
+    }
   }
   assert(succeed, "Setting flag should succeed");
 JVM_END
@@ -2319,7 +2199,122 @@
   return (jlong)(((double)ticks / (double)os::elapsed_frequency())
                  * (double)1000.0);
 }
+#endif // INCLUDE_MANAGEMENT
 
+// Gets an array containing the amount of memory allocated on the Java
+// heap for a set of threads (in bytes).  Each element of the array is
+// the amount of memory allocated for the thread ID specified in the
+// corresponding entry in the given array of thread IDs; or -1 if the
+// thread does not exist or has terminated.
+JVM_ENTRY(void, jmm_GetThreadAllocatedMemory(JNIEnv *env, jlongArray ids,
+                                             jlongArray sizeArray))
+  // Check if threads is null
+  if (ids == NULL || sizeArray == NULL) {
+    THROW(vmSymbols::java_lang_NullPointerException());
+  }
+
+  ResourceMark rm(THREAD);
+  typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(ids));
+  typeArrayHandle ids_ah(THREAD, ta);
+
+  typeArrayOop sa = typeArrayOop(JNIHandles::resolve_non_null(sizeArray));
+  typeArrayHandle sizeArray_h(THREAD, sa);
+
+  // validate the thread id array
+  validate_thread_id_array(ids_ah, CHECK);
+
+  // sizeArray must be of the same length as the given array of thread IDs
+  int num_threads = ids_ah->length();
+  if (num_threads != sizeArray_h->length()) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "The length of the given long array does not match the length of "
+              "the given array of thread IDs");
+  }
+
+  MutexLockerEx ml(Threads_lock);
+  for (int i = 0; i < num_threads; i++) {
+    JavaThread* java_thread = Threads::find_java_thread_from_java_tid(ids_ah->long_at(i));
+    if (java_thread != NULL) {
+      sizeArray_h->long_at_put(i, java_thread->cooked_allocated_bytes());
+    }
+  }
+JVM_END
+
+// Returns the CPU time consumed by a given thread (in nanoseconds).
+// If thread_id == 0, CPU time for the current thread is returned.
+// If user_sys_cpu_time = true, user level and system CPU time of
+// a given thread is returned; otherwise, only user level CPU time
+// is returned.
+JVM_ENTRY(jlong, jmm_GetThreadCpuTimeWithKind(JNIEnv *env, jlong thread_id, jboolean user_sys_cpu_time))
+  if (!os::is_thread_cpu_time_supported()) {
+    return -1;
+  }
+
+  if (thread_id < 0) {
+    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
+               "Invalid thread ID", -1);
+  }
+
+  JavaThread* java_thread = NULL;
+  if (thread_id == 0) {
+    // current thread
+    return os::current_thread_cpu_time(user_sys_cpu_time != 0);
+  } else {
+    MutexLockerEx ml(Threads_lock);
+    java_thread = Threads::find_java_thread_from_java_tid(thread_id);
+    if (java_thread != NULL) {
+      return os::thread_cpu_time((Thread*) java_thread, user_sys_cpu_time != 0);
+    }
+  }
+  return -1;
+JVM_END
+
+// Gets an array containing the CPU times consumed by a set of threads
+// (in nanoseconds).  Each element of the array is the CPU time for the
+// thread ID specified in the corresponding entry in the given array
+// of thread IDs; or -1 if the thread does not exist or has terminated.
+// If user_sys_cpu_time = true, the sum of user level and system CPU time
+// for the given thread is returned; otherwise, only user level CPU time
+// is returned.
+JVM_ENTRY(void, jmm_GetThreadCpuTimesWithKind(JNIEnv *env, jlongArray ids,
+                                              jlongArray timeArray,
+                                              jboolean user_sys_cpu_time))
+  // Check if threads is null
+  if (ids == NULL || timeArray == NULL) {
+    THROW(vmSymbols::java_lang_NullPointerException());
+  }
+
+  ResourceMark rm(THREAD);
+  typeArrayOop ta = typeArrayOop(JNIHandles::resolve_non_null(ids));
+  typeArrayHandle ids_ah(THREAD, ta);
+
+  typeArrayOop tia = typeArrayOop(JNIHandles::resolve_non_null(timeArray));
+  typeArrayHandle timeArray_h(THREAD, tia);
+
+  // validate the thread id array
+  validate_thread_id_array(ids_ah, CHECK);
+
+  // timeArray must be of the same length as the given array of thread IDs
+  int num_threads = ids_ah->length();
+  if (num_threads != timeArray_h->length()) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "The length of the given long array does not match the length of "
+              "the given array of thread IDs");
+  }
+
+  MutexLockerEx ml(Threads_lock);
+  for (int i = 0; i < num_threads; i++) {
+    JavaThread* java_thread = Threads::find_java_thread_from_java_tid(ids_ah->long_at(i));
+    if (java_thread != NULL) {
+      timeArray_h->long_at_put(i, os::thread_cpu_time((Thread*)java_thread,
+                                                      user_sys_cpu_time != 0));
+    }
+  }
+JVM_END
+
+
+
+#if INCLUDE_MANAGEMENT
 const struct jmmInterface_1_ jmm_interface = {
   NULL,
   NULL,
--- a/src/share/vm/services/memBaseline.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/services/memBaseline.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,471 +22,279 @@
  *
  */
 #include "precompiled.hpp"
+
 #include "memory/allocation.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/thread.inline.hpp"
 #include "services/memBaseline.hpp"
 #include "services/memTracker.hpp"
 
-
-MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
-  {mtJavaHeap,   "Java Heap"},
-  {mtClass,      "Class"},
-  {mtThreadStack,"Thread Stack"},
-  {mtThread,     "Thread"},
-  {mtCode,       "Code"},
-  {mtGC,         "GC"},
-  {mtCompiler,   "Compiler"},
-  {mtInternal,   "Internal"},
-  {mtOther,      "Other"},
-  {mtSymbol,     "Symbol"},
-  {mtNMT,        "Memory Tracking"},
-  {mtTracing,    "Tracing"},
-  {mtChunk,      "Pooled Free Chunks"},
-  {mtClassShared,"Shared spaces for classes"},
-  {mtTest,       "Test"},
-  {mtNone,       "Unknown"}  // It can happen when type tagging records are lagging
-                             // behind
-};
-
-MemBaseline::MemBaseline() {
-  _baselined = false;
-
-  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
-    _malloc_data[index].set_type(MemType2NameMap[index]._flag);
-    _vm_data[index].set_type(MemType2NameMap[index]._flag);
-    _arena_data[index].set_type(MemType2NameMap[index]._flag);
+/*
+ * Sizes are sorted in descending order for reporting
+ */
+int compare_malloc_size(const MallocSite& s1, const MallocSite& s2) {
+  if (s1.size() == s2.size()) {
+    return 0;
+  } else if (s1.size() > s2.size()) {
+    return -1;
+  } else {
+    return 1;
   }
-
-  _malloc_cs = NULL;
-  _vm_cs = NULL;
-  _vm_map = NULL;
-
-  _number_of_classes = 0;
-  _number_of_threads = 0;
 }
 
 
-void MemBaseline::clear() {
-  if (_malloc_cs != NULL) {
-    delete _malloc_cs;
-    _malloc_cs = NULL;
+int compare_virtual_memory_size(const VirtualMemoryAllocationSite& s1,
+  const VirtualMemoryAllocationSite& s2) {
+  if (s1.reserved() == s2.reserved()) {
+    return 0;
+  } else if (s1.reserved() > s2.reserved()) {
+    return -1;
+  } else {
+    return 1;
   }
+}
 
-  if (_vm_cs != NULL) {
-    delete _vm_cs;
-    _vm_cs = NULL;
-  }
-
-  if (_vm_map != NULL) {
-    delete _vm_map;
-    _vm_map = NULL;
-  }
-
-  reset();
+// Sort into allocation site address order for baseline comparison
+int compare_malloc_site(const MallocSite& s1, const MallocSite& s2) {
+  return s1.call_stack()->compare(*s2.call_stack());
 }
 
 
-void MemBaseline::reset() {
-  _baselined = false;
-  _total_vm_reserved = 0;
-  _total_vm_committed = 0;
-  _total_malloced = 0;
-  _number_of_classes = 0;
-
-  if (_malloc_cs != NULL) _malloc_cs->clear();
-  if (_vm_cs != NULL) _vm_cs->clear();
-  if (_vm_map != NULL) _vm_map->clear();
-
-  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
-    _malloc_data[index].clear();
-    _vm_data[index].clear();
-    _arena_data[index].clear();
-  }
+int compare_virtual_memory_site(const VirtualMemoryAllocationSite& s1,
+  const VirtualMemoryAllocationSite& s2) {
+  return s1.call_stack()->compare(*s2.call_stack());
 }
 
-MemBaseline::~MemBaseline() {
-  clear();
+/*
+ * Walker to walk malloc allocation site table
+ */
+class MallocAllocationSiteWalker : public MallocSiteWalker {
+ private:
+  SortedLinkedList<MallocSite, compare_malloc_size> _malloc_sites;
+  size_t         _count;
+
+  // Entries in MallocSiteTable with size = 0 and count = 0 indicate
+  // that the malloc site is no longer there.
+ public:
+  MallocAllocationSiteWalker() : _count(0) { }
+
+  inline size_t count() const { return _count; }
+
+  LinkedList<MallocSite>* malloc_sites() {
+    return &_malloc_sites;
+  }
+
+  bool do_malloc_site(const MallocSite* site) {
+    if (site->size() >= MemBaseline::SIZE_THRESHOLD) {
+      if (_malloc_sites.add(*site) != NULL) {
+        _count++;
+        return true;
+      } else {
+        return false;  // OOM
+      }
+    } else {
+      // malloc site does not meet threshold, ignore and continue
+      return true;
+    }
+  }
+};
+
+// Compare virtual memory region's base address
+int compare_virtual_memory_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
+  return r1.compare(r2);
 }
 
-// baseline malloc'd memory records, generate overall summary and summaries by
-// memory types
-bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
-  MemPointerArrayIteratorImpl malloc_itr((MemPointerArray*)malloc_records);
-  MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
-  size_t used_arena_size = 0;
-  int index;
-  while (malloc_ptr != NULL) {
-    index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
-    size_t size = malloc_ptr->size();
-    if (malloc_ptr->is_arena_memory_record()) {
-      // We do have anonymous arenas, they are either used as value objects,
-      // which are embedded inside other objects, or used as stack objects.
-      _arena_data[index].inc(size);
-      used_arena_size += size;
-    } else {
-      _total_malloced += size;
-      _malloc_data[index].inc(size);
-      if (malloc_ptr->is_arena_record()) {
-        // see if arena memory record present
-        MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
-        if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) {
-          assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
-             "Arena records do not match");
-          size = next_malloc_ptr->size();
-          _arena_data[index].inc(size);
-          used_arena_size += size;
-          malloc_itr.next();
-        }
+// Walk all virtual memory regions for baselining
+class VirtualMemoryAllocationWalker : public VirtualMemoryWalker {
+ private:
+  SortedLinkedList<ReservedMemoryRegion, compare_virtual_memory_base>
+                _virtual_memory_regions;
+  size_t        _count;
+
+ public:
+  VirtualMemoryAllocationWalker() : _count(0) { }
+
+  bool do_allocation_site(const ReservedMemoryRegion* rgn)  {
+    if (rgn->size() >= MemBaseline::SIZE_THRESHOLD) {
+      if (_virtual_memory_regions.add(*rgn) != NULL) {
+        _count ++;
+        return true;
+      } else {
+        return false;
       }
     }
-    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
+    return true;
   }
 
-  // substract used arena size to get size of arena chunk in free list
-  index = flag2index(mtChunk);
-  _malloc_data[index].reduce(used_arena_size);
-  // we really don't know how many chunks in free list, so just set to
-  // 0
-  _malloc_data[index].overwrite_counter(0);
+  LinkedList<ReservedMemoryRegion>* virtual_memory_allocations() {
+    return &_virtual_memory_regions;
+  }
+};
+
+
+bool MemBaseline::baseline_summary() {
+  MallocMemorySummary::snapshot(&_malloc_memory_snapshot);
+  VirtualMemorySummary::snapshot(&_virtual_memory_snapshot);
+  return true;
+}
+
+bool MemBaseline::baseline_allocation_sites() {
+  // Malloc allocation sites
+  MallocAllocationSiteWalker malloc_walker;
+  if (!MallocSiteTable::walk_malloc_site(&malloc_walker)) {
+    return false;
+  }
+
+  _malloc_sites.move(malloc_walker.malloc_sites());
+  // The malloc sites are collected in size order
+  _malloc_sites_order = by_size;
+
+  // Virtual memory allocation sites
+  VirtualMemoryAllocationWalker virtual_memory_walker;
+  if (!VirtualMemoryTracker::walk_virtual_memory(&virtual_memory_walker)) {
+    return false;
+  }
+
+  // Virtual memory allocations are collected in call stack order
+  _virtual_memory_allocations.move(virtual_memory_walker.virtual_memory_allocations());
+
+  if (!aggregate_virtual_memory_allocation_sites()) {
+    return false;
+  }
+  // Virtual memory allocation sites are aggregated in call stack order
+  _virtual_memory_sites_order = by_address;
 
   return true;
 }
 
-// check if there is a safepoint in progress, if so, block the thread
-// for the safepoint
-void MemBaseline::check_safepoint(JavaThread* thr) {
-  if (SafepointSynchronize::is_synchronizing()) {
-    // grab and drop the SR_lock to honor the safepoint protocol
-    MutexLocker ml(thr->SR_lock());
-  }
-}
-
-// baseline mmap'd memory records, generate overall summary and summaries by
-// memory types
-bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
-  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
-  VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
-  int index;
-  while (vm_ptr != NULL) {
-    if (vm_ptr->is_reserved_region()) {
-      index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
-    // we use the number of thread stack to count threads
-      if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
-      _number_of_threads ++;
-    }
-      _total_vm_reserved += vm_ptr->size();
-      _vm_data[index].inc(vm_ptr->size(), 0);
-    } else {
-      _total_vm_committed += vm_ptr->size();
-      _vm_data[index].inc(0, vm_ptr->size());
-    }
-    vm_ptr = (VMMemRegion*)vm_itr.next();
-  }
-  return true;
-}
-
-// baseline malloc'd memory by callsites, but only the callsites with memory allocation
-// over 1KB are stored.
-bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
-  assert(MemTracker::track_callsite(), "detail tracking is off");
-
-  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
-  MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current();
-  MallocCallsitePointer malloc_callsite;
-
-  // initailize malloc callsite array
-  if (_malloc_cs == NULL) {
-    _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
-    // out of native memory
-    if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) {
-      return false;
-    }
-  } else {
-    _malloc_cs->clear();
-  }
-
-  MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records);
-
-  // sort into callsite pc order. Details are aggregated by callsites
-  malloc_data->sort((FN_SORT)malloc_sort_by_pc);
-  bool ret = true;
-
-  // baseline memory that is totaled over 1 KB
-  while (malloc_ptr != NULL) {
-    if (!MemPointerRecord::is_arena_memory_record(malloc_ptr->flags())) {
-      // skip thread stacks
-      if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
-        if (malloc_callsite.addr() != malloc_ptr->pc()) {
-          if ((malloc_callsite.amount()/K) > 0) {
-            if (!_malloc_cs->append(&malloc_callsite)) {
-              ret = false;
-              break;
-            }
-          }
-          malloc_callsite = MallocCallsitePointer(malloc_ptr->pc());
-        }
-        malloc_callsite.inc(malloc_ptr->size());
-      }
-    }
-    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
-  }
-
-  // restore to address order. Snapshot malloc data is maintained in memory
-  // address order.
-  malloc_data->sort((FN_SORT)malloc_sort_by_addr);
+bool MemBaseline::baseline(bool summaryOnly) {
+  reset();
 
-  if (!ret) {
-              return false;
-            }
-  // deal with last record
-  if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) {
-    if (!_malloc_cs->append(&malloc_callsite)) {
-      return false;
-    }
-  }
-  return true;
-}
-
-// baseline mmap'd memory by callsites
-bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
-  assert(MemTracker::track_callsite(), "detail tracking is off");
-
-  VMCallsitePointer  vm_callsite;
-  VMCallsitePointer* cur_callsite = NULL;
-  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
-  VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current();
-
-  // initialize virtual memory map array
-  if (_vm_map == NULL) {
-    _vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length());
-   if (_vm_map == NULL || _vm_map->out_of_memory()) {
-     return false;
-   }
-  } else {
-    _vm_map->clear();
-  }
-
-  // initialize virtual memory callsite array
-  if (_vm_cs == NULL) {
-    _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
-    if (_vm_cs == NULL || _vm_cs->out_of_memory()) {
-      return false;
-    }
-  } else {
-    _vm_cs->clear();
-  }
-
-  // consolidate virtual memory data
-  VMMemRegionEx*     reserved_rec = NULL;
-  VMMemRegionEx*     committed_rec = NULL;
+  _class_count = InstanceKlass::number_of_instance_classes();
 
-  // vm_ptr is coming in increasing base address order
-  while (vm_ptr != NULL) {
-    if (vm_ptr->is_reserved_region()) {
-      // consolidate reserved memory regions for virtual memory map.
-      // The criteria for consolidation is:
-      // 1. two adjacent reserved memory regions
-      // 2. belong to the same memory type
-      // 3. reserved from the same callsite
-      if (reserved_rec == NULL ||
-        reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() ||
-        FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) ||
-        reserved_rec->pc() != vm_ptr->pc()) {
-        if (!_vm_map->append(vm_ptr)) {
-        return false;
-      }
-        // inserted reserved region, we need the pointer to the element in virtual
-        // memory map array.
-        reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
-      } else {
-        reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
-    }
-
-      if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
-      return false;
-    }
-      vm_callsite = VMCallsitePointer(vm_ptr->pc());
-      cur_callsite = &vm_callsite;
-      vm_callsite.inc(vm_ptr->size(), 0);
-    } else {
-      // consolidate committed memory regions for virtual memory map
-      // The criterial is:
-      // 1. two adjacent committed memory regions
-      // 2. committed from the same callsite
-      if (committed_rec == NULL ||
-        committed_rec->base() + committed_rec->size() != vm_ptr->addr() ||
-        committed_rec->pc() != vm_ptr->pc()) {
-        if (!_vm_map->append(vm_ptr)) {
-          return false;
-        }
-        committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
-    } else {
-        committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
-      }
-      vm_callsite.inc(0, vm_ptr->size());
-    }
-    vm_ptr = (VMMemRegionEx*)vm_itr.next();
-  }
-  // deal with last record
-  if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
+  if (!baseline_summary()) {
     return false;
   }
 
-  // sort it into callsite pc order. Details are aggregated by callsites
-  _vm_cs->sort((FN_SORT)bl_vm_sort_by_pc);
+  _baseline_type = Summary_baselined;
 
-  // walk the array to consolidate record by pc
-  MemPointerArrayIteratorImpl itr(_vm_cs);
-  VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
-  VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
-  while (next_rec != NULL) {
-    assert(callsite_rec != NULL, "Sanity check");
-    if (next_rec->addr() == callsite_rec->addr()) {
-      callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
-      itr.remove();
-      next_rec = (VMCallsitePointer*)itr.current();
-    } else {
-      callsite_rec = next_rec;
-      next_rec = (VMCallsitePointer*)itr.next();
-    }
+  // baseline details
+  if (!summaryOnly &&
+      MemTracker::tracking_level() == NMT_detail) {
+    baseline_allocation_sites();
+    _baseline_type = Detail_baselined;
   }
 
   return true;
 }
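A sketch of how a caller might drive this entry point; the helper name is hypothetical, and the reporting code that actually consumes baselines is not part of this hunk.

// Sketch only: take a detail baseline and sum malloc'd bytes over all recorded
// call sites, walking them largest-first.
static size_t total_malloc_site_bytes() {
  MemBaseline baseline;
  if (!baseline.baseline(false /* summaryOnly */) ||
      baseline.baseline_type() != MemBaseline::Detail_baselined) {
    return 0;   // baselining failed (e.g. OOM), or NMT is not at detail level
  }
  size_t total = 0;
  MallocSiteIterator itr = baseline.malloc_sites(MemBaseline::by_size);
  const MallocSite* site;
  while ((site = itr.next()) != NULL) {
    total += site->size();
  }
  return total;
}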
 
-// baseline a snapshot. If summary_only = false, memory usages aggregated by
-// callsites are also baselined.
-// The method call can be lengthy, especially when detail tracking info is
-// requested. So the method checks for safepoint explicitly.
-bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
-  Thread* THREAD = Thread::current();
-  assert(THREAD->is_Java_thread(), "must be a JavaThread");
-  MutexLocker snapshot_locker(snapshot._lock);
-  reset();
-  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs);
-  if (_baselined) {
-    check_safepoint((JavaThread*)THREAD);
-    _baselined = baseline_vm_summary(snapshot._vm_ptrs);
+int compare_allocation_site(const VirtualMemoryAllocationSite& s1,
+  const VirtualMemoryAllocationSite& s2) {
+  return s1.call_stack()->compare(*s2.call_stack());
+}
+
+bool MemBaseline::aggregate_virtual_memory_allocation_sites() {
+  SortedLinkedList<VirtualMemoryAllocationSite, compare_allocation_site> allocation_sites;
+
+  VirtualMemoryAllocationIterator itr = virtual_memory_allocations();
+  const ReservedMemoryRegion* rgn;
+  VirtualMemoryAllocationSite* site;
+  while ((rgn = itr.next()) != NULL) {
+    VirtualMemoryAllocationSite tmp(*rgn->call_stack());
+    site = allocation_sites.find(tmp);
+    if (site == NULL) {
+      LinkedListNode<VirtualMemoryAllocationSite>* node =
+        allocation_sites.add(tmp);
+      if (node == NULL) return false;
+      site = node->data();
+    }
+    site->reserve_memory(rgn->size());
+    site->commit_memory(rgn->committed_size());
   }
-  _number_of_classes = snapshot.number_of_classes();
 
-  if (!summary_only && MemTracker::track_callsite() && _baselined) {
-    check_safepoint((JavaThread*)THREAD);
-    _baselined =  baseline_malloc_details(snapshot._alloc_ptrs);
-    if (_baselined) {
-      check_safepoint((JavaThread*)THREAD);
-      _baselined =  baseline_vm_details(snapshot._vm_ptrs);
-    }
-  }
-  return _baselined;
+  _virtual_memory_sites.move(&allocation_sites);
+  return true;
 }
 
-
-int MemBaseline::flag2index(MEMFLAGS flag) const {
-  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
-    if (MemType2NameMap[index]._flag == flag) {
-      return index;
-    }
+MallocSiteIterator MemBaseline::malloc_sites(SortingOrder order) {
+  assert(!_malloc_sites.is_empty(), "Not detail baseline");
+  switch(order) {
+    case by_size:
+      malloc_sites_to_size_order();
+      break;
+    case by_site:
+      malloc_sites_to_allocation_site_order();
+      break;
+    case by_address:
+    default:
+      ShouldNotReachHere();
   }
-  assert(false, "no type");
-  return -1;
+  return MallocSiteIterator(_malloc_sites.head());
 }
 
-const char* MemBaseline::type2name(MEMFLAGS type) {
-  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
-    if (MemType2NameMap[index]._flag == type) {
-      return MemType2NameMap[index]._name;
-    }
+VirtualMemorySiteIterator MemBaseline::virtual_memory_sites(SortingOrder order) {
+  assert(!_virtual_memory_sites.is_empty(), "Not detail baseline");
+  switch(order) {
+    case by_size:
+      virtual_memory_sites_to_size_order();
+      break;
+    case by_site:
+      virtual_memory_sites_to_reservation_site_order();
+      break;
+    case by_address:
+    default:
+      ShouldNotReachHere();
   }
-  assert(false, err_msg("bad type %x", type));
-  return NULL;
+  return VirtualMemorySiteIterator(_virtual_memory_sites.head());
 }
 
 
-MemBaseline& MemBaseline::operator=(const MemBaseline& other) {
-  _total_malloced = other._total_malloced;
-  _total_vm_reserved = other._total_vm_reserved;
-  _total_vm_committed = other._total_vm_committed;
-
-  _baselined = other._baselined;
-  _number_of_classes = other._number_of_classes;
-
-  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
-    _malloc_data[index] = other._malloc_data[index];
-    _vm_data[index] = other._vm_data[index];
-    _arena_data[index] = other._arena_data[index];
-  }
+// Sorting allocation sites into different orders
+void MemBaseline::malloc_sites_to_size_order() {
+  if (_malloc_sites_order != by_size) {
+    SortedLinkedList<MallocSite, compare_malloc_size> tmp;
 
-  if (MemTracker::track_callsite()) {
-    assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
-    assert(other._malloc_cs != NULL && other._vm_cs != NULL,
-           "not properly baselined");
-    _malloc_cs->clear();
-    _vm_cs->clear();
-    int index;
-    for (index = 0; index < other._malloc_cs->length(); index ++) {
-      _malloc_cs->append(other._malloc_cs->at(index));
-    }
-
-    for (index = 0; index < other._vm_cs->length(); index ++) {
-      _vm_cs->append(other._vm_cs->at(index));
-    }
+    // Add malloc sites to sorted linked list to sort into size order
+    tmp.move(&_malloc_sites);
+    _malloc_sites.set_head(tmp.head());
+    tmp.set_head(NULL);
+    _malloc_sites_order = by_size;
   }
-  return *this;
 }
 
-/* compare functions for sorting */
-
-// sort snapshot malloc'd records in callsite pc order
-int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
-  assert(MemTracker::track_callsite(),"Just check");
-  const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
-  const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
-  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
+void MemBaseline::malloc_sites_to_allocation_site_order() {
+  if (_malloc_sites_order != by_site) {
+    SortedLinkedList<MallocSite, compare_malloc_site> tmp;
+    // Add malloc sites to sorted linked list to sort into site (address) order
+    tmp.move(&_malloc_sites);
+    _malloc_sites.set_head(tmp.head());
+    tmp.set_head(NULL);
+    _malloc_sites_order = by_site;
+  }
 }
 
-// sort baselined malloc'd records in size order
-int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
-  assert(MemTracker::is_on(), "Just check");
-  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
-  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
-  return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
-}
+void MemBaseline::virtual_memory_sites_to_size_order() {
+  if (_virtual_memory_sites_order != by_size) {
+    SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_size> tmp;
 
-// sort baselined malloc'd records in callsite pc order
-int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
-  assert(MemTracker::is_on(), "Just check");
-  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
-  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
-  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
+    tmp.move(&_virtual_memory_sites);
+
+    _virtual_memory_sites.set_head(tmp.head());
+    tmp.set_head(NULL);
+    _virtual_memory_sites_order = by_size;
+  }
 }
 
+void MemBaseline::virtual_memory_sites_to_reservation_site_order() {
+  if (_virtual_memory_sites_order != by_site) {
+    SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_site> tmp;
 
-// sort baselined mmap'd records in size (reserved size) order
-int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
-  assert(MemTracker::is_on(), "Just check");
-  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
-  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
-  return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
+    tmp.move(&_virtual_memory_sites);
+
+    _virtual_memory_sites.set_head(tmp.head());
+    tmp.set_head(NULL);
+
+    _virtual_memory_sites_order = by_site;
+  }
 }
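The four functions above share one re-sort idiom; written once as a hypothetical standalone helper (the real code keeps it inline per sort order, as above):

// Sketch only: re-sort a list by moving it through a SortedLinkedList with the
// desired comparator, then taking the sorted chain back.
static void resort_malloc_sites_by_size(LinkedListImpl<MallocSite>* sites) {
  SortedLinkedList<MallocSite, compare_malloc_size> tmp;
  tmp.move(sites);               // drain *sites, inserting elements in size order
  sites->set_head(tmp.head());   // hand the sorted chain back to the caller's list
  tmp.set_head(NULL);            // detach tmp so the nodes have a single owner
}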
 
-// sort baselined mmap'd records in callsite pc order
-int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
-  assert(MemTracker::is_on(), "Just check");
-  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
-  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
-  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
-}
-
-
-// sort snapshot malloc'd records in memory block address order
-int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
-  assert(MemTracker::is_on(), "Just check");
-  const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
-  const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
-  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
-  assert(p1 == p2 || delta != 0, "dup pointer");
-  return delta;
-}
-
--- a/src/share/vm/services/memBaseline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/services/memBaseline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,425 +25,176 @@
 #ifndef SHARE_VM_SERVICES_MEM_BASELINE_HPP
 #define SHARE_VM_SERVICES_MEM_BASELINE_HPP
 
+#if INCLUDE_NMT
+
 #include "memory/allocation.hpp"
 #include "runtime/mutex.hpp"
-#include "services/memPtr.hpp"
-#include "services/memSnapshot.hpp"
+#include "services/mallocSiteTable.hpp"
+#include "services/mallocTracker.hpp"
+#include "services/nmtCommon.hpp"
+#include "services/virtualMemoryTracker.hpp"
+#include "utilities/linkedlist.hpp"
 
-// compare unsigned number
-#define UNSIGNED_COMPARE(a, b)  ((a > b) ? 1 : ((a == b) ? 0 : -1))
+typedef LinkedListIterator<MallocSite>                   MallocSiteIterator;
+typedef LinkedListIterator<VirtualMemoryAllocationSite>  VirtualMemorySiteIterator;
+typedef LinkedListIterator<ReservedMemoryRegion>         VirtualMemoryAllocationIterator;
 
 /*
- * MallocCallsitePointer and VMCallsitePointer are used
- * to baseline memory blocks with their callsite information.
- * They are only available when detail tracking is turned
- * on.
+ * Baseline a memory snapshot
  */
-
-/* baselined malloc record aggregated by callsite */
-class MallocCallsitePointer : public MemPointer {
- private:
-  size_t    _count;   // number of malloc invocation from this callsite
-  size_t    _amount;  // total amount of memory malloc-ed from this callsite
-
+class MemBaseline VALUE_OBJ_CLASS_SPEC {
  public:
-  MallocCallsitePointer() {
-    _count = 0;
-    _amount = 0;
-  }
-
-  MallocCallsitePointer(address pc) : MemPointer(pc) {
-    _count = 0;
-    _amount = 0;
-  }
+  enum BaselineThreshold {
+    SIZE_THRESHOLD = K        // Only allocations with size over this threshold will be baselined.
+  };
 
-  MallocCallsitePointer& operator=(const MallocCallsitePointer& p) {
-    MemPointer::operator=(p);
-    _count = p.count();
-    _amount = p.amount();
-    return *this;
-  }
-
-  inline void inc(size_t size) {
-    _count ++;
-    _amount += size;
+  enum BaselineType {
+    Not_baselined,
+    Summary_baselined,
+    Detail_baselined
   };
 
-  inline size_t count() const {
-    return _count;
-  }
+  enum SortingOrder {
+    by_address,   // by memory address
+    by_size,      // by memory size
+    by_site       // by call site where the memory is allocated from
+  };
+
+ private:
+  // Summary information
+  MallocMemorySnapshot   _malloc_memory_snapshot;
+  VirtualMemorySnapshot  _virtual_memory_snapshot;
+
+  size_t               _class_count;
 
-  inline size_t amount() const {
-    return _amount;
-  }
-};
+  // Allocation sites information
+  // Malloc allocation sites
+  LinkedListImpl<MallocSite>                  _malloc_sites;
+
+  // All virtual memory allocations
+  LinkedListImpl<ReservedMemoryRegion>        _virtual_memory_allocations;
 
-// baselined virtual memory record aggregated by callsite
-class VMCallsitePointer : public MemPointer {
- private:
-  size_t     _count;              // number of invocation from this callsite
-  size_t     _reserved_amount;    // total reserved amount
-  size_t     _committed_amount;   // total committed amount
+  // Virtual memory allocations by allocation sites, always in by_address
+  // order
+  LinkedListImpl<VirtualMemoryAllocationSite> _virtual_memory_sites;
+
+  SortingOrder         _malloc_sites_order;
+  SortingOrder         _virtual_memory_sites_order;
+
+  BaselineType         _baseline_type;
 
  public:
-  VMCallsitePointer() {
-    _count = 0;
-    _reserved_amount = 0;
-    _committed_amount = 0;
-  }
-
-  VMCallsitePointer(address pc) : MemPointer(pc) {
-    _count = 0;
-    _reserved_amount = 0;
-    _committed_amount = 0;
-  }
-
-  VMCallsitePointer& operator=(const VMCallsitePointer& p) {
-    MemPointer::operator=(p);
-    _count = p.count();
-    _reserved_amount = p.reserved_amount();
-    _committed_amount = p.committed_amount();
-    return *this;
-  }
-
-  inline void inc(size_t reserved, size_t committed) {
-    _count ++;
-    _reserved_amount += reserved;
-    _committed_amount += committed;
-  }
-
-  inline size_t count() const {
-    return _count;
-  }
-
-  inline size_t reserved_amount() const {
-    return _reserved_amount;
+  // create a memory baseline
+  MemBaseline():
+    _baseline_type(Not_baselined),
+    _class_count(0) {
   }
 
-  inline size_t committed_amount() const {
-    return _committed_amount;
-  }
-};
-
-// maps a memory type flag to readable name
-typedef struct _memType2Name {
-  MEMFLAGS     _flag;
-  const char*  _name;
-} MemType2Name;
-
+  bool baseline(bool summaryOnly = true);
 
-// This class aggregates malloc'd records by memory type
-class MallocMem VALUE_OBJ_CLASS_SPEC {
- private:
-  MEMFLAGS       _type;
+  BaselineType baseline_type() const { return _baseline_type; }
 
-  size_t         _count;
-  size_t         _amount;
-
- public:
-  MallocMem() {
-    _type = mtNone;
-    _count = 0;
-    _amount = 0;
+  MallocMemorySnapshot* malloc_memory_snapshot() {
+    return &_malloc_memory_snapshot;
   }
 
-  MallocMem(MEMFLAGS flags) {
-    assert(HAS_VALID_MEMORY_TYPE(flags), "no type");
-    _type = FLAGS_TO_MEMORY_TYPE(flags);
-    _count = 0;
-    _amount = 0;
-  }
-
-  inline void set_type(MEMFLAGS flag) {
-    _type = flag;
-  }
-
-  inline void clear() {
-    _count = 0;
-    _amount = 0;
-    _type = mtNone;
+  VirtualMemorySnapshot* virtual_memory_snapshot() {
+    return &_virtual_memory_snapshot;
   }
 
-  MallocMem& operator=(const MallocMem& m) {
-    assert(_type == m.type(), "different type");
-    _count = m.count();
-    _amount = m.amount();
-    return *this;
-  }
+  MallocSiteIterator malloc_sites(SortingOrder order);
+  VirtualMemorySiteIterator virtual_memory_sites(SortingOrder order);
 
-  inline void inc(size_t amt) {
-    _amount += amt;
-    _count ++;
-  }
-
-  inline void reduce(size_t amt) {
-    assert(_amount >= amt, "Just check");
-    _amount -= amt;
-  }
-
-  inline void overwrite_counter(size_t count) {
-    _count = count;
+  // Virtual memory allocation iterator always returns in virtual memory
+  // base address order.
+  VirtualMemoryAllocationIterator virtual_memory_allocations() {
+    assert(!_virtual_memory_allocations.is_empty(), "Not detail baseline");
+    return VirtualMemoryAllocationIterator(_virtual_memory_allocations.head());
   }
 
-  inline MEMFLAGS type() const {
-    return _type;
+  // Total reserved memory = total malloc'd memory + total reserved virtual
+  // memory
+  size_t total_reserved_memory() const {
+    assert(baseline_type() != Not_baselined, "Not yet baselined");
+    size_t amount = _malloc_memory_snapshot.total() +
+           _virtual_memory_snapshot.total_reserved();
+    return amount;
   }
 
-  inline bool is_type(MEMFLAGS flags) const {
-    return FLAGS_TO_MEMORY_TYPE(flags) == _type;
-  }
-
-  inline size_t count() const {
-    return _count;
+  // Total committed memory = total malloc'd memory + total committed
+  // virtual memory
+  size_t total_committed_memory() const {
+    assert(baseline_type() != Not_baselined, "Not yet baselined");
+    size_t amount = _malloc_memory_snapshot.total() +
+           _virtual_memory_snapshot.total_committed();
+    return amount;
   }
 
-  inline size_t amount() const {
-    return _amount;
-  }
-};
-
-// This class records live arena's memory usage
-class ArenaMem : public MallocMem {
- public:
-  ArenaMem(MEMFLAGS typeflag): MallocMem(typeflag) {
-  }
-  ArenaMem() { }
-};
-
-// This class aggregates virtual memory by its memory type
-class VMMem VALUE_OBJ_CLASS_SPEC {
- private:
-  MEMFLAGS       _type;
-
-  size_t         _count;
-  size_t         _reserved_amount;
-  size_t         _committed_amount;
-
- public:
-  VMMem() {
-    _type = mtNone;
-    _count = 0;
-    _reserved_amount = 0;
-    _committed_amount = 0;
+  size_t total_arena_memory() const {
+    assert(baseline_type() != Not_baselined, "Not yet baselined");
+    return _malloc_memory_snapshot.total_arena();
   }
 
-  VMMem(MEMFLAGS flags) {
-    assert(HAS_VALID_MEMORY_TYPE(flags), "no type");
-    _type = FLAGS_TO_MEMORY_TYPE(flags);
-    _count = 0;
-    _reserved_amount = 0;
-    _committed_amount = 0;
+  size_t malloc_tracking_overhead() const {
+    assert(baseline_type() != Not_baselined, "Not yet baselined");
+    MemBaseline* bl = const_cast<MemBaseline*>(this);
+    return bl->_malloc_memory_snapshot.malloc_overhead()->size();
   }
 
-  inline void clear() {
-    _type = mtNone;
-    _count = 0;
-    _reserved_amount = 0;
-    _committed_amount = 0;
+  MallocMemory* malloc_memory(MEMFLAGS flag) {
+    assert(baseline_type() != Not_baselined, "Not yet baselined");
+    return _malloc_memory_snapshot.by_type(flag);
   }
 
-  inline void set_type(MEMFLAGS flags) {
-    _type = FLAGS_TO_MEMORY_TYPE(flags);
-  }
-
-  VMMem& operator=(const VMMem& m) {
-    assert(_type == m.type(), "different type");
-
-    _count = m.count();
-    _reserved_amount = m.reserved_amount();
-    _committed_amount = m.committed_amount();
-    return *this;
+  VirtualMemory* virtual_memory(MEMFLAGS flag) {
+    assert(baseline_type() != Not_baselined, "Not yet baselined");
+    return _virtual_memory_snapshot.by_type(flag);
   }
 
 
-  inline MEMFLAGS type() const {
-    return _type;
-  }
-
-  inline bool is_type(MEMFLAGS flags) const {
-    return FLAGS_TO_MEMORY_TYPE(flags) == _type;
-  }
-
-  inline void inc(size_t reserved_amt, size_t committed_amt) {
-    _reserved_amount += reserved_amt;
-    _committed_amount += committed_amt;
-    _count ++;
-  }
-
-  inline size_t count() const {
-    return _count;
-  }
-
-  inline size_t reserved_amount() const {
-    return _reserved_amount;
+  size_t class_count() const {
+    assert(baseline_type() != Not_baselined, "Not yet baselined");
+    return _class_count;
   }
 
-  inline size_t committed_amount() const {
-    return _committed_amount;
+  size_t thread_count() const {
+    assert(baseline_type() != Not_baselined, "Not yet baselined");
+    return _malloc_memory_snapshot.thread_count();
   }
-};
-
-
-
-#define NUMBER_OF_MEMORY_TYPE    (mt_number_of_types + 1)
-
-class BaselineReporter;
-class BaselineComparisonReporter;
-
-/*
- * This class baselines the current memory snapshot.
- * A memory baseline summarizes memory usage by memory type and
- * aggregates memory usage by callsite when detail tracking
- * is on.
- */
-class MemBaseline VALUE_OBJ_CLASS_SPEC {
-  friend class BaselineReporter;
-  friend class BaselineComparisonReporter;
-
- private:
-  // overall summaries
-  size_t        _total_malloced;
-  size_t        _total_vm_reserved;
-  size_t        _total_vm_committed;
-  size_t        _number_of_classes;
-  size_t        _number_of_threads;
-
-  // if it has properly baselined
-  bool          _baselined;
-
-  // we categorize memory into three categories within the memory type
-  MallocMem     _malloc_data[NUMBER_OF_MEMORY_TYPE];
-  VMMem         _vm_data[NUMBER_OF_MEMORY_TYPE];
-  ArenaMem      _arena_data[NUMBER_OF_MEMORY_TYPE];
-
-  // memory records that aggregate memory usage by callsites.
-  // only available when detail tracking is on.
-  MemPointerArray*  _malloc_cs;
-  MemPointerArray*  _vm_cs;
-  // virtual memory map
-  MemPointerArray*  _vm_map;
-
- private:
-  static MemType2Name  MemType2NameMap[NUMBER_OF_MEMORY_TYPE];
-
- private:
-  // should not use copy constructor
-  MemBaseline(MemBaseline& copy) { ShouldNotReachHere(); }
-
-  // check and block at a safepoint
-  static inline void check_safepoint(JavaThread* thr);
-
- public:
-  // create a memory baseline
-  MemBaseline();
-
-  ~MemBaseline();
-
-  inline bool baselined() const {
-    return _baselined;
-  }
-
-  MemBaseline& operator=(const MemBaseline& other);
 
   // reset the baseline for reuse
-  void clear();
-
-  // baseline the snapshot
-  bool baseline(MemSnapshot& snapshot, bool summary_only = true);
-
-  bool baseline(const MemPointerArray* malloc_records,
-                const MemPointerArray* vm_records,
-                bool summary_only = true);
+  void reset() {
+    _baseline_type = Not_baselined;
+    // _malloc_memory_snapshot and _virtual_memory_snapshot are copied over.
+    _class_count  = 0;
 
-  // total malloc'd memory of specified memory type
-  inline size_t malloc_amount(MEMFLAGS flag) const {
-    return _malloc_data[flag2index(flag)].amount();
-  }
-  // number of malloc'd memory blocks of specified memory type
-  inline size_t malloc_count(MEMFLAGS flag) const {
-    return _malloc_data[flag2index(flag)].count();
-  }
-  // total memory used by arenas of specified memory type
-  inline size_t arena_amount(MEMFLAGS flag) const {
-    return _arena_data[flag2index(flag)].amount();
-  }
-  // number of arenas of specified memory type
-  inline size_t arena_count(MEMFLAGS flag) const {
-    return _arena_data[flag2index(flag)].count();
-  }
-  // total reserved memory of specified memory type
-  inline size_t reserved_amount(MEMFLAGS flag) const {
-    return _vm_data[flag2index(flag)].reserved_amount();
-  }
-  // total committed memory of specified memory type
-  inline size_t committed_amount(MEMFLAGS flag) const {
-    return _vm_data[flag2index(flag)].committed_amount();
-  }
-  // total memory (malloc'd + mmap'd + arena) of specified
-  // memory type
-  inline size_t total_amount(MEMFLAGS flag) const {
-    int index = flag2index(flag);
-    return _malloc_data[index].amount() +
-           _vm_data[index].reserved_amount() +
-           _arena_data[index].amount();
+    _malloc_sites.clear();
+    _virtual_memory_sites.clear();
+    _virtual_memory_allocations.clear();
   }
 
-  /* overall summaries */
+ private:
+  // Baseline summary information
+  bool baseline_summary();
 
-  // total malloc'd memory in snapshot
-  inline size_t total_malloc_amount() const {
-    return _total_malloced;
-  }
-  // total mmap'd memory in snapshot
-  inline size_t total_reserved_amount() const {
-    return _total_vm_reserved;
-  }
-  // total committed memory in snapshot
-  inline size_t total_committed_amount() const {
-    return _total_vm_committed;
-  }
-  // number of loaded classes
-  inline size_t number_of_classes() const {
-    return _number_of_classes;
-  }
-  // number of running threads
-  inline size_t number_of_threads() const {
-    return _number_of_threads;
-  }
-  // lookup human readable name of a memory type
-  static const char* type2name(MEMFLAGS type);
+  // Baseline allocation sites (detail tracking only)
+  bool baseline_allocation_sites();
+
+  // Aggregate virtual memory allocation by allocation sites
+  bool aggregate_virtual_memory_allocation_sites();
 
- private:
-  // convert memory flag to the index to mapping table
-  int         flag2index(MEMFLAGS flag) const;
-
-  // reset baseline values
-  void reset();
-
-  // summarize the records in global snapshot
-  bool baseline_malloc_summary(const MemPointerArray* malloc_records);
-  bool baseline_vm_summary(const MemPointerArray* vm_records);
-  bool baseline_malloc_details(const MemPointerArray* malloc_records);
-  bool baseline_vm_details(const MemPointerArray* vm_records);
+  // Sorting allocation sites in different orders
+  // Sort allocation sites in size order
+  void malloc_sites_to_size_order();
+  // Sort allocation sites in call site address order
+  void malloc_sites_to_allocation_site_order();
 
-  // print a line of malloc'd memory aggregated by callsite
-  void print_malloc_callsite(outputStream* st, address pc, size_t size,
-    size_t count, int diff_amt, int diff_count) const;
-  // print a line of mmap'd memory aggregated by callsite
-  void print_vm_callsite(outputStream* st, address pc, size_t rsz,
-    size_t csz, int diff_rsz, int diff_csz) const;
-
-  // sorting functions for raw records
-  static int malloc_sort_by_pc(const void* p1, const void* p2);
-  static int malloc_sort_by_addr(const void* p1, const void* p2);
-
- private:
-  // sorting functions for baselined records
-  static int bl_malloc_sort_by_size(const void* p1, const void* p2);
-  static int bl_vm_sort_by_size(const void* p1, const void* p2);
-  static int bl_malloc_sort_by_pc(const void* p1, const void* p2);
-  static int bl_vm_sort_by_pc(const void* p1, const void* p2);
+  // Sort allocation sites in reserved size order
+  void virtual_memory_sites_to_size_order();
+  // Sort allocation sites in call site address order
+  void virtual_memory_sites_to_reservation_site_order();
 };
 
+#endif // INCLUDE_NMT
 
 #endif // SHARE_VM_SERVICES_MEM_BASELINE_HPP
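The reworked MemBaseline above replaces the old per-type arrays with the two snapshot objects plus sorted allocation-site lists. As a point of reference, a summary query against this interface could look like the sketch below; the sketch is illustrative only (it is not part of this changeset) and assumes the usual HotSpot output utilities (outputStream, SIZE_FORMAT) are available.

// Illustrative sketch only -- not part of this changeset.
#include "services/memBaseline.hpp"
#include "utilities/ostream.hpp"

static void print_nmt_summary(outputStream* out) {
  MemBaseline baseline;
  // Capture a summary baseline; the totals below are only valid afterwards.
  if (!baseline.baseline(true /* summaryOnly */)) {
    out->print_cr("Native memory baselining failed");
    return;
  }
  out->print_cr("reserved:  " SIZE_FORMAT " bytes", baseline.total_reserved_memory());
  out->print_cr("committed: " SIZE_FORMAT " bytes", baseline.total_committed_memory());
  out->print_cr("classes:   " SIZE_FORMAT,          baseline.class_count());
  out->print_cr("threads:   " SIZE_FORMAT,          baseline.thread_count());
}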
--- a/src/share/vm/services/memPtr.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "services/memPtr.hpp"
-#include "services/memTracker.hpp"
-
-volatile jint SequenceGenerator::_seq_number = 1;
-volatile unsigned long SequenceGenerator::_generation = 1;
-NOT_PRODUCT(jint SequenceGenerator::_max_seq_number = 1;)
-
-jint SequenceGenerator::next() {
-  jint seq = Atomic::add(1, &_seq_number);
-  if (seq < 0) {
-    MemTracker::shutdown(MemTracker::NMT_sequence_overflow);
-  } else {
-    NOT_PRODUCT(_max_seq_number = (seq > _max_seq_number) ? seq : _max_seq_number;)
-  }
-  return seq;
-}
-
--- a/src/share/vm/services/memPtr.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,510 +0,0 @@
-/*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SERVICES_MEM_PTR_HPP
-#define SHARE_VM_SERVICES_MEM_PTR_HPP
-
-#include "memory/allocation.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-#include "runtime/safepoint.hpp"
-
-/*
- * global sequence generator that generates sequence numbers to serialize
- * memory records.
- */
-class SequenceGenerator : AllStatic {
- public:
-  static jint next();
-
-  // peek last sequence number
-  static jint peek() {
-    return _seq_number;
-  }
-
-  // reset sequence number
-  static void reset() {
-    assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
-    _seq_number = 1;
-    _generation ++;
-  };
-
-  static unsigned long current_generation() { return _generation; }
-  NOT_PRODUCT(static jint max_seq_num() { return _max_seq_number; })
-
- private:
-  static volatile jint             _seq_number;
-  static volatile unsigned long    _generation;
-  NOT_PRODUCT(static jint          _max_seq_number; )
-};
-
-/*
- * The following classes are used to hold memory activity records at different stages.
- *   MemPointer
- *     |--------MemPointerRecord
- *                     |
- *                     |----MemPointerRecordEx
- *                     |           |
- *                     |           |-------SeqMemPointerRecordEx
- *                     |
- *                     |----SeqMemPointerRecord
- *                     |
- *                     |----VMMemRegion
- *                               |
- *                               |-----VMMemRegionEx
- *
- *
- *  prefix 'Seq' - sequenced, the record contains a sequence number
- *  suffix 'Ex'  - extension, the record contains a caller's pc
- *
- *  per-thread recorder : SeqMemPointerRecord(Ex)
- *  snapshot staging    : SeqMemPointerRecord(Ex)
- *  snapshot            : MemPointerRecord(Ex) and VMMemRegion(Ex)
- *
- */
-
-/*
- * class that wraps the address of a memory block;
- * the memory pointer refers either to a malloc'd
- * memory block or to an mmap'd memory block
- */
-class MemPointer VALUE_OBJ_CLASS_SPEC {
- public:
-  MemPointer(): _addr(0) { }
-  MemPointer(address addr): _addr(addr) { }
-
-  MemPointer(const MemPointer& copy_from) {
-    _addr = copy_from.addr();
-  }
-
-  inline address addr() const {
-    return _addr;
-  }
-
-  inline operator address() const {
-    return addr();
-  }
-
-  inline bool operator == (const MemPointer& other) const {
-    return addr() == other.addr();
-  }
-
-  inline MemPointer& operator = (const MemPointer& other) {
-    _addr = other.addr();
-    return *this;
-  }
-
- protected:
-  inline void set_addr(address addr) { _addr = addr; }
-
- protected:
-  // memory address
-  address    _addr;
-};
-
-/* MemPointerRecord records an activity and its associated
- * attributes on a memory block.
- */
-class MemPointerRecord : public MemPointer {
- private:
-  MEMFLAGS       _flags;
-  size_t         _size;
-
-public:
-  /* extension of MemoryType enum
-   * see share/vm/memory/allocation.hpp for details.
-   *
-   * The tag values are associated with sorting order, so be
-   * careful if changes are needed.
-   * Allocation records should sort ahead of tagging
-   * records, which in turn sort ahead of deallocation records
-   */
-  enum MemPointerTags {
-    tag_alloc            = 0x0001, // malloc or reserve record
-    tag_commit           = 0x0002, // commit record
-    tag_type             = 0x0003, // tag virtual memory to a memory type
-    tag_uncommit         = 0x0004, // uncommit record
-    tag_release          = 0x0005, // free or release record
-    tag_size             = 0x0006, // arena size
-    tag_masks            = 0x0007, // all tag bits
-    vmBit                = 0x0008
-  };
-
-  /* helper functions to interpret the tagging flags */
-
-  inline static bool is_allocation_record(MEMFLAGS flags) {
-    return (flags & tag_masks) == tag_alloc;
-  }
-
-  inline static bool is_deallocation_record(MEMFLAGS flags) {
-    return (flags & tag_masks) == tag_release;
-  }
-
-  inline static bool is_arena_record(MEMFLAGS flags) {
-    return (flags & (otArena | tag_size)) == otArena;
-  }
-
-  inline static bool is_arena_memory_record(MEMFLAGS flags) {
-    return (flags & (otArena | tag_size)) == (otArena | tag_size);
-  }
-
-  inline static bool is_virtual_memory_record(MEMFLAGS flags) {
-    return (flags & vmBit) != 0;
-  }
-
-  inline static bool is_virtual_memory_reserve_record(MEMFLAGS flags) {
-    return (flags & 0x0F) == (tag_alloc | vmBit);
-  }
-
-  inline static bool is_virtual_memory_commit_record(MEMFLAGS flags) {
-    return (flags & 0x0F) == (tag_commit | vmBit);
-  }
-
-  inline static bool is_virtual_memory_uncommit_record(MEMFLAGS flags) {
-    return (flags & 0x0F) == (tag_uncommit | vmBit);
-  }
-
-  inline static bool is_virtual_memory_release_record(MEMFLAGS flags) {
-    return (flags & 0x0F) == (tag_release | vmBit);
-  }
-
-  inline static bool is_virtual_memory_type_record(MEMFLAGS flags) {
-    return (flags & 0x0F) == (tag_type | vmBit);
-  }
-
-  /* tagging flags */
-  inline static MEMFLAGS malloc_tag()                 { return tag_alloc;   }
-  inline static MEMFLAGS free_tag()                   { return tag_release; }
-  inline static MEMFLAGS arena_size_tag()             { return tag_size | otArena; }
-  inline static MEMFLAGS virtual_memory_tag()         { return vmBit; }
-  inline static MEMFLAGS virtual_memory_reserve_tag() { return (tag_alloc | vmBit); }
-  inline static MEMFLAGS virtual_memory_commit_tag()  { return (tag_commit | vmBit); }
-  inline static MEMFLAGS virtual_memory_uncommit_tag(){ return (tag_uncommit | vmBit); }
-  inline static MEMFLAGS virtual_memory_release_tag() { return (tag_release | vmBit); }
-  inline static MEMFLAGS virtual_memory_type_tag()    { return (tag_type | vmBit); }
-
- public:
-  MemPointerRecord(): _size(0), _flags(mtNone) { }
-
-  MemPointerRecord(address addr, MEMFLAGS memflags, size_t size = 0):
-      MemPointer(addr), _flags(memflags), _size(size) { }
-
-  MemPointerRecord(const MemPointerRecord& copy_from):
-    MemPointer(copy_from), _flags(copy_from.flags()),
-    _size(copy_from.size()) {
-  }
-
-  /* MemPointerRecord is not sequenced; it always returns
-   * 0 to indicate a non-sequenced record
-   */
-  virtual jint seq() const               { return 0; }
-
-  inline size_t   size()  const          { return _size; }
-  inline void set_size(size_t size)      { _size = size; }
-
-  inline MEMFLAGS flags() const          { return _flags; }
-  inline void set_flags(MEMFLAGS flags)  { _flags = flags; }
-
-  MemPointerRecord& operator= (const MemPointerRecord& ptr) {
-    MemPointer::operator=(ptr);
-    _flags = ptr.flags();
-#ifdef ASSERT
-    if (IS_ARENA_OBJ(_flags)) {
-      assert(!is_vm_pointer(), "wrong flags");
-      assert((_flags & ot_masks) == otArena, "wrong flags");
-    }
-#endif
-    _size = ptr.size();
-    return *this;
-  }
-
-  // if the pointer represents a malloc-ed memory address
-  inline bool is_malloced_pointer() const {
-    return !is_vm_pointer();
-  }
-
-  // if the pointer represents a virtual memory address
-  inline bool is_vm_pointer() const {
-    return is_virtual_memory_record(_flags);
-  }
-
-  // if this record records a 'malloc' or virtual memory
-  // 'reserve' call
-  inline bool is_allocation_record() const {
-    return is_allocation_record(_flags);
-  }
-
-  // if this record records a size information of an arena
-  inline bool is_arena_memory_record() const {
-    return is_arena_memory_record(_flags);
-  }
-
-  // if this pointer represents an address to an arena object
-  inline bool is_arena_record() const {
-    return is_arena_record(_flags);
-  }
-
-  // if this record represents a size information of specific arena
-  inline bool is_memory_record_of_arena(const MemPointerRecord* arena_rc) {
-    assert(is_arena_memory_record(), "not size record");
-    assert(arena_rc->is_arena_record(), "not arena record");
-    return (arena_rc->addr() + sizeof(void*)) == addr();
-  }
-
-  // if this record records a 'free' or virtual memory 'free' call
-  inline bool is_deallocation_record() const {
-    return is_deallocation_record(_flags);
-  }
-
-  // if this record records a virtual memory 'commit' call
-  inline bool is_commit_record() const {
-    return is_virtual_memory_commit_record(_flags);
-  }
-
-  // if this record records a virtual memory 'uncommit' call
-  inline bool is_uncommit_record() const {
-    return is_virtual_memory_uncommit_record(_flags);
-  }
-
-  // if this record is a tagging record of a virtual memory block
-  inline bool is_type_tagging_record() const {
-    return is_virtual_memory_type_record(_flags);
-  }
-
-  // if the two memory pointer records actually represent the same
-  // memory block
-  inline bool is_same_region(const MemPointerRecord* other) const {
-    return (addr() == other->addr() && size() == other->size());
-  }
-
-  // if this memory region fully contains another one
-  inline bool contains_region(const MemPointerRecord* other) const {
-    return contains_region(other->addr(), other->size());
-  }
-
-  // if this memory region fully contains specified memory range
-  inline bool contains_region(address add, size_t sz) const {
-    return (addr() <= add && addr() + size() >= add + sz);
-  }
-
-  inline bool contains_address(address add) const {
-    return (addr() <= add && addr() + size() > add);
-  }
-
-  // if this memory region overlaps another region
-  inline bool overlaps_region(const MemPointerRecord* other) const {
-    assert(other != NULL, "Just check");
-    assert(size() > 0 && other->size() > 0, "empty range");
-    return contains_address(other->addr()) ||
-           contains_address(other->addr() + other->size() - 1) || // exclude end address
-           other->contains_address(addr()) ||
-           other->contains_address(addr() + size() - 1); // exclude end address
-  }
-
-};
-
-// MemPointerRecordEx also records the callsite pc from which
-// the memory block is allocated
-class MemPointerRecordEx : public MemPointerRecord {
- private:
-  address      _pc;  // callsite pc
-
- public:
-  MemPointerRecordEx(): _pc(0) { }
-
-  MemPointerRecordEx(address addr, MEMFLAGS memflags, size_t size = 0, address pc = 0):
-    MemPointerRecord(addr, memflags, size), _pc(pc) {}
-
-  MemPointerRecordEx(const MemPointerRecordEx& copy_from):
-    MemPointerRecord(copy_from), _pc(copy_from.pc()) {}
-
-  inline address pc() const { return _pc; }
-
-  void init(const MemPointerRecordEx* mpe) {
-    MemPointerRecord::operator=(*mpe);
-    _pc = mpe->pc();
-  }
-
-  void init(const MemPointerRecord* mp) {
-    MemPointerRecord::operator=(*mp);
-    _pc = 0;
-  }
-};
-
-// a virtual memory region. The region can represent a reserved
-// virtual memory region or a committed memory region
-class VMMemRegion : public MemPointerRecord {
-public:
-  VMMemRegion() { }
-
-  void init(const MemPointerRecord* mp) {
-    assert(mp->is_vm_pointer(), "Sanity check");
-    _addr = mp->addr();
-      set_size(mp->size());
-    set_flags(mp->flags());
-  }
-
-  VMMemRegion& operator=(const VMMemRegion& other) {
-    MemPointerRecord::operator=(other);
-    return *this;
-  }
-
-  inline bool is_reserved_region() const {
-    return is_allocation_record();
-  }
-
-  inline bool is_committed_region() const {
-    return is_commit_record();
-  }
-
-  /* base address of this virtual memory range */
-  inline address base() const {
-    return addr();
-  }
-
-  /* tag this virtual memory range to the specified memory type */
-  inline void tag(MEMFLAGS f) {
-    set_flags(flags() | (f & mt_masks));
-  }
-
-  // expand this region to also cover specified range.
-  // The range has to be on either end of the memory region.
-  void expand_region(address addr, size_t sz) {
-    if (addr < base()) {
-      assert(addr + sz == base(), "Sanity check");
-      _addr = addr;
-      set_size(size() + sz);
-    } else {
-      assert(base() + size() == addr, "Sanity check");
-      set_size(size() + sz);
-    }
-  }
-
-  // exclude the specified address range from this region.
-  // The excluded memory range has to be on either end of this memory
-  // region.
-  inline void exclude_region(address add, size_t sz) {
-    assert(is_reserved_region() || is_committed_region(), "Sanity check");
-    assert(addr() != NULL && size() != 0, "Sanity check");
-    assert(add >= addr() && add < addr() + size(), "Sanity check");
-    assert(add == addr() || (add + sz) == (addr() + size()),
-      "exclude in the middle");
-    if (add == addr()) {
-      set_addr(add + sz);
-      set_size(size() - sz);
-    } else {
-      set_size(size() - sz);
-    }
-  }
-};
-
-class VMMemRegionEx : public VMMemRegion {
- private:
-  jint   _seq;  // sequence number
-
- public:
-  VMMemRegionEx(): _pc(0) { }
-
-  void init(const MemPointerRecordEx* mpe) {
-    VMMemRegion::init(mpe);
-    _pc = mpe->pc();
-  }
-
-  void init(const MemPointerRecord* mpe) {
-    VMMemRegion::init(mpe);
-    _pc = 0;
-  }
-
-  VMMemRegionEx& operator=(const VMMemRegionEx& other) {
-    VMMemRegion::operator=(other);
-    _pc = other.pc();
-    return *this;
-  }
-
-  inline address pc() const { return _pc; }
- private:
-  address   _pc;
-};
-
-/*
- * Sequenced memory record
- */
-class SeqMemPointerRecord : public MemPointerRecord {
- private:
-   jint _seq;  // sequence number
-
- public:
-  SeqMemPointerRecord(): _seq(0){ }
-
-  SeqMemPointerRecord(address addr, MEMFLAGS flags, size_t size, jint seq)
-    : MemPointerRecord(addr, flags, size), _seq(seq)  {
-  }
-
-  SeqMemPointerRecord(const SeqMemPointerRecord& copy_from)
-    : MemPointerRecord(copy_from) {
-    _seq = copy_from.seq();
-  }
-
-  SeqMemPointerRecord& operator= (const SeqMemPointerRecord& ptr) {
-    MemPointerRecord::operator=(ptr);
-    _seq = ptr.seq();
-    return *this;
-  }
-
-  inline jint seq() const {
-    return _seq;
-  }
-};
-
-
-
-class SeqMemPointerRecordEx : public MemPointerRecordEx {
- private:
-  jint    _seq;  // sequence number
-
- public:
-  SeqMemPointerRecordEx(): _seq(0) { }
-
-  SeqMemPointerRecordEx(address addr, MEMFLAGS flags, size_t size,
-    jint seq, address pc):
-    MemPointerRecordEx(addr, flags, size, pc), _seq(seq)  {
-  }
-
-  SeqMemPointerRecordEx(const SeqMemPointerRecordEx& copy_from)
-    : MemPointerRecordEx(copy_from) {
-    _seq = copy_from.seq();
-  }
-
-  SeqMemPointerRecordEx& operator= (const SeqMemPointerRecordEx& ptr) {
-    MemPointerRecordEx::operator=(ptr);
-    _seq = ptr.seq();
-    return *this;
-  }
-
-  inline jint seq() const {
-    return _seq;
-  }
-};
-
-#endif // SHARE_VM_SERVICES_MEM_PTR_HPP
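The tagging helpers near the top of the deleted MemPointerRecord compose bit flags as in the short hypothetical example below (again not part of the changeset; it assumes MEMFLAGS permits bitwise composition, as the removed code itself relies on): a virtual-memory reserve record carries tag_alloc | vmBit in its low bits alongside the memory type.

// Hypothetical illustration of the removed tagging scheme.
MEMFLAGS flags = MemPointerRecord::virtual_memory_reserve_tag() | mtThread;
assert(MemPointerRecord::is_virtual_memory_record(flags),         "vm bit is set");
assert(MemPointerRecord::is_virtual_memory_reserve_record(flags), "low bits are tag_alloc | vmBit");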
--- a/src/share/vm/services/memPtrArray.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,306 +0,0 @@
-/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-#ifndef SHARE_VM_UTILITIES_MEM_PTR_ARRAY_HPP
-#define SHARE_VM_UTILITIES_MEM_PTR_ARRAY_HPP
-
-#include "memory/allocation.hpp"
-#include "services/memPtr.hpp"
-
-class MemPtr;
-class MemRecorder;
-class ArenaInfo;
-class MemSnapshot;
-
-extern "C" {
-  typedef int (*FN_SORT)(const void *, const void *);
-}
-
-
-// Memory pointer array interface. This array is used by NMT to hold
-// various memory block information.
-// The memory pointer arrays are usually walked with their iterators.
-
-class MemPointerArray : public CHeapObj<mtNMT> {
- public:
-  virtual ~MemPointerArray() { }
-
-  // return true if it can not allocate storage for the data
-  virtual bool out_of_memory() const = 0;
-  virtual bool is_empty() const = 0;
-  virtual bool is_full() = 0;
-  virtual int  length() const = 0;
-  virtual void clear() = 0;
-  virtual bool append(MemPointer* ptr) = 0;
-  virtual bool insert_at(MemPointer* ptr, int pos) = 0;
-  virtual bool remove_at(int pos) = 0;
-  virtual MemPointer* at(int index) const = 0;
-  virtual void sort(FN_SORT fn) = 0;
-  virtual size_t instance_size() const = 0;
-  virtual bool shrink() = 0;
-
-  NOT_PRODUCT(virtual int capacity() const = 0;)
-};
-
-// Iterator interface
-class MemPointerArrayIterator VALUE_OBJ_CLASS_SPEC {
- public:
-  // return the pointer at current position
-  virtual MemPointer* current() const = 0;
-  // return the next pointer and advance current position
-  virtual MemPointer* next() = 0;
-  // return next pointer without advancing current position
-  virtual MemPointer* peek_next() const = 0;
-  // return previous pointer without changing current position
-  virtual MemPointer* peek_prev() const = 0;
-  // remove the pointer at current position
-  virtual void        remove() = 0;
-  // insert the pointer at current position
-  virtual bool        insert(MemPointer* ptr) = 0;
-  // insert specified element after current position and
-  // move current position to newly inserted position
-  virtual bool        insert_after(MemPointer* ptr) = 0;
-};
-
-// implementation class
-class MemPointerArrayIteratorImpl : public MemPointerArrayIterator {
- protected:
-  MemPointerArray*  _array;
-  int               _pos;
-
- public:
-  MemPointerArrayIteratorImpl(MemPointerArray* arr) {
-    assert(arr != NULL, "Parameter check");
-    _array = arr;
-    _pos = 0;
-  }
-
-  virtual MemPointer* current() const {
-    if (_pos < _array->length()) {
-      return _array->at(_pos);
-    }
-    return NULL;
-  }
-
-  virtual MemPointer* next() {
-    if (_pos + 1 < _array->length()) {
-      return _array->at(++_pos);
-    }
-    _pos = _array->length();
-    return NULL;
-  }
-
-  virtual MemPointer* peek_next() const {
-    if (_pos + 1 < _array->length()) {
-      return _array->at(_pos + 1);
-    }
-    return NULL;
-  }
-
-  virtual MemPointer* peek_prev() const {
-    if (_pos > 0) {
-      return _array->at(_pos - 1);
-    }
-    return NULL;
-  }
-
-  virtual void remove() {
-    if (_pos < _array->length()) {
-      _array->remove_at(_pos);
-    }
-  }
-
-  virtual bool insert(MemPointer* ptr) {
-    return _array->insert_at(ptr, _pos);
-  }
-
-  virtual bool insert_after(MemPointer* ptr) {
-    if (_array->insert_at(ptr, _pos + 1)) {
-      _pos ++;
-      return true;
-    }
-    return false;
-  }
-};
-
-
-
-// Memory pointer array implementation.
-// This implementation provides an expandable array
-#define DEFAULT_PTR_ARRAY_SIZE 1024
-
-template <class E> class MemPointerArrayImpl : public MemPointerArray {
- private:
-  int                   _max_size;
-  int                   _size;
-  bool                  _init_elements;
-  E*                    _data;
-
- public:
-  MemPointerArrayImpl(int initial_size = DEFAULT_PTR_ARRAY_SIZE, bool init_elements = true):
-   _max_size(initial_size), _size(0), _init_elements(init_elements) {
-    _data = (E*)raw_allocate(sizeof(E), initial_size);
-    if (_init_elements) {
-      for (int index = 0; index < _max_size; index ++) {
-        ::new ((void*)&_data[index]) E();
-      }
-    }
-  }
-
-  virtual ~MemPointerArrayImpl() {
-    if (_data != NULL) {
-      raw_free(_data);
-    }
-  }
-
- public:
-  bool out_of_memory() const {
-    return (_data == NULL);
-  }
-
-  size_t instance_size() const {
-    return sizeof(MemPointerArrayImpl<E>) + _max_size * sizeof(E);
-  }
-
-  bool is_empty() const {
-    assert(_data != NULL, "Just check");
-    return _size == 0;
-  }
-
-  bool is_full() {
-    assert(_data != NULL, "Just check");
-    if (_size < _max_size) {
-      return false;
-    } else {
-      return !expand_array();
-    }
-  }
-
-  int length() const {
-    assert(_data != NULL, "Just check");
-    return _size;
-  }
-
-  NOT_PRODUCT(int capacity() const { return _max_size; })
-
-  void clear() {
-    assert(_data != NULL, "Just check");
-    _size = 0;
-  }
-
-  bool append(MemPointer* ptr) {
-    assert(_data != NULL, "Just check");
-    if (is_full()) {
-      return false;
-    }
-    _data[_size ++] = *(E*)ptr;
-    return true;
-  }
-
-  bool insert_at(MemPointer* ptr, int pos) {
-    assert(_data != NULL, "Just check");
-    if (is_full()) {
-      return false;
-    }
-    for (int index = _size; index > pos; index --) {
-      _data[index] = _data[index - 1];
-    }
-    _data[pos] = *(E*)ptr;
-    _size ++;
-    return true;
-  }
-
-  bool remove_at(int pos) {
-    assert(_data != NULL, "Just check");
-    if (_size <= pos && pos >= 0) {
-      return false;
-    }
-    -- _size;
-
-    for (int index = pos; index < _size; index ++) {
-      _data[index] = _data[index + 1];
-    }
-    return true;
-  }
-
-  MemPointer* at(int index) const {
-    assert(_data != NULL, "Just check");
-    assert(index >= 0 && index < _size, "illegal index");
-    return &_data[index];
-  }
-
-  bool shrink() {
-    float used = ((float)_size) / ((float)_max_size);
-    if (used < 0.40) {
-      E* old_ptr = _data;
-      int new_size = ((_max_size) / (2 * DEFAULT_PTR_ARRAY_SIZE) + 1) * DEFAULT_PTR_ARRAY_SIZE;
-      _data = (E*)raw_reallocate(_data, sizeof(E), new_size);
-      if (_data == NULL) {
-        _data = old_ptr;
-        return false;
-      } else {
-        _max_size = new_size;
-        return true;
-      }
-    }
-    return false;
-  }
-
-  void sort(FN_SORT fn) {
-    assert(_data != NULL, "Just check");
-    qsort((void*)_data, _size, sizeof(E), fn);
-  }
-
- private:
-  bool  expand_array() {
-    assert(_data != NULL, "Not yet allocated");
-    E* old_ptr = _data;
-    if ((_data = (E*)raw_reallocate((void*)_data, sizeof(E),
-      _max_size + DEFAULT_PTR_ARRAY_SIZE)) == NULL) {
-      _data = old_ptr;
-      return false;
-    } else {
-      _max_size += DEFAULT_PTR_ARRAY_SIZE;
-      if (_init_elements) {
-        for (int index = _size; index < _max_size; index ++) {
-          ::new ((void*)&_data[index]) E();
-        }
-      }
-      return true;
-    }
-  }
-
-  void* raw_allocate(size_t elementSize, int items) {
-    return os::malloc(elementSize * items, mtNMT);
-  }
-
-  void* raw_reallocate(void* ptr, size_t elementSize, int items) {
-    return os::realloc(ptr, elementSize * items, mtNMT);
-  }
-
-  void  raw_free(void* ptr) {
-    os::free(ptr, mtNMT);
-  }
-};
-
-#endif // SHARE_VM_UTILITIES_MEM_PTR_ARRAY_HPP
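For clarity about the growth policy of the array implementation removed above: capacity grows in DEFAULT_PTR_ARRAY_SIZE (1024-element) steps, and shrink() only acts when less than 40% of the capacity is in use, rounding the new capacity to a multiple of 1024 at roughly half the old size. As a worked illustration (not text from the changeset): with _max_size = 4096 and _size = 1000 (about 24% used), shrink() reallocates to ((4096 / 2048) + 1) * 1024 = 3072 elements, whereas expand_array() would have grown 4096 to 5120.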
--- a/src/share/vm/services/memRecorder.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,171 +0,0 @@
-/*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-
-#include "runtime/atomic.hpp"
-#include "services/memBaseline.hpp"
-#include "services/memRecorder.hpp"
-#include "services/memPtr.hpp"
-#include "services/memTracker.hpp"
-
-MemPointer* SequencedRecordIterator::next_record() {
-  MemPointerRecord* itr_cur = (MemPointerRecord*)_itr.current();
-  if (itr_cur == NULL)  {
-    return itr_cur;
-  }
-
-  MemPointerRecord* itr_next = (MemPointerRecord*)_itr.next();
-
-  // don't collapse virtual memory records
-  while (itr_next != NULL && !itr_cur->is_vm_pointer() &&
-    !itr_next->is_vm_pointer() &&
-    same_kind(itr_cur, itr_next)) {
-    itr_cur = itr_next;
-    itr_next = (MemPointerRecord*)_itr.next();
-  }
-
-  return itr_cur;
-}
-
-
-volatile jint MemRecorder::_instance_count = 0;
-
-MemRecorder::MemRecorder() {
-  assert(MemTracker::is_on(), "Native memory tracking is off");
-  Atomic::inc(&_instance_count);
-  set_generation();
-
-  if (MemTracker::track_callsite()) {
-    _pointer_records = new (std::nothrow)FixedSizeMemPointerArray<SeqMemPointerRecordEx,
-        DEFAULT_RECORDER_PTR_ARRAY_SIZE>();
-  } else {
-    _pointer_records = new (std::nothrow)FixedSizeMemPointerArray<SeqMemPointerRecord,
-        DEFAULT_RECORDER_PTR_ARRAY_SIZE>();
-  }
-  _next = NULL;
-
-
-  if (_pointer_records != NULL) {
-    // record itself
-    address pc = CURRENT_PC;
-    record((address)this, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder),
-        sizeof(MemRecorder), SequenceGenerator::next(), pc);
-    record((address)_pointer_records, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder),
-        _pointer_records->instance_size(), SequenceGenerator::next(), pc);
-  }
-}
-
-MemRecorder::~MemRecorder() {
-  if (_pointer_records != NULL) {
-    if (MemTracker::is_on()) {
-      MemTracker::record_free((address)_pointer_records, mtNMT);
-      MemTracker::record_free((address)this, mtNMT);
-    }
-    delete _pointer_records;
-  }
-  // delete all linked recorders
-  while (_next != NULL) {
-    MemRecorder* tmp = _next;
-    _next = _next->next();
-    tmp->set_next(NULL);
-    delete tmp;
-  }
-  Atomic::dec(&_instance_count);
-}
-
-// Sorting order:
-//   1. memory block address
-//   2. mem pointer record tags
-//   3. sequence number
-int MemRecorder::sort_record_fn(const void* e1, const void* e2) {
-  const MemPointerRecord* p1 = (const MemPointerRecord*)e1;
-  const MemPointerRecord* p2 = (const MemPointerRecord*)e2;
-  int delta = UNSIGNED_COMPARE(p1->addr(), p2->addr());
-  if (delta == 0) {
-    int df = UNSIGNED_COMPARE((p1->flags() & MemPointerRecord::tag_masks),
-                              (p2->flags() & MemPointerRecord::tag_masks));
-    if (df == 0) {
-      assert(p1->seq() != p2->seq(), "dup seq");
-      return p1->seq() - p2->seq();
-    } else {
-      return df;
-    }
-  } else {
-    return delta;
-  }
-}
-
-bool MemRecorder::record(address p, MEMFLAGS flags, size_t size, jint seq, address pc) {
-  assert(seq > 0, "No sequence number");
-#ifdef ASSERT
-  if (MemPointerRecord::is_virtual_memory_record(flags)) {
-    assert((flags & MemPointerRecord::tag_masks) != 0, "bad virtual memory record");
-  } else {
-    assert((flags & MemPointerRecord::tag_masks) == MemPointerRecord::malloc_tag() ||
-           (flags & MemPointerRecord::tag_masks) == MemPointerRecord::free_tag() ||
-           IS_ARENA_OBJ(flags),
-           "bad malloc record");
-  }
-  // a recorder should only hold records within the same generation
-  unsigned long cur_generation = SequenceGenerator::current_generation();
-  assert(cur_generation == _generation,
-         "this thread did not enter sync point");
-#endif
-
-  if (MemTracker::track_callsite()) {
-    SeqMemPointerRecordEx ap(p, flags, size, seq, pc);
-    debug_only(check_dup_seq(ap.seq());)
-    return _pointer_records->append(&ap);
-  } else {
-    SeqMemPointerRecord ap(p, flags, size, seq);
-    debug_only(check_dup_seq(ap.seq());)
-    return _pointer_records->append(&ap);
-  }
-}
-
-  // iterator for alloc pointers
-SequencedRecordIterator MemRecorder::pointer_itr() {
-  assert(_pointer_records != NULL, "just check");
-  _pointer_records->sort((FN_SORT)sort_record_fn);
-  return SequencedRecordIterator(_pointer_records);
-}
-
-
-void MemRecorder::set_generation() {
-  _generation = SequenceGenerator::current_generation();
-}
-
-#ifdef ASSERT
-
-void MemRecorder::check_dup_seq(jint seq) const {
-  MemPointerArrayIteratorImpl itr(_pointer_records);
-  MemPointerRecord* rc = (MemPointerRecord*)itr.current();
-  while (rc != NULL) {
-    assert(rc->seq() != seq, "dup seq");
-    rc = (MemPointerRecord*)itr.next();
-  }
-}
-
-#endif
--- a/src/share/vm/services/memRecorder.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,271 +0,0 @@
-/*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SERVICES_MEM_RECORDER_HPP
-#define SHARE_VM_SERVICES_MEM_RECORDER_HPP
-
-#include "memory/allocation.hpp"
-#include "runtime/os.hpp"
-#include "services/memPtrArray.hpp"
-
-class MemSnapshot;
-class MemTracker;
-class MemTrackWorker;
-
-// Fixed size memory pointer array implementation
-template <class E, int SIZE> class FixedSizeMemPointerArray :
-  public MemPointerArray {
-  // This implementation is for memory recorder only
-  friend class MemRecorder;
-
- private:
-  E      _data[SIZE];
-  int    _size;
-
- protected:
-  FixedSizeMemPointerArray(bool init_elements = false):
-   _size(0){
-    if (init_elements) {
-      for (int index = 0; index < SIZE; index ++) {
-        ::new ((void*)&_data[index]) E();
-      }
-    }
-  }
-
-  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
-    // the instance is part of a MemRecorder, so it needs to be tagged with 'otNMTRecorder'
-    // to avoid recursion
-    return os::malloc(size, (mtNMT | otNMTRecorder));
-  }
-
-  void* operator new(size_t size) throw() {
-    assert(false, "use nothrow version");
-    return NULL;
-  }
-
-  void operator delete(void* p) {
-    os::free(p, (mtNMT | otNMTRecorder));
-  }
-
-  // instance size
-  inline size_t instance_size() const {
-    return sizeof(FixedSizeMemPointerArray<E, SIZE>);
-  }
-
-  NOT_PRODUCT(int capacity() const { return SIZE; })
-
- public:
-  // implementation of public interface
-  bool out_of_memory() const { return false; }
-  bool is_empty()      const { return _size == 0; }
-  bool is_full()             { return length() >= SIZE; }
-  int  length()        const { return _size; }
-
-  void clear() {
-    _size = 0;
-  }
-
-  bool append(MemPointer* ptr) {
-    if (is_full()) return false;
-    _data[_size ++] = *(E*)ptr;
-    return true;
-  }
-
-  virtual bool insert_at(MemPointer* p, int pos) {
-    assert(false, "append only");
-    return false;
-  }
-
-  virtual bool remove_at(int pos) {
-    assert(false, "not supported");
-    return false;
-  }
-
-  MemPointer* at(int index) const {
-    assert(index >= 0 && index < length(),
-      "parameter check");
-    return ((E*)&_data[index]);
-  }
-
-  void sort(FN_SORT fn) {
-    qsort((void*)_data, _size, sizeof(E), fn);
-  }
-
-  bool shrink() {
-    return false;
-  }
-};
-
-
-// This iterator requires pre-sorted MemPointerArray, which is sorted by:
-//  1. address
-//  2. allocation type
-//  3. sequence number
-// During the array walk, the iterator collapses pointers with the same
-// address and allocation type, and only returns the one with the highest
-// sequence number.
-//
-// This is a read-only iterator; update methods are asserted.
-class SequencedRecordIterator : public MemPointerArrayIterator {
- private:
-   MemPointerArrayIteratorImpl _itr;
-   MemPointer*                 _cur;
-
- public:
-  SequencedRecordIterator(const MemPointerArray* arr):
-    _itr(const_cast<MemPointerArray*>(arr)) {
-    _cur = next_record();
-  }
-
-  SequencedRecordIterator(const SequencedRecordIterator& itr):
-    _itr(itr._itr) {
-    _cur = next_record();
-  }
-
-  // return the pointer at current position
-  virtual MemPointer* current() const {
-    return _cur;
-  };
-
-  // return the next pointer and advance current position
-  virtual MemPointer* next() {
-    _cur = next_record();
-    return _cur;
-  }
-
-  // return the next pointer without advancing current position
-  virtual MemPointer* peek_next() const {
-    assert(false, "not implemented");
-    return NULL;
-
-  }
-  // return the previous pointer without changing current position
-  virtual MemPointer* peek_prev() const {
-    assert(false, "not implemented");
-    return NULL;
-  }
-
-  // remove the pointer at current position
-  virtual void remove() {
-    assert(false, "read-only iterator");
-  };
-  // insert the pointer at current position
-  virtual bool insert(MemPointer* ptr) {
-    assert(false, "read-only iterator");
-    return false;
-  }
-
-  virtual bool insert_after(MemPointer* ptr) {
-    assert(false, "read-only iterator");
-    return false;
-  }
- private:
-  // collapse the 'same kind' of records, and return this 'kind' of
-  // record with highest sequence number
-  MemPointer* next_record();
-
-  // Test if the two records are the same kind: the same memory block and allocation
-  // type.
-  inline bool same_kind(const MemPointerRecord* p1, const MemPointerRecord* p2) const {
-    assert(!p1->is_vm_pointer() && !p2->is_vm_pointer(), "malloc pointer only");
-    return (p1->addr() == p2->addr() &&
-      (p1->flags() &MemPointerRecord::tag_masks) ==
-      (p2->flags() & MemPointerRecord::tag_masks));
-  }
-};
-
-
-
-#define DEFAULT_RECORDER_PTR_ARRAY_SIZE 512
-
-class MemRecorder : public CHeapObj<mtNMT|otNMTRecorder> {
-  friend class MemSnapshot;
-  friend class MemTracker;
-  friend class MemTrackWorker;
-  friend class GenerationData;
-
- protected:
-  // the array that holds memory records
-  MemPointerArray*         _pointer_records;
-
- private:
-  // used for linked list
-  MemRecorder*             _next;
-  // active recorder can only record a certain generation data
-  unsigned long            _generation;
-
- protected:
-  _NOINLINE_ MemRecorder();
-  ~MemRecorder();
-
-  // record a memory operation
-  bool record(address addr, MEMFLAGS flags, size_t size, jint seq, address caller_pc = 0);
-
-  // linked list support
-  inline void set_next(MemRecorder* rec) {
-    _next = rec;
-  }
-
-  inline MemRecorder* next() const {
-    return _next;
-  }
-
-  // if the recorder is full
-  inline bool is_full() const {
-    assert(_pointer_records != NULL, "just check");
-    return _pointer_records->is_full();
-  }
-
-  // if running out of memory when initializing recorder's internal
-  // data
-  inline bool out_of_memory() const {
-    return (_pointer_records == NULL ||
-      _pointer_records->out_of_memory());
-  }
-
-  inline void clear() {
-    assert(_pointer_records != NULL, "Just check");
-    _pointer_records->clear();
-  }
-
-  SequencedRecordIterator pointer_itr();
-
-  // return the generation this recorder belongs to
-  unsigned long get_generation() const { return _generation; }
- protected:
-  // number of MemRecorder instance
-  static volatile jint _instance_count;
-
- private:
-  // sorting function, sort records into following order
-  // 1. memory address
-  // 2. allocation type
-  // 3. sequence number
-  static int sort_record_fn(const void* e1, const void* e2);
-
-  debug_only(void check_dup_seq(jint seq) const;)
-  void set_generation();
-};
-
-#endif // SHARE_VM_SERVICES_MEM_RECORDER_HPP
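The collapse behaviour of the deleted SequencedRecordIterator is easiest to see with a concrete (illustrative) case: if the sorted array holds two malloc records for the same address, one with sequence number 3 and one with sequence number 7, next_record() skips the earlier record and returns only the one with sequence number 7; virtual memory records are never collapsed.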
--- a/src/share/vm/services/memReporter.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/services/memReporter.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -22,618 +22,595 @@
  *
  */
 #include "precompiled.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "runtime/os.hpp"
+
+#include "memory/allocation.hpp"
+#include "services/mallocTracker.hpp"
 #include "services/memReporter.hpp"
-#include "services/memPtrArray.hpp"
-#include "services/memTracker.hpp"
+#include "services/virtualMemoryTracker.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+size_t MemReporterBase::reserved_total(const MallocMemory* malloc, const VirtualMemory* vm) const {
+  return malloc->malloc_size() + malloc->arena_size() + vm->reserved();
+}
+
+size_t MemReporterBase::committed_total(const MallocMemory* malloc, const VirtualMemory* vm) const {
+  return malloc->malloc_size() + malloc->arena_size() + vm->committed();
+}
 
-PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
+void MemReporterBase::print_total(size_t reserved, size_t committed) const {
+  const char* scale = current_scale();
+  output()->print("reserved=" SIZE_FORMAT "%s, committed=" SIZE_FORMAT "%s",
+    amount_in_current_scale(reserved), scale, amount_in_current_scale(committed), scale);
+}
+
+void MemReporterBase::print_malloc(size_t amount, size_t count) const {
+  const char* scale = current_scale();
+  outputStream* out = output();
+  out->print("(malloc=" SIZE_FORMAT "%s",
+    amount_in_current_scale(amount), scale);
+
+  if (count > 0) {
+    out->print(" #" SIZE_FORMAT "", count);
+  }
 
-const char* BaselineOutputer::memory_unit(size_t scale) {
-  switch(scale) {
-    case K: return "KB";
-    case M: return "MB";
-    case G: return "GB";
-  }
-  ShouldNotReachHere();
-  return NULL;
+  out->print(")");
+}
+
+void MemReporterBase::print_virtual_memory(size_t reserved, size_t committed) const {
+  const char* scale = current_scale();
+  output()->print("(mmap: reserved=" SIZE_FORMAT "%s, committed=" SIZE_FORMAT "%s)",
+    amount_in_current_scale(reserved), scale, amount_in_current_scale(committed), scale);
+}
+
+void MemReporterBase::print_malloc_line(size_t amount, size_t count) const {
+  output()->print("%28s", " ");
+  print_malloc(amount, count);
+  output()->print_cr(" ");
+}
+
+void MemReporterBase::print_virtual_memory_line(size_t reserved, size_t committed) const {
+  output()->print("%28s", " ");
+  print_virtual_memory(reserved, committed);
+  output()->print_cr(" ");
+}
+
+void MemReporterBase::print_arena_line(size_t amount, size_t count) const {
+  const char* scale = current_scale();
+  output()->print_cr("%27s (arena=" SIZE_FORMAT "%s #" SIZE_FORMAT ")", " ",
+    amount_in_current_scale(amount), scale, count);
+}
+
+void MemReporterBase::print_virtual_memory_region(const char* type, address base, size_t size) const {
+  const char* scale = current_scale();
+  output()->print("[" PTR_FORMAT " - " PTR_FORMAT "] %s " SIZE_FORMAT "%s",
+    p2i(base), p2i(base + size), type, amount_in_current_scale(size), scale);
 }
 
 
-void BaselineReporter::report_baseline(const MemBaseline& baseline, bool summary_only) {
-  assert(MemTracker::is_on(), "Native memory tracking is off");
-  _outputer.start(scale());
-  _outputer.total_usage(
-    amount_in_current_scale(baseline.total_malloc_amount() + baseline.total_reserved_amount()),
-    amount_in_current_scale(baseline.total_malloc_amount() + baseline.total_committed_amount()));
-
-  _outputer.num_of_classes(baseline.number_of_classes());
-  _outputer.num_of_threads(baseline.number_of_threads());
-
-  report_summaries(baseline);
-  if (!summary_only && MemTracker::track_callsite()) {
-    report_virtual_memory_map(baseline);
-    report_callsites(baseline);
-  }
-  _outputer.done();
-}
-
-void BaselineReporter::report_summaries(const MemBaseline& baseline) {
-  _outputer.start_category_summary();
-  MEMFLAGS type;
+void MemSummaryReporter::report() {
+  const char* scale = current_scale();
+  outputStream* out = output();
+  size_t total_reserved_amount = _malloc_snapshot->total() +
+    _vm_snapshot->total_reserved();
+  size_t total_committed_amount = _malloc_snapshot->total() +
+    _vm_snapshot->total_committed();
 
-  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
-    type = MemBaseline::MemType2NameMap[index]._flag;
-    _outputer.category_summary(type,
-      amount_in_current_scale(baseline.reserved_amount(type)),
-      amount_in_current_scale(baseline.committed_amount(type)),
-      amount_in_current_scale(baseline.malloc_amount(type)),
-      baseline.malloc_count(type),
-      amount_in_current_scale(baseline.arena_amount(type)),
-      baseline.arena_count(type));
-  }
-
-  _outputer.done_category_summary();
-}
+  // Overall total
+  out->print_cr("\nNative Memory Tracking:\n");
+  out->print("Total: ");
+  print_total(total_reserved_amount, total_committed_amount);
+  out->print("\n");
 
-void BaselineReporter::report_virtual_memory_map(const MemBaseline& baseline) {
-  _outputer.start_virtual_memory_map();
-  MemBaseline* pBL = const_cast<MemBaseline*>(&baseline);
-  MemPointerArrayIteratorImpl itr = MemPointerArrayIteratorImpl(pBL->_vm_map);
-  VMMemRegionEx* rgn = (VMMemRegionEx*)itr.current();
-  while (rgn != NULL) {
-    if (rgn->is_reserved_region()) {
-      _outputer.reserved_memory_region(FLAGS_TO_MEMORY_TYPE(rgn->flags()),
-        rgn->base(), rgn->base() + rgn->size(), amount_in_current_scale(rgn->size()), rgn->pc());
-    } else {
-      _outputer.committed_memory_region(rgn->base(), rgn->base() + rgn->size(),
-        amount_in_current_scale(rgn->size()), rgn->pc());
-    }
-    rgn = (VMMemRegionEx*)itr.next();
+  // Summary by memory type
+  for (int index = 0; index < mt_number_of_types; index ++) {
+    MEMFLAGS flag = NMTUtil::index_to_flag(index);
+    // thread stack is reported as part of thread category
+    if (flag == mtThreadStack) continue;
+    MallocMemory* malloc_memory = _malloc_snapshot->by_type(flag);
+    VirtualMemory* virtual_memory = _vm_snapshot->by_type(flag);
+
+    report_summary_of_type(flag, malloc_memory, virtual_memory);
   }
-
-  _outputer.done_virtual_memory_map();
 }
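+// Illustrative head of the summary report produced above (figures made up,
+// KB scale):
+//
+//   Native Memory Tracking:
+//
+//   Total: reserved=1349099KB, committed=118105KB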
 
-void BaselineReporter::report_callsites(const MemBaseline& baseline) {
-  _outputer.start_callsite();
-  MemBaseline* pBL = const_cast<MemBaseline*>(&baseline);
+void MemSummaryReporter::report_summary_of_type(MEMFLAGS flag,
+  MallocMemory*  malloc_memory, VirtualMemory* virtual_memory) {
 
-  pBL->_malloc_cs->sort((FN_SORT)MemBaseline::bl_malloc_sort_by_size);
-  pBL->_vm_cs->sort((FN_SORT)MemBaseline::bl_vm_sort_by_size);
+  size_t reserved_amount  = reserved_total (malloc_memory, virtual_memory);
+  size_t committed_amount = committed_total(malloc_memory, virtual_memory);
 
-  // walk malloc callsites
-  MemPointerArrayIteratorImpl malloc_itr(pBL->_malloc_cs);
-  MallocCallsitePointer*      malloc_callsite =
-                  (MallocCallsitePointer*)malloc_itr.current();
-  while (malloc_callsite != NULL) {
-    _outputer.malloc_callsite(malloc_callsite->addr(),
-        amount_in_current_scale(malloc_callsite->amount()), malloc_callsite->count());
-    malloc_callsite = (MallocCallsitePointer*)malloc_itr.next();
+  // Count thread's native stack in "Thread" category
+  if (flag == mtThread) {
+    const VirtualMemory* thread_stack_usage =
+      (const VirtualMemory*)_vm_snapshot->by_type(mtThreadStack);
+    reserved_amount  += thread_stack_usage->reserved();
+    committed_amount += thread_stack_usage->committed();
+  } else if (flag == mtNMT) {
+    // Count malloc headers in "NMT" category
+    reserved_amount  += _malloc_snapshot->malloc_overhead()->size();
+    committed_amount += _malloc_snapshot->malloc_overhead()->size();
   }
 
-  // walk virtual memory callsite
-  MemPointerArrayIteratorImpl vm_itr(pBL->_vm_cs);
-  VMCallsitePointer*          vm_callsite = (VMCallsitePointer*)vm_itr.current();
-  while (vm_callsite != NULL) {
-    _outputer.virtual_memory_callsite(vm_callsite->addr(),
-      amount_in_current_scale(vm_callsite->reserved_amount()),
-      amount_in_current_scale(vm_callsite->committed_amount()));
-    vm_callsite = (VMCallsitePointer*)vm_itr.next();
-  }
-  pBL->_malloc_cs->sort((FN_SORT)MemBaseline::bl_malloc_sort_by_pc);
-  pBL->_vm_cs->sort((FN_SORT)MemBaseline::bl_vm_sort_by_pc);
-  _outputer.done_callsite();
-}
+  if (amount_in_current_scale(reserved_amount) > 0) {
+    outputStream* out   = output();
+    const char*   scale = current_scale();
+    out->print("-%26s (", NMTUtil::flag_to_name(flag));
+    print_total(reserved_amount, committed_amount);
+    out->print_cr(")");
 
-void BaselineReporter::diff_baselines(const MemBaseline& cur, const MemBaseline& prev,
-  bool summary_only) {
-  assert(MemTracker::is_on(), "Native memory tracking is off");
-  _outputer.start(scale());
-  size_t total_reserved = cur.total_malloc_amount() + cur.total_reserved_amount();
-  size_t total_committed = cur.total_malloc_amount() + cur.total_committed_amount();
-
-  _outputer.diff_total_usage(
-    amount_in_current_scale(total_reserved), amount_in_current_scale(total_committed),
-    diff_in_current_scale(total_reserved,  (prev.total_malloc_amount() + prev.total_reserved_amount())),
-    diff_in_current_scale(total_committed, (prev.total_committed_amount() + prev.total_malloc_amount())));
+    if (flag == mtClass) {
+      // report class count
+      out->print_cr("%27s (classes #" SIZE_FORMAT ")", " ", _class_count);
+    } else if (flag == mtThread) {
+      // report thread count
+      out->print_cr("%27s (thread #" SIZE_FORMAT ")", " ", _malloc_snapshot->thread_count());
+      const VirtualMemory* thread_stack_usage =
+       _vm_snapshot->by_type(mtThreadStack);
+      out->print("%27s (stack: ", " ");
+      print_total(thread_stack_usage->reserved(), thread_stack_usage->committed());
+      out->print_cr(")");
+    }
 
-  _outputer.diff_num_of_classes(cur.number_of_classes(),
-       diff(cur.number_of_classes(), prev.number_of_classes()));
-  _outputer.diff_num_of_threads(cur.number_of_threads(),
-       diff(cur.number_of_threads(), prev.number_of_threads()));
+    // Report malloc'd memory
+    if (amount_in_current_scale(malloc_memory->malloc_size()) > 0) {
+      // We don't know how many arena chunks are in use, so don't report the count
+      size_t count = (flag == mtChunk) ? 0 : malloc_memory->malloc_count();
+      print_malloc_line(malloc_memory->malloc_size(), count);
+    }
 
-  diff_summaries(cur, prev);
-  if (!summary_only && MemTracker::track_callsite()) {
-    diff_callsites(cur, prev);
-  }
-  _outputer.done();
-}
-
-void BaselineReporter::diff_summaries(const MemBaseline& cur, const MemBaseline& prev) {
-  _outputer.start_category_summary();
-  MEMFLAGS type;
+    if (amount_in_current_scale(virtual_memory->reserved()) > 0) {
+      print_virtual_memory_line(virtual_memory->reserved(), virtual_memory->committed());
+    }
 
-  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
-    type = MemBaseline::MemType2NameMap[index]._flag;
-    _outputer.diff_category_summary(type,
-      amount_in_current_scale(cur.reserved_amount(type)),
-      amount_in_current_scale(cur.committed_amount(type)),
-      amount_in_current_scale(cur.malloc_amount(type)),
-      cur.malloc_count(type),
-      amount_in_current_scale(cur.arena_amount(type)),
-      cur.arena_count(type),
-      diff_in_current_scale(cur.reserved_amount(type), prev.reserved_amount(type)),
-      diff_in_current_scale(cur.committed_amount(type), prev.committed_amount(type)),
-      diff_in_current_scale(cur.malloc_amount(type), prev.malloc_amount(type)),
-      diff(cur.malloc_count(type), prev.malloc_count(type)),
-      diff_in_current_scale(cur.arena_amount(type), prev.arena_amount(type)),
-      diff(cur.arena_count(type), prev.arena_count(type)));
+    if (amount_in_current_scale(malloc_memory->arena_size()) > 0) {
+      print_arena_line(malloc_memory->arena_size(), malloc_memory->arena_count());
+    }
+
+    if (flag == mtNMT &&
+      amount_in_current_scale(_malloc_snapshot->malloc_overhead()->size()) > 0) {
+      out->print_cr("%27s (tracking overhead=" SIZE_FORMAT "%s)", " ",
+        amount_in_current_scale(_malloc_snapshot->malloc_overhead()->size()), scale);
+    }
+
+    out->print_cr(" ");
   }
-
-  _outputer.done_category_summary();
 }
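+// Illustrative per-type block emitted by report_summary_of_type() (figures
+// made up, KB scale):
+//
+//   -                    Thread (reserved=20485KB, committed=20485KB)
+//                               (thread #21)
+//                               (stack: reserved=20391KB, committed=20391KB)
+//                               (malloc=62KB #115)
+//                               (arena=32KB #40)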
 
-void BaselineReporter::diff_callsites(const MemBaseline& cur, const MemBaseline& prev) {
-  _outputer.start_callsite();
-  MemBaseline* pBL_cur = const_cast<MemBaseline*>(&cur);
-  MemBaseline* pBL_prev = const_cast<MemBaseline*>(&prev);
+void MemDetailReporter::report_detail() {
+  // Start detail report
+  outputStream* out = output();
+  out->print_cr("Details:\n");
+
+  report_malloc_sites();
+  report_virtual_memory_allocation_sites();
+}
+
+void MemDetailReporter::report_malloc_sites() {
+  MallocSiteIterator         malloc_itr = _baseline.malloc_sites(MemBaseline::by_size);
+  if (malloc_itr.is_empty()) return;
+
+  outputStream* out = output();
 
-  // walk malloc callsites
-  MemPointerArrayIteratorImpl cur_malloc_itr(pBL_cur->_malloc_cs);
-  MemPointerArrayIteratorImpl prev_malloc_itr(pBL_prev->_malloc_cs);
+  const MallocSite* malloc_site;
+  while ((malloc_site = malloc_itr.next()) != NULL) {
+    // Don't report if size is too small
+    if (amount_in_current_scale(malloc_site->size()) == 0)
+      continue;
 
-  MallocCallsitePointer*      cur_malloc_callsite =
-                  (MallocCallsitePointer*)cur_malloc_itr.current();
-  MallocCallsitePointer*      prev_malloc_callsite =
-                  (MallocCallsitePointer*)prev_malloc_itr.current();
+    const NativeCallStack* stack = malloc_site->call_stack();
+    stack->print_on(out);
+    out->print("%29s", " ");
+    print_malloc(malloc_site->size(), malloc_site->count());
+    out->print_cr("\n");
+  }
+}
+
+void MemDetailReporter::report_virtual_memory_allocation_sites() {
+  VirtualMemorySiteIterator  virtual_memory_itr =
+    _baseline.virtual_memory_sites(MemBaseline::by_size);
+
+  if (virtual_memory_itr.is_empty()) return;
+
+  outputStream* out = output();
+  const VirtualMemoryAllocationSite*  virtual_memory_site;
 
-  while (cur_malloc_callsite != NULL || prev_malloc_callsite != NULL) {
-    if (prev_malloc_callsite == NULL) {
-      assert(cur_malloc_callsite != NULL, "sanity check");
-      // this is a new callsite
-      _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(),
-        amount_in_current_scale(cur_malloc_callsite->amount()),
-        cur_malloc_callsite->count(),
-        diff_in_current_scale(cur_malloc_callsite->amount(), 0),
-        diff(cur_malloc_callsite->count(), 0));
-      cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next();
-    } else if (cur_malloc_callsite == NULL) {
-      assert(prev_malloc_callsite != NULL, "Sanity check");
-      // this callsite is already gone
-      _outputer.diff_malloc_callsite(prev_malloc_callsite->addr(),
-        0, 0,
-        diff_in_current_scale(0, prev_malloc_callsite->amount()),
-        diff(0, prev_malloc_callsite->count()));
-      prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next();
-    } else {
-      assert(cur_malloc_callsite  != NULL,  "Sanity check");
-      assert(prev_malloc_callsite != NULL,  "Sanity check");
-      if (cur_malloc_callsite->addr() < prev_malloc_callsite->addr()) {
-        // this is a new callsite
-        _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(),
-          amount_in_current_scale(cur_malloc_callsite->amount()),
-          cur_malloc_callsite->count(),
-          diff_in_current_scale(cur_malloc_callsite->amount(), 0),
-          diff(cur_malloc_callsite->count(), 0));
-          cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next();
-      } else if (cur_malloc_callsite->addr() > prev_malloc_callsite->addr()) {
-        // this callsite is already gone
-        _outputer.diff_malloc_callsite(prev_malloc_callsite->addr(),
-          0, 0,
-          diff_in_current_scale(0, prev_malloc_callsite->amount()),
-          diff(0, prev_malloc_callsite->count()));
-        prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next();
-      } else {
-        // the same callsite
-        _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(),
-          amount_in_current_scale(cur_malloc_callsite->amount()),
-          cur_malloc_callsite->count(),
-          diff_in_current_scale(cur_malloc_callsite->amount(), prev_malloc_callsite->amount()),
-          diff(cur_malloc_callsite->count(), prev_malloc_callsite->count()));
-        cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next();
-        prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next();
-      }
-    }
+  while ((virtual_memory_site = virtual_memory_itr.next()) != NULL) {
+    // Don't report if size is too small
+    if (amount_in_current_scale(virtual_memory_site->reserved()) == 0)
+      continue;
+
+    const NativeCallStack* stack = virtual_memory_site->call_stack();
+    stack->print_on(out);
+    out->print("%28s (", " ");
+    print_total(virtual_memory_site->reserved(), virtual_memory_site->committed());
+    out->print_cr(")\n");
+  }
+}
+
+
+void MemDetailReporter::report_virtual_memory_map() {
+  // Virtual memory map always in base address order
+  VirtualMemoryAllocationIterator itr = _baseline.virtual_memory_allocations();
+  const ReservedMemoryRegion* rgn;
+
+  output()->print_cr("Virtual memory map:");
+  while ((rgn = itr.next()) != NULL) {
+    report_virtual_memory_region(rgn);
+  }
+}
+
+void MemDetailReporter::report_virtual_memory_region(const ReservedMemoryRegion* reserved_rgn) {
+  assert(reserved_rgn != NULL, "NULL pointer");
+
+  // Don't report if size is too small
+  if (amount_in_current_scale(reserved_rgn->size()) == 0) return;
+
+  outputStream* out = output();
+  const char* scale = current_scale();
+  const NativeCallStack*  stack = reserved_rgn->call_stack();
+  bool all_committed = reserved_rgn->all_committed();
+  const char* region_type = (all_committed ? "reserved and committed" : "reserved");
+  out->print_cr(" ");
+  print_virtual_memory_region(region_type, reserved_rgn->base(), reserved_rgn->size());
+  out->print(" for %s", NMTUtil::flag_to_name(reserved_rgn->flag()));
+  if (stack->is_empty()) {
+    out->print_cr(" ");
+  } else {
+    out->print_cr(" from");
+    stack->print_on(out, 4);
   }
 
-  // walk virtual memory callsite
-  MemPointerArrayIteratorImpl cur_vm_itr(pBL_cur->_vm_cs);
-  MemPointerArrayIteratorImpl prev_vm_itr(pBL_prev->_vm_cs);
-  VMCallsitePointer*          cur_vm_callsite = (VMCallsitePointer*)cur_vm_itr.current();
-  VMCallsitePointer*          prev_vm_callsite = (VMCallsitePointer*)prev_vm_itr.current();
-  while (cur_vm_callsite != NULL || prev_vm_callsite != NULL) {
-    if (prev_vm_callsite == NULL || cur_vm_callsite->addr() < prev_vm_callsite->addr()) {
-      // this is a new callsite
-      _outputer.diff_virtual_memory_callsite(cur_vm_callsite->addr(),
-        amount_in_current_scale(cur_vm_callsite->reserved_amount()),
-        amount_in_current_scale(cur_vm_callsite->committed_amount()),
-        diff_in_current_scale(cur_vm_callsite->reserved_amount(), 0),
-        diff_in_current_scale(cur_vm_callsite->committed_amount(), 0));
-      cur_vm_callsite = (VMCallsitePointer*)cur_vm_itr.next();
-    } else if (cur_vm_callsite == NULL || cur_vm_callsite->addr() > prev_vm_callsite->addr()) {
-      // this callsite is already gone
-      _outputer.diff_virtual_memory_callsite(prev_vm_callsite->addr(),
-        amount_in_current_scale(0),
-        amount_in_current_scale(0),
-        diff_in_current_scale(0, prev_vm_callsite->reserved_amount()),
-        diff_in_current_scale(0, prev_vm_callsite->committed_amount()));
-      prev_vm_callsite = (VMCallsitePointer*)prev_vm_itr.next();
-    } else { // the same callsite
-      _outputer.diff_virtual_memory_callsite(cur_vm_callsite->addr(),
-        amount_in_current_scale(cur_vm_callsite->reserved_amount()),
-        amount_in_current_scale(cur_vm_callsite->committed_amount()),
-        diff_in_current_scale(cur_vm_callsite->reserved_amount(), prev_vm_callsite->reserved_amount()),
-        diff_in_current_scale(cur_vm_callsite->committed_amount(), prev_vm_callsite->committed_amount()));
-      cur_vm_callsite  = (VMCallsitePointer*)cur_vm_itr.next();
-      prev_vm_callsite = (VMCallsitePointer*)prev_vm_itr.next();
-    }
-  }
-
-  _outputer.done_callsite();
-}
-
-size_t BaselineReporter::amount_in_current_scale(size_t amt) const {
-  return (size_t)(((float)amt/(float)_scale) + 0.5);
-}
-
-int BaselineReporter::diff_in_current_scale(size_t value1, size_t value2) const {
-  return (int)(((float)value1 - (float)value2)/((float)_scale) + 0.5);
-}
-
-int BaselineReporter::diff(size_t value1, size_t value2) const {
-  return ((int)value1 - (int)value2);
-}
-
-void BaselineTTYOutputer::start(size_t scale, bool report_diff) {
-  _scale = scale;
-  _output->print_cr(" ");
-  _output->print_cr("Native Memory Tracking:");
-  _output->print_cr(" ");
-}
-
-void BaselineTTYOutputer::done() {
-
-}
+  if (all_committed) return;
 
-void BaselineTTYOutputer::total_usage(size_t total_reserved, size_t total_committed) {
-  const char* unit = memory_unit(_scale);
-  _output->print_cr("Total:  reserved=%d%s,  committed=%d%s",
-    total_reserved, unit, total_committed, unit);
-}
-
-void BaselineTTYOutputer::start_category_summary() {
-  _output->print_cr(" ");
-}
-
-/**
- * report a summary of memory type
- */
-void BaselineTTYOutputer::category_summary(MEMFLAGS type,
-  size_t reserved_amt, size_t committed_amt, size_t malloc_amt,
-  size_t malloc_count, size_t arena_amt, size_t arena_count) {
-
-  // we report mtThreadStack under mtThread category
-  if (type == mtThreadStack) {
-    assert(malloc_amt == 0 && malloc_count == 0 && arena_amt == 0,
-      "Just check");
-    _thread_stack_reserved = reserved_amt;
-    _thread_stack_committed = committed_amt;
-  } else {
-    const char* unit = memory_unit(_scale);
-    size_t total_reserved = (reserved_amt + malloc_amt + arena_amt);
-    size_t total_committed = (committed_amt + malloc_amt + arena_amt);
-    if (type == mtThread) {
-      total_reserved += _thread_stack_reserved;
-      total_committed += _thread_stack_committed;
-    }
-
-    if (total_reserved > 0) {
-      _output->print_cr("-%26s (reserved=%d%s, committed=%d%s)",
-        MemBaseline::type2name(type), total_reserved, unit,
-        total_committed, unit);
-
-      if (type == mtClass) {
-        _output->print_cr("%27s (classes #%d)", " ", _num_of_classes);
-      } else if (type == mtThread) {
-        _output->print_cr("%27s (thread #%d)", " ", _num_of_threads);
-        _output->print_cr("%27s (stack: reserved=%d%s, committed=%d%s)", " ",
-          _thread_stack_reserved, unit, _thread_stack_committed, unit);
-      }
-
-      if (malloc_amt > 0) {
-        if (type != mtChunk) {
-          _output->print_cr("%27s (malloc=%d%s, #%d)", " ", malloc_amt, unit,
-            malloc_count);
-        } else {
-          _output->print_cr("%27s (malloc=%d%s)", " ", malloc_amt, unit);
-        }
-      }
-
-      if (reserved_amt > 0) {
-        _output->print_cr("%27s (mmap: reserved=%d%s, committed=%d%s)",
-          " ", reserved_amt, unit, committed_amt, unit);
-      }
-
-      if (arena_amt > 0) {
-        _output->print_cr("%27s (arena=%d%s, #%d)", " ", arena_amt, unit, arena_count);
-      }
-
-      _output->print_cr(" ");
+  CommittedRegionIterator itr = reserved_rgn->iterate_committed_regions();
+  const CommittedMemoryRegion* committed_rgn;
+  while ((committed_rgn = itr.next()) != NULL) {
+    // Don't report if size is too small
+    if (amount_in_current_scale(committed_rgn->size()) == 0) continue;
+    stack = committed_rgn->call_stack();
+    out->print("\n\t");
+    print_virtual_memory_region("committed", committed_rgn->base(), committed_rgn->size());
+    if (stack->is_empty()) {
+      out->print_cr(" ");
+    } else {
+      out->print_cr(" from");
+      stack->print_on(out, 12);
     }
   }
 }
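+// Illustrative virtual memory map entry (addresses and sizes made up, call
+// stacks elided):
+//
+//   [0x00000000e0000000 - 0x0000000100000000] reserved 524288KB for Java Heap from
+//       <reserving call stack>
+//
+//       [0x00000000e0000000 - 0x00000000e4000000] committed 65536KB from
+//           <committing call stack>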
 
-void BaselineTTYOutputer::done_category_summary() {
-  _output->print_cr(" ");
-}
+void MemSummaryDiffReporter::report_diff() {
+  const char* scale = current_scale();
+  outputStream* out = output();
+  out->print_cr("\nNative Memory Tracking:\n");
 
-
-void BaselineTTYOutputer::start_virtual_memory_map() {
-  _output->print_cr("Virtual memory map:");
-}
+  // Overall diff
+  out->print("Total: ");
+  print_virtual_memory_diff(_current_baseline.total_reserved_memory(),
+    _current_baseline.total_committed_memory(), _early_baseline.total_reserved_memory(),
+    _early_baseline.total_committed_memory());
 
-void BaselineTTYOutputer::reserved_memory_region(MEMFLAGS type, address base, address end,
-                                                 size_t size, address pc) {
-  const char* unit = memory_unit(_scale);
-  char buf[128];
-  int  offset;
-  _output->print_cr(" ");
-  _output->print_cr("[" PTR_FORMAT " - " PTR_FORMAT "] reserved %d%s for %s", base, end, size, unit,
-            MemBaseline::type2name(type));
-  if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
-      _output->print_cr("\t\tfrom [%s+0x%x]", buf, offset);
-  }
-}
+  out->print_cr("\n");
 
-void BaselineTTYOutputer::committed_memory_region(address base, address end, size_t size, address pc) {
-  const char* unit = memory_unit(_scale);
-  char buf[128];
-  int  offset;
-  _output->print("\t[" PTR_FORMAT " - " PTR_FORMAT "] committed %d%s", base, end, size, unit);
-  if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
-      _output->print_cr(" from [%s+0x%x]", buf, offset);
+  // Summary diff by memory type
+  for (int index = 0; index < mt_number_of_types; index ++) {
+    MEMFLAGS flag = NMTUtil::index_to_flag(index);
+    // thread stack is reported as part of thread category
+    if (flag == mtThreadStack) continue;
+    diff_summary_of_type(flag, _early_baseline.malloc_memory(flag),
+      _early_baseline.virtual_memory(flag), _current_baseline.malloc_memory(flag),
+      _current_baseline.virtual_memory(flag));
   }
 }
 
-void BaselineTTYOutputer::done_virtual_memory_map() {
-  _output->print_cr(" ");
-}
-
-
-
-void BaselineTTYOutputer::start_callsite() {
-  _output->print_cr("Details:");
-  _output->print_cr(" ");
-}
-
-void BaselineTTYOutputer::done_callsite() {
-  _output->print_cr(" ");
-}
+void MemSummaryDiffReporter::print_malloc_diff(size_t current_amount, size_t current_count,
+    size_t early_amount, size_t early_count) const {
+  const char* scale = current_scale();
+  outputStream* out = output();
 
-void BaselineTTYOutputer::malloc_callsite(address pc, size_t malloc_amt,
-  size_t malloc_count) {
-  if (malloc_amt > 0) {
-    const char* unit = memory_unit(_scale);
-    char buf[128];
-    int  offset;
-    if (pc == 0) {
-      _output->print("[BOOTSTRAP]%18s", " ");
-    } else if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
-      _output->print_cr("[" PTR_FORMAT "] %s+0x%x", pc, buf, offset);
-      _output->print("%28s", " ");
-    } else {
-      _output->print("[" PTR_FORMAT "]%18s", pc, " ");
+  out->print("malloc=" SIZE_FORMAT "%s", amount_in_current_scale(current_amount), scale);
+  long amount_diff = diff_in_current_scale(current_amount, early_amount);
+  if (amount_diff != 0) {
+    out->print(" %+ld%s", amount_diff, scale);
+  }
+  if (current_count > 0) {
+    out->print(" #" SIZE_FORMAT "", current_count);
+    if (current_count != early_count) {
+      out->print(" %+d", (int)(current_count - early_count));
     }
-
-    _output->print_cr("(malloc=%d%s #%d)", malloc_amt, unit, malloc_count);
-    _output->print_cr(" ");
   }
 }
 
-void BaselineTTYOutputer::virtual_memory_callsite(address pc, size_t reserved_amt,
-  size_t committed_amt) {
-  if (reserved_amt > 0) {
-    const char* unit = memory_unit(_scale);
-    char buf[128];
-    int  offset;
-    if (pc == 0) {
-      _output->print("[BOOTSTRAP]%18s", " ");
-    } else if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
-      _output->print_cr("[" PTR_FORMAT "] %s+0x%x", pc, buf, offset);
-      _output->print("%28s", " ");
-    } else {
-      _output->print("[" PTR_FORMAT "]%18s", pc, " ");
-    }
+void MemSummaryDiffReporter::print_arena_diff(size_t current_amount, size_t current_count,
+  size_t early_amount, size_t early_count) const {
+  const char* scale = current_scale();
+  outputStream* out = output();
+  out->print("arena=" SIZE_FORMAT "%s", amount_in_current_scale(current_amount), scale);
+  long amount_diff = diff_in_current_scale(current_amount, early_amount);
+  if (amount_diff != 0) {
+    out->print(" %+ld%s", amount_diff, scale);
+  }
+
+  out->print(" #" SIZE_FORMAT "", current_count);
+  if (current_count != early_count) {
+    out->print(" %+d", (int)(current_count - early_count));
+  }
+}
 
-    _output->print_cr("(mmap: reserved=%d%s, committed=%d%s)",
-      reserved_amt, unit, committed_amt, unit);
-    _output->print_cr(" ");
+void MemSummaryDiffReporter::print_virtual_memory_diff(size_t current_reserved, size_t current_committed,
+    size_t early_reserved, size_t early_committed) const {
+  const char* scale = current_scale();
+  outputStream* out = output();
+  out->print("reserved=" SIZE_FORMAT "%s", amount_in_current_scale(current_reserved), scale);
+  long reserved_diff = diff_in_current_scale(current_reserved, early_reserved);
+  if (reserved_diff != 0) {
+    out->print(" %+ld%s", reserved_diff, scale);
+  }
+
+  out->print(", committed=" SIZE_FORMAT "%s", amount_in_current_scale(current_committed), scale);
+  long committed_diff = diff_in_current_scale(current_committed, early_committed);
+  if (committed_diff != 0) {
+    out->print(" %+ld%s", committed_diff, scale);
   }
 }
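+// The print_*_diff helpers above emit fragments such as (illustrative):
+//   reserved=1349099KB +20610KB, committed=118105KB +3724KB
+// where each "+NNN" delta is omitted when the scaled difference is zero.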
 
-void BaselineTTYOutputer::diff_total_usage(size_t total_reserved,
-  size_t total_committed, int reserved_diff, int committed_diff) {
-  const char* unit = memory_unit(_scale);
-  _output->print_cr("Total:  reserved=%d%s  %+d%s, committed=%d%s %+d%s",
-    total_reserved, unit, reserved_diff, unit, total_committed, unit,
-    committed_diff, unit);
-}
+
+void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag, const MallocMemory* early_malloc,
+  const VirtualMemory* early_vm, const MallocMemory* current_malloc,
+  const VirtualMemory* current_vm) const {
+
+  outputStream* out = output();
+  const char* scale = current_scale();
+
+  // Total reserved and committed memory in current baseline
+  size_t current_reserved_amount  = reserved_total (current_malloc, current_vm);
+  size_t current_committed_amount = committed_total(current_malloc, current_vm);
+
+  // Total reserved and committed memory in early baseline
+  size_t early_reserved_amount  = reserved_total(early_malloc, early_vm);
+  size_t early_committed_amount = committed_total(early_malloc, early_vm);
 
-void BaselineTTYOutputer::diff_category_summary(MEMFLAGS type,
-  size_t cur_reserved_amt, size_t cur_committed_amt,
-  size_t cur_malloc_amt, size_t cur_malloc_count,
-  size_t cur_arena_amt, size_t cur_arena_count,
-  int reserved_diff, int committed_diff, int malloc_diff,
-  int malloc_count_diff, int arena_diff, int arena_count_diff) {
+  // Adjust virtual memory total
+  if (flag == mtThread) {
+    const VirtualMemory* early_thread_stack_usage =
+      _early_baseline.virtual_memory(mtThreadStack);
+    const VirtualMemory* current_thread_stack_usage =
+      _current_baseline.virtual_memory(mtThreadStack);
+
+    early_reserved_amount  += early_thread_stack_usage->reserved();
+    early_committed_amount += early_thread_stack_usage->committed();
+
+    current_reserved_amount  += current_thread_stack_usage->reserved();
+    current_committed_amount += current_thread_stack_usage->committed();
+  } else if (flag == mtNMT) {
+    early_reserved_amount  += _early_baseline.malloc_tracking_overhead();
+    early_committed_amount += _early_baseline.malloc_tracking_overhead();
+
+    current_reserved_amount  += _current_baseline.malloc_tracking_overhead();
+    current_committed_amount += _current_baseline.malloc_tracking_overhead();
+  }
 
-  if (type == mtThreadStack) {
-    assert(cur_malloc_amt == 0 && cur_malloc_count == 0 &&
-      cur_arena_amt == 0, "Just check");
-    _thread_stack_reserved = cur_reserved_amt;
-    _thread_stack_committed = cur_committed_amt;
-    _thread_stack_reserved_diff = reserved_diff;
-    _thread_stack_committed_diff = committed_diff;
-  } else {
-    const char* unit = memory_unit(_scale);
-    size_t total_reserved = (cur_reserved_amt + cur_malloc_amt + cur_arena_amt);
-    // nothing to report in this category
-    if (total_reserved == 0) {
-      return;
-    }
-    int    diff_reserved = (reserved_diff + malloc_diff + arena_diff);
+  if (amount_in_current_scale(current_reserved_amount) > 0 ||
+      diff_in_current_scale(current_reserved_amount, early_reserved_amount) != 0) {
+
+    // print summary line
+    out->print("-%26s (", NMTUtil::flag_to_name(flag));
+    print_virtual_memory_diff(current_reserved_amount, current_committed_amount,
+      early_reserved_amount, early_committed_amount);
+    out->print_cr(")");
 
-    // category summary
-    _output->print("-%26s (reserved=%d%s", MemBaseline::type2name(type),
-      total_reserved, unit);
+    // detail lines
+    if (flag == mtClass) {
+      // report class count
+      out->print("%27s (classes #" SIZE_FORMAT "", " ", _current_baseline.class_count());
+      int class_count_diff = (int)(_current_baseline.class_count() -
+        _early_baseline.class_count());
+      if (class_count_diff != 0) {
+        out->print(" %+d", class_count_diff);
+      }
+      out->print_cr(")");
+    } else if (flag == mtThread) {
+      // report thread count
+      out->print("%27s (thread #" SIZE_FORMAT "", " ", _current_baseline.thread_count());
+      int thread_count_diff = (int)(_current_baseline.thread_count() -
+          _early_baseline.thread_count());
+      if (thread_count_diff != 0) {
+        out->print(" %+d", thread_count_diff);
+      }
+      out->print_cr(")");
 
-    if (diff_reserved != 0) {
-      _output->print(" %+d%s", diff_reserved, unit);
-    }
+      // report thread stack
+      const VirtualMemory* current_thread_stack =
+          _current_baseline.virtual_memory(mtThreadStack);
+      const VirtualMemory* early_thread_stack =
+        _early_baseline.virtual_memory(mtThreadStack);
 
-    size_t total_committed = cur_committed_amt + cur_malloc_amt + cur_arena_amt;
-    _output->print(", committed=%d%s", total_committed, unit);
-
-    int total_committed_diff = committed_diff + malloc_diff + arena_diff;
-    if (total_committed_diff != 0) {
-      _output->print(" %+d%s", total_committed_diff, unit);
+      out->print("%27s (stack: ", " ");
+      print_virtual_memory_diff(current_thread_stack->reserved(), current_thread_stack->committed(),
+        early_thread_stack->reserved(), early_thread_stack->committed());
+      out->print_cr(")");
     }
 
-    _output->print_cr(")");
+    // Report malloc'd memory
+    size_t current_malloc_amount = current_malloc->malloc_size();
+    size_t early_malloc_amount   = early_malloc->malloc_size();
+    if (amount_in_current_scale(current_malloc_amount) > 0 ||
+        diff_in_current_scale(current_malloc_amount, early_malloc_amount) != 0) {
+      out->print("%28s(", " ");
+      print_malloc_diff(current_malloc_amount, (flag == mtChunk) ? 0 : current_malloc->malloc_count(),
+        early_malloc_amount, early_malloc->malloc_count());
+      out->print_cr(")");
+    }
 
-    // special cases
-    if (type == mtClass) {
-      _output->print("%27s (classes #%d", " ", _num_of_classes);
-      if (_num_of_classes_diff != 0) {
-        _output->print(" %+d", _num_of_classes_diff);
-      }
-      _output->print_cr(")");
-    } else if (type == mtThread) {
-      // thread count
-      _output->print("%27s (thread #%d", " ", _num_of_threads);
-      if (_num_of_threads_diff != 0) {
-        _output->print_cr(" %+d)", _num_of_threads_diff);
-      } else {
-        _output->print_cr(")");
-      }
-      _output->print("%27s (stack: reserved=%d%s", " ", _thread_stack_reserved, unit);
-      if (_thread_stack_reserved_diff != 0) {
-        _output->print(" %+d%s", _thread_stack_reserved_diff, unit);
-      }
-
-      _output->print(", committed=%d%s", _thread_stack_committed, unit);
-      if (_thread_stack_committed_diff != 0) {
-        _output->print(" %+d%s",_thread_stack_committed_diff, unit);
-      }
-
-      _output->print_cr(")");
+    // Report virtual memory
+    if (amount_in_current_scale(current_vm->reserved()) > 0 ||
+        diff_in_current_scale(current_vm->reserved(), early_vm->reserved()) != 0) {
+      out->print("%27s (mmap: ", " ");
+      print_virtual_memory_diff(current_vm->reserved(), current_vm->committed(),
+        early_vm->reserved(), early_vm->committed());
+      out->print_cr(")");
     }
 
-    // malloc'd memory
-    if (cur_malloc_amt > 0) {
-      _output->print("%27s (malloc=%d%s", " ", cur_malloc_amt, unit);
-      if (malloc_diff != 0) {
-        _output->print(" %+d%s", malloc_diff, unit);
-      }
-      if (type != mtChunk) {
-        _output->print(", #%d", cur_malloc_count);
-        if (malloc_count_diff) {
-          _output->print(" %+d", malloc_count_diff);
-        }
-      }
-      _output->print_cr(")");
+    // Report arena memory
+    if (amount_in_current_scale(current_malloc->arena_size()) > 0 ||
+        diff_in_current_scale(current_malloc->arena_size(), early_malloc->arena_size()) != 0) {
+      out->print("%28s(", " ");
+      print_arena_diff(current_malloc->arena_size(), current_malloc->arena_count(),
+        early_malloc->arena_size(), early_malloc->arena_count());
+      out->print_cr(")");
     }
 
-    // mmap'd memory
-    if (cur_reserved_amt > 0) {
-      _output->print("%27s (mmap: reserved=%d%s", " ", cur_reserved_amt, unit);
-      if (reserved_diff != 0) {
-        _output->print(" %+d%s", reserved_diff, unit);
-      }
+    // Report native memory tracking overhead
+    if (flag == mtNMT) {
+      size_t current_tracking_overhead = amount_in_current_scale(_current_baseline.malloc_tracking_overhead());
 
-      _output->print(", committed=%d%s", cur_committed_amt, unit);
-      if (committed_diff != 0) {
-        _output->print(" %+d%s", committed_diff, unit);
-      }
-      _output->print_cr(")");
-    }
+      out->print("%27s (tracking overhead=" SIZE_FORMAT "%s", " ",
+        current_tracking_overhead, scale);
 
-    // arena memory
-    if (cur_arena_amt > 0) {
-      _output->print("%27s (arena=%d%s", " ", cur_arena_amt, unit);
-      if (arena_diff != 0) {
-        _output->print(" %+d%s", arena_diff, unit);
+      long overhead_diff = diff_in_current_scale(_current_baseline.malloc_tracking_overhead(),
+           _early_baseline.malloc_tracking_overhead());
+      if (overhead_diff != 0) {
+        out->print(" %+ld%s", overhead_diff, scale);
       }
-      _output->print(", #%d", cur_arena_count);
-      if (arena_count_diff != 0) {
-        _output->print(" %+d", arena_count_diff);
-      }
-      _output->print_cr(")");
+      out->print_cr(")");
     }
-
-    _output->print_cr(" ");
+    out->print_cr(" ");
   }
 }
 
-void BaselineTTYOutputer::diff_malloc_callsite(address pc,
-    size_t cur_malloc_amt, size_t cur_malloc_count,
-    int malloc_diff, int malloc_count_diff) {
-  if (malloc_diff != 0) {
-    const char* unit = memory_unit(_scale);
-    char buf[128];
-    int  offset;
-    if (pc == 0) {
-      _output->print_cr("[BOOTSTRAP]%18s", " ");
+void MemDetailDiffReporter::report_diff() {
+  MemSummaryDiffReporter::report_diff();
+  diff_malloc_sites();
+  diff_virtual_memory_sites();
+}
+
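+// Both baselines' site iterators below are ordered by call site
+// (MemBaseline::by_site), so each diff walk is a sorted merge: a stack seen
+// only in the current baseline is reported as new, one seen only in the
+// early baseline as removed, and matching stacks are diffed in place.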
+void MemDetailDiffReporter::diff_malloc_sites() const {
+  MallocSiteIterator early_itr = _early_baseline.malloc_sites(MemBaseline::by_site);
+  MallocSiteIterator current_itr = _current_baseline.malloc_sites(MemBaseline::by_site);
+
+  const MallocSite* early_site   = early_itr.next();
+  const MallocSite* current_site = current_itr.next();
+
+  while (early_site != NULL || current_site != NULL) {
+    if (early_site == NULL) {
+      new_malloc_site(current_site);
+      current_site = current_itr.next();
+    } else if (current_site == NULL) {
+      old_malloc_site(early_site);
+      early_site = early_itr.next();
     } else {
-      if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
-        _output->print_cr("[" PTR_FORMAT "] %s+0x%x", pc, buf, offset);
-        _output->print("%28s", " ");
+      int compVal = current_site->call_stack()->compare(*early_site->call_stack());
+      if (compVal < 0) {
+        new_malloc_site(current_site);
+        current_site = current_itr.next();
+      } else if (compVal > 0) {
+        old_malloc_site(early_site);
+        early_site = early_itr.next();
       } else {
-        _output->print("[" PTR_FORMAT "]%18s", pc, " ");
+        diff_malloc_site(early_site, current_site);
+        early_site   = early_itr.next();
+        current_site = current_itr.next();
       }
     }
+  }
+}
 
-    _output->print("(malloc=%d%s", cur_malloc_amt, unit);
-    if (malloc_diff != 0) {
-      _output->print(" %+d%s", malloc_diff, unit);
+void MemDetailDiffReporter::diff_virtual_memory_sites() const {
+  VirtualMemorySiteIterator early_itr = _early_baseline.virtual_memory_sites(MemBaseline::by_site);
+  VirtualMemorySiteIterator current_itr = _current_baseline.virtual_memory_sites(MemBaseline::by_site);
+
+  const VirtualMemoryAllocationSite* early_site   = early_itr.next();
+  const VirtualMemoryAllocationSite* current_site = current_itr.next();
+
+  while (early_site != NULL || current_site != NULL) {
+    if (early_site == NULL) {
+      new_virtual_memory_site(current_site);
+      current_site = current_itr.next();
+    } else if (current_site == NULL) {
+      old_virtual_memory_site(early_site);
+      early_site = early_itr.next();
+    } else {
+      int compVal = current_site->call_stack()->compare(*early_site->call_stack());
+      if (compVal < 0) {
+        new_virtual_memory_site(current_site);
+        current_site = current_itr.next();
+      } else if (compVal > 0) {
+        old_virtual_memory_site(early_site);
+        early_site = early_itr.next();
+      } else {
+        diff_virtual_memory_site(early_site, current_site);
+        early_site   = early_itr.next();
+        current_site = current_itr.next();
+      }
     }
-    _output->print(", #%d", cur_malloc_count);
-    if (malloc_count_diff != 0) {
-      _output->print(" %+d", malloc_count_diff);
-    }
-    _output->print_cr(")");
-    _output->print_cr(" ");
   }
 }
 
-void BaselineTTYOutputer::diff_virtual_memory_callsite(address pc,
-    size_t cur_reserved_amt, size_t cur_committed_amt,
-    int reserved_diff, int committed_diff) {
-  if (reserved_diff != 0 || committed_diff != 0) {
-    const char* unit = memory_unit(_scale);
-    char buf[64];
-    int  offset;
-    if (pc == 0) {
-      _output->print_cr("[BOOSTRAP]%18s", " ");
-    } else {
-      if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
-        _output->print_cr("[" PTR_FORMAT "] %s+0x%x", pc, buf, offset);
-        _output->print("%28s", " ");
-      } else {
-        _output->print("[" PTR_FORMAT "]%18s", pc, " ");
-      }
-    }
+
+void MemDetailDiffReporter::new_malloc_site(const MallocSite* malloc_site) const {
+  diff_malloc_site(malloc_site->call_stack(), malloc_site->size(), malloc_site->count(),
+    0, 0);
+}
+
+void MemDetailDiffReporter::old_malloc_site(const MallocSite* malloc_site) const {
+  diff_malloc_site(malloc_site->call_stack(), 0, 0, malloc_site->size(),
+    malloc_site->count());
+}
+
+void MemDetailDiffReporter::diff_malloc_site(const MallocSite* early,
+  const MallocSite* current)  const {
+  diff_malloc_site(current->call_stack(), current->size(), current->count(),
+    early->size(), early->count());
+}
+
+void MemDetailDiffReporter::diff_malloc_site(const NativeCallStack* stack, size_t current_size,
+  size_t current_count, size_t early_size, size_t early_count) const {
+  outputStream* out = output();
+
+  assert(stack != NULL, "NULL stack");
+
+  if (diff_in_current_scale(current_size, early_size) == 0) {
+    return;
+  }
+
+  stack->print_on(out);
+  out->print("%28s (", " ");
+  print_malloc_diff(current_size, current_count,
+    early_size, early_count);
 
-    _output->print("(mmap: reserved=%d%s", cur_reserved_amt, unit);
-    if (reserved_diff != 0) {
-      _output->print(" %+d%s", reserved_diff, unit);
-    }
-    _output->print(", committed=%d%s", cur_committed_amt, unit);
-    if (committed_diff != 0) {
-      _output->print(" %+d%s", committed_diff, unit);
-    }
-    _output->print_cr(")");
-    _output->print_cr(" ");
+  out->print_cr(")\n");
+}
+
+
+void MemDetailDiffReporter::new_virtual_memory_site(const VirtualMemoryAllocationSite* site) const {
+  diff_virtual_memory_site(site->call_stack(), site->reserved(), site->committed(), 0, 0);
+}
+
+void MemDetailDiffReporter::old_virtual_memory_site(const VirtualMemoryAllocationSite* site) const {
+  diff_virtual_memory_site(site->call_stack(), 0, 0, site->reserved(), site->committed());
+}
+
+void MemDetailDiffReporter::diff_virtual_memory_site(const VirtualMemoryAllocationSite* early,
+  const VirtualMemoryAllocationSite* current) const {
+  diff_virtual_memory_site(current->call_stack(), current->reserved(), current->committed(),
+    early->reserved(), early->committed());
+}
+
+void MemDetailDiffReporter::diff_virtual_memory_site(const NativeCallStack* stack, size_t current_reserved,
+  size_t current_committed, size_t early_reserved, size_t early_committed) const {
+  outputStream* out = output();
+
+  // no change
+  if (diff_in_current_scale(current_reserved, early_reserved) == 0 &&
+      diff_in_current_scale(current_committed, early_committed) == 0) {
+    return;
   }
-}
+
+  stack->print_on(out);
+  out->print("%28s (mmap: ", " ");
+  print_virtual_memory_diff(current_reserved, current_committed,
+    early_reserved, early_committed);
+
+  out->print_cr(")\n");
+}
+
--- a/src/share/vm/services/memReporter.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/services/memReporter.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,262 +25,203 @@
 #ifndef SHARE_VM_SERVICES_MEM_REPORTER_HPP
 #define SHARE_VM_SERVICES_MEM_REPORTER_HPP
 
-#include "runtime/mutexLocker.hpp"
+#if INCLUDE_NMT
+
+#include "oops/instanceKlass.hpp"
 #include "services/memBaseline.hpp"
-#include "services/memTracker.hpp"
-#include "utilities/ostream.hpp"
-#include "utilities/macros.hpp"
-
-#if INCLUDE_NMT
+#include "services/nmtCommon.hpp"
+#include "services/mallocTracker.hpp"
+#include "services/virtualMemoryTracker.hpp"
 
 /*
- * MemBaselineReporter reports data to this outputer class,
- * ReportOutputer is responsible for format, store and redirect
- * the data to the final destination.
- */
-class BaselineOutputer : public StackObj {
+ * Base class that provides common helpers for the memory report classes.
+ */
+class MemReporterBase : public StackObj {
+ private:
+  size_t        _scale;  // report in this scale
+  outputStream* _output; // destination
+
  public:
-  // start to report memory usage in specified scale.
-  // if report_diff = true, the reporter reports baseline comparison
-  // information.
-
-  virtual void start(size_t scale, bool report_diff = false) = 0;
-  // Done reporting
-  virtual void done() = 0;
+  MemReporterBase(outputStream* out = NULL, size_t scale = K)
+    : _scale(scale) {
+    _output = (out == NULL) ? tty : out;
+  }
 
-  /* report baseline summary information */
-  virtual void total_usage(size_t total_reserved,
-                           size_t total_committed) = 0;
-  virtual void num_of_classes(size_t classes) = 0;
-  virtual void num_of_threads(size_t threads) = 0;
-
-  virtual void thread_info(size_t stack_reserved_amt, size_t stack_committed_amt) = 0;
+ protected:
+  inline outputStream* output() const {
+    return _output;
+  }
+  // Current reporting scale
+  inline const char* current_scale() const {
+    return NMTUtil::scale_name(_scale);
+  }
+  // Convert memory amount in bytes to current reporting scale
+  inline size_t amount_in_current_scale(size_t amount) const {
+    return NMTUtil::amount_in_scale(amount, _scale);
+  }
 
-  /* report baseline summary comparison */
-  virtual void diff_total_usage(size_t total_reserved,
-                                size_t total_committed,
-                                int reserved_diff,
-                                int committed_diff) = 0;
-  virtual void diff_num_of_classes(size_t classes, int diff) = 0;
-  virtual void diff_num_of_threads(size_t threads, int diff) = 0;
+  // Convert diff amount in bytes to current reporting scale
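+  // Illustrative examples with _scale = K (1024): a raw delta of +1536 bytes
+  // reports as +2 and -1536 as -2; deltas smaller than half a unit round to
+  // 0, which callers use to suppress insignificant changes.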
+  inline long diff_in_current_scale(size_t s1, size_t s2) const {
+    long amount = (long)(s1 - s2);
+    long scale = (long)_scale;
+    amount = (amount > 0) ? (amount + scale / 2) : (amount - scale / 2);
+    return amount / scale;
+  }
 
-  virtual void diff_thread_info(size_t stack_reserved, size_t stack_committed,
-        int stack_reserved_diff, int stack_committed_diff) = 0;
+  // Helper functions
+  // Calculate total reserved and committed amount
+  size_t reserved_total(const MallocMemory* malloc, const VirtualMemory* vm) const;
+  size_t committed_total(const MallocMemory* malloc, const VirtualMemory* vm) const;
 
 
-  /*
-   * memory summary by memory types.
-   * for each memory type, following summaries are reported:
-   *  - reserved amount, committed amount
-   *  - malloc'd amount, malloc count
-   *  - arena amount, arena count
-   */
-
-  // start reporting memory summary by memory type
-  virtual void start_category_summary() = 0;
+  // Print summary total, malloc and virtual memory
+  void print_total(size_t reserved, size_t committed) const;
+  void print_malloc(size_t amount, size_t count) const;
+  void print_virtual_memory(size_t reserved, size_t committed) const;
 
-  virtual void category_summary(MEMFLAGS type, size_t reserved_amt,
-                                size_t committed_amt,
-                                size_t malloc_amt, size_t malloc_count,
-                                size_t arena_amt, size_t arena_count) = 0;
+  void print_malloc_line(size_t amount, size_t count) const;
+  void print_virtual_memory_line(size_t reserved, size_t committed) const;
+  void print_arena_line(size_t amount, size_t count) const;
 
-  virtual void diff_category_summary(MEMFLAGS type, size_t cur_reserved_amt,
-                                size_t cur_committed_amt,
-                                size_t cur_malloc_amt, size_t cur_malloc_count,
-                                size_t cur_arena_amt, size_t cur_arena_count,
-                                int reserved_diff, int committed_diff, int malloc_diff,
-                                int malloc_count_diff, int arena_diff,
-                                int arena_count_diff) = 0;
+  void print_virtual_memory_region(const char* type, address base, size_t size) const;
+};
 
-  virtual void done_category_summary() = 0;
-
-  virtual void start_virtual_memory_map() = 0;
-  virtual void reserved_memory_region(MEMFLAGS type, address base, address end, size_t size, address pc) = 0;
-  virtual void committed_memory_region(address base, address end, size_t size, address pc) = 0;
-  virtual void done_virtual_memory_map() = 0;
+/*
+ * This class generates the summary tracking report.
+ */
+class MemSummaryReporter : public MemReporterBase {
+ private:
+  MallocMemorySnapshot*   _malloc_snapshot;
+  VirtualMemorySnapshot*  _vm_snapshot;
+  size_t                  _class_count;
 
-  /*
-   *  Report callsite information
-   */
-  virtual void start_callsite() = 0;
-  virtual void malloc_callsite(address pc, size_t malloc_amt, size_t malloc_count) = 0;
-  virtual void virtual_memory_callsite(address pc, size_t reserved_amt, size_t committed_amt) = 0;
+ public:
+  // This constructor is for normal reporting from a recent baseline.
+  MemSummaryReporter(MemBaseline& baseline, outputStream* output,
+    size_t scale = K) : MemReporterBase(output, scale),
+    _malloc_snapshot(baseline.malloc_memory_snapshot()),
+    _vm_snapshot(baseline.virtual_memory_snapshot()),
+    _class_count(baseline.class_count()) { }
 
-  virtual void diff_malloc_callsite(address pc, size_t cur_malloc_amt, size_t cur_malloc_count,
-              int malloc_diff, int malloc_count_diff) = 0;
-  virtual void diff_virtual_memory_callsite(address pc, size_t cur_reserved_amt, size_t cur_committed_amt,
-              int reserved_diff, int committed_diff) = 0;
 
-  virtual void done_callsite() = 0;
-
-  // return current scale in "KB", "MB" or "GB"
-  static const char* memory_unit(size_t scale);
+  // Generate summary report
+  virtual void report();
+ private:
+  // Report summary for each memory type
+  void report_summary_of_type(MEMFLAGS type, MallocMemory* malloc_memory,
+    VirtualMemory* virtual_memory);
 };
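+// Typical use (sketch only; 'baseline' and 'st' are illustrative names, the
+// real call site is expected to be the NMT diagnostic command):
+//   MemSummaryReporter reporter(baseline, st, K);
+//   reporter.report();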
 
 /*
- * This class reports processed data from a baseline or
- * the changes between the two baseline.
+ * This class generates the detail tracking report.
  */
-class BaselineReporter : public StackObj {
+class MemDetailReporter : public MemSummaryReporter {
  private:
-  BaselineOutputer&  _outputer;
-  size_t             _scale;
+  MemBaseline&   _baseline;
 
  public:
-  // construct a reporter that reports memory usage
-  // in specified scale
-  BaselineReporter(BaselineOutputer& outputer, size_t scale = K):
-    _outputer(outputer) {
-    _scale = scale;
+  MemDetailReporter(MemBaseline& baseline, outputStream* output, size_t scale = K) :
+    MemSummaryReporter(baseline, output, scale),
+     _baseline(baseline) { }
+
+  // Generate detail report.
+  // The report contains summary and detail sections.
+  virtual void report() {
+    MemSummaryReporter::report();
+    report_virtual_memory_map();
+    report_detail();
   }
-  virtual void report_baseline(const MemBaseline& baseline, bool summary_only = false);
-  virtual void diff_baselines(const MemBaseline& cur, const MemBaseline& prev,
-                              bool summary_only = false);
-
-  void set_scale(size_t scale);
-  size_t scale() const { return _scale; }
 
  private:
-  void report_summaries(const MemBaseline& baseline);
-  void report_virtual_memory_map(const MemBaseline& baseline);
-  void report_callsites(const MemBaseline& baseline);
-
-  void diff_summaries(const MemBaseline& cur, const MemBaseline& prev);
-  void diff_callsites(const MemBaseline& cur, const MemBaseline& prev);
+  // Report detail tracking data.
+  void report_detail();
+  // Report virtual memory map
+  void report_virtual_memory_map();
+  // Report malloc allocation sites
+  void report_malloc_sites();
+  // Report virtual memory reservation sites
+  void report_virtual_memory_allocation_sites();
 
-  // calculate memory size in current memory scale
-  size_t amount_in_current_scale(size_t amt) const;
-  // diff two unsigned values in current memory scale
-  int    diff_in_current_scale(size_t value1, size_t value2) const;
-  // diff two unsigned value
-  int    diff(size_t value1, size_t value2) const;
+  // Report a virtual memory region
+  void report_virtual_memory_region(const ReservedMemoryRegion* rgn);
 };
 
 /*
- * tty output implementation. Native memory tracking
- * DCmd uses this outputer.
+ * This class generates the summary comparison report.
+ * It compares the current memory baseline against an early baseline.
  */
-class BaselineTTYOutputer : public BaselineOutputer {
- private:
-  size_t         _scale;
-
-  size_t         _num_of_classes;
-  size_t         _num_of_threads;
-  size_t         _thread_stack_reserved;
-  size_t         _thread_stack_committed;
-
-  int            _num_of_classes_diff;
-  int            _num_of_threads_diff;
-  int            _thread_stack_reserved_diff;
-  int            _thread_stack_committed_diff;
-
-  outputStream*  _output;
+class MemSummaryDiffReporter : public MemReporterBase {
+ protected:
+  MemBaseline&      _early_baseline;
+  MemBaseline&      _current_baseline;
 
  public:
-  BaselineTTYOutputer(outputStream* st) {
-    _scale = K;
-    _num_of_classes = 0;
-    _num_of_threads = 0;
-    _thread_stack_reserved = 0;
-    _thread_stack_committed = 0;
-    _num_of_classes_diff = 0;
-    _num_of_threads_diff = 0;
-    _thread_stack_reserved_diff = 0;
-    _thread_stack_committed_diff = 0;
-    _output = st;
-  }
-
-  // begin reporting memory usage in specified scale
-  void start(size_t scale, bool report_diff = false);
-  // done reporting
-  void done();
-
-  // total memory usage
-  void total_usage(size_t total_reserved,
-                   size_t total_committed);
-  // report total loaded classes
-  void num_of_classes(size_t classes) {
-    _num_of_classes = classes;
-  }
-
-  void num_of_threads(size_t threads) {
-    _num_of_threads = threads;
-  }
-
-  void thread_info(size_t stack_reserved_amt, size_t stack_committed_amt) {
-    _thread_stack_reserved = stack_reserved_amt;
-    _thread_stack_committed = stack_committed_amt;
+  MemSummaryDiffReporter(MemBaseline& early_baseline, MemBaseline& current_baseline,
+    outputStream* output, size_t scale = K) : MemReporterBase(output, scale),
+    _early_baseline(early_baseline), _current_baseline(current_baseline) {
+    assert(early_baseline.baseline_type()   != MemBaseline::Not_baselined, "Not baselined");
+    assert(current_baseline.baseline_type() != MemBaseline::Not_baselined, "Not baselined");
   }
 
-  void diff_total_usage(size_t total_reserved,
-                        size_t total_committed,
-                        int reserved_diff,
-                        int committed_diff);
-
-  void diff_num_of_classes(size_t classes, int diff) {
-    _num_of_classes = classes;
-    _num_of_classes_diff = diff;
-  }
-
-  void diff_num_of_threads(size_t threads, int diff) {
-    _num_of_threads = threads;
-    _num_of_threads_diff = diff;
-  }
-
-  void diff_thread_info(size_t stack_reserved_amt, size_t stack_committed_amt,
-               int stack_reserved_diff, int stack_committed_diff) {
-    _thread_stack_reserved = stack_reserved_amt;
-    _thread_stack_committed = stack_committed_amt;
-    _thread_stack_reserved_diff = stack_reserved_diff;
-    _thread_stack_committed_diff = stack_committed_diff;
-  }
+  // Generate summary comparison report
+  virtual void report_diff();
 
-  /*
-   * Report memory summary categoriuzed by memory types.
-   * For each memory type, following summaries are reported:
-   *  - reserved amount, committed amount
-   *  - malloc-ed amount, malloc count
-   *  - arena amount, arena count
-   */
-  // start reporting memory summary by memory type
-  void start_category_summary();
-  void category_summary(MEMFLAGS type, size_t reserved_amt, size_t committed_amt,
-                               size_t malloc_amt, size_t malloc_count,
-                               size_t arena_amt, size_t arena_count);
-
-  void diff_category_summary(MEMFLAGS type, size_t cur_reserved_amt,
-                          size_t cur_committed_amt,
-                          size_t cur_malloc_amt, size_t cur_malloc_count,
-                          size_t cur_arena_amt, size_t cur_arena_count,
-                          int reserved_diff, int committed_diff, int malloc_diff,
-                          int malloc_count_diff, int arena_diff,
-                          int arena_count_diff);
+ private:
+  // report the comparison of each memory type
+  void diff_summary_of_type(MEMFLAGS type,
+    const MallocMemory* early_malloc, const VirtualMemory* early_vm,
+    const MallocMemory* current_malloc, const VirtualMemory* current_vm) const;
 
-  void done_category_summary();
-
-  // virtual memory map
-  void start_virtual_memory_map();
-  void reserved_memory_region(MEMFLAGS type, address base, address end, size_t size, address pc);
-  void committed_memory_region(address base, address end, size_t size, address pc);
-  void done_virtual_memory_map();
-
-
-  /*
-   *  Report callsite information
-   */
-  void start_callsite();
-  void malloc_callsite(address pc, size_t malloc_amt, size_t malloc_count);
-  void virtual_memory_callsite(address pc, size_t reserved_amt, size_t committed_amt);
-
-  void diff_malloc_callsite(address pc, size_t cur_malloc_amt, size_t cur_malloc_count,
-              int malloc_diff, int malloc_count_diff);
-  void diff_virtual_memory_callsite(address pc, size_t cur_reserved_amt, size_t cur_committed_amt,
-              int reserved_diff, int committed_diff);
-
-  void done_callsite();
+ protected:
+  void print_malloc_diff(size_t current_amount, size_t current_count,
+    size_t early_amount, size_t early_count) const;
+  void print_virtual_memory_diff(size_t current_reserved, size_t current_committed,
+    size_t early_reserved, size_t early_committed) const;
+  void print_arena_diff(size_t current_amount, size_t current_count,
+    size_t early_amount, size_t early_count) const;
 };
 
+/*
+ * This class generates the detail comparison report.
+ * It compares the current memory baseline against an early baseline;
+ * both baselines have to be detail baselines.
+ */
+class MemDetailDiffReporter : public MemSummaryDiffReporter {
+ public:
+  MemDetailDiffReporter(MemBaseline& early_baseline, MemBaseline& current_baseline,
+    outputStream* output, size_t scale = K) :
+    MemSummaryDiffReporter(early_baseline, current_baseline, output, scale) { }
+
+  // Generate detail comparison report
+  virtual void report_diff();
+
+  // Malloc allocation site comparison
+  void diff_malloc_sites() const;
+  // Virtual memory reservation site comparison
+  void diff_virtual_memory_sites() const;
+
+  // New malloc allocation site in recent baseline
+  void new_malloc_site (const MallocSite* site) const;
+  // The malloc allocation site is not in recent baseline
+  void old_malloc_site (const MallocSite* site) const;
+  // Compare malloc allocation site, it is in both baselines
+  void diff_malloc_site(const MallocSite* early, const MallocSite* current)  const;
+
+  // New virtual memory allocation site in the recent baseline
+  void new_virtual_memory_site (const VirtualMemoryAllocationSite* callsite) const;
+  // Virtual memory allocation site that is no longer in the recent baseline
+  void old_virtual_memory_site (const VirtualMemoryAllocationSite* callsite) const;
+  // Compare a virtual memory allocation site that is present in both baselines
+  void diff_virtual_memory_site(const VirtualMemoryAllocationSite* early,
+                                const VirtualMemoryAllocationSite* current)  const;
+
+  void diff_malloc_site(const NativeCallStack* stack, size_t current_size,
+    size_t current_count, size_t early_size, size_t early_count) const;
+  void diff_virtual_memory_site(const NativeCallStack* stack, size_t current_reserved,
+    size_t current_committed, size_t early_reserved, size_t early_committed) const;
+};
 
 #endif // INCLUDE_NMT
 
-#endif // SHARE_VM_SERVICES_MEM_REPORTER_HPP
+#endif
+
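The reporter hierarchy above is baseline-driven rather than callback-driven: a comparison is produced by constructing a diff reporter over two previously captured baselines and asking it to report. A minimal sketch, using only the names visible in this hunk (the report_detail_diff wrapper itself is hypothetical):

    // Sketch only: drive the new detail diff reporter over two detail baselines.
    static void report_detail_diff(MemBaseline& early, MemBaseline& current, outputStream* out) {
      MemDetailDiffReporter reporter(early, current, out, K);  // amounts scaled to KB
      reporter.report_diff();  // compares malloc and virtual memory allocation sites
    }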
--- a/src/share/vm/services/memSnapshot.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,748 +0,0 @@
-/*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "utilities/decoder.hpp"
-#include "services/memBaseline.hpp"
-#include "services/memPtr.hpp"
-#include "services/memPtrArray.hpp"
-#include "services/memSnapshot.hpp"
-#include "services/memTracker.hpp"
-
-PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
-
-#ifdef ASSERT
-
-void decode_pointer_record(MemPointerRecord* rec) {
-  tty->print("Pointer: [" PTR_FORMAT " - " PTR_FORMAT  "] size = %d bytes", rec->addr(),
-    rec->addr() + rec->size(), (int)rec->size());
-  tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
-  if (rec->is_vm_pointer()) {
-    if (rec->is_allocation_record()) {
-      tty->print_cr(" (reserve)");
-    } else if (rec->is_commit_record()) {
-      tty->print_cr(" (commit)");
-    } else if (rec->is_uncommit_record()) {
-      tty->print_cr(" (uncommit)");
-    } else if (rec->is_deallocation_record()) {
-      tty->print_cr(" (release)");
-    } else {
-      tty->print_cr(" (tag)");
-    }
-  } else {
-    if (rec->is_arena_memory_record()) {
-      tty->print_cr(" (arena size)");
-    } else if (rec->is_allocation_record()) {
-      tty->print_cr(" (malloc)");
-    } else {
-      tty->print_cr(" (free)");
-    }
-  }
-  if (MemTracker::track_callsite()) {
-    char buf[1024];
-    address pc = ((MemPointerRecordEx*)rec)->pc();
-    if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
-      tty->print_cr("\tfrom %s", buf);
-    } else {
-      tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc);
-    }
-  }
-}
-
-void decode_vm_region_record(VMMemRegion* rec) {
-  tty->print("VM Region [" PTR_FORMAT " - " PTR_FORMAT "]", rec->addr(),
-    rec->addr() + rec->size());
-  tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
-  if (rec->is_allocation_record()) {
-    tty->print_cr(" (reserved)");
-  } else if (rec->is_commit_record()) {
-    tty->print_cr(" (committed)");
-  } else {
-    ShouldNotReachHere();
-  }
-  if (MemTracker::track_callsite()) {
-    char buf[1024];
-    address pc = ((VMMemRegionEx*)rec)->pc();
-    if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
-      tty->print_cr("\tfrom %s", buf);
-    } else {
-      tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc);
-    }
-
-  }
-}
-
-#endif
-
-
-bool VMMemPointerIterator::insert_record(MemPointerRecord* rec) {
-  VMMemRegionEx new_rec;
-  assert(rec->is_allocation_record() || rec->is_commit_record(),
-    "Sanity check");
-  if (MemTracker::track_callsite()) {
-    new_rec.init((MemPointerRecordEx*)rec);
-  } else {
-    new_rec.init(rec);
-  }
-  return insert(&new_rec);
-}
-
-bool VMMemPointerIterator::insert_record_after(MemPointerRecord* rec) {
-  VMMemRegionEx new_rec;
-  assert(rec->is_allocation_record() || rec->is_commit_record(),
-    "Sanity check");
-  if (MemTracker::track_callsite()) {
-    new_rec.init((MemPointerRecordEx*)rec);
-  } else {
-    new_rec.init(rec);
-  }
-  return insert_after(&new_rec);
-}
-
-// we don't consolidate reserved regions, since they may be categorized
-// as different memory types.
-bool VMMemPointerIterator::add_reserved_region(MemPointerRecord* rec) {
-  assert(rec->is_allocation_record(), "Sanity check");
-  VMMemRegion* reserved_region = (VMMemRegion*)current();
-
-  // we don't have anything yet
-  if (reserved_region == NULL) {
-    return insert_record(rec);
-  }
-
-  assert(reserved_region->is_reserved_region(), "Sanity check");
-  // duplicated records
-  if (reserved_region->is_same_region(rec)) {
-    return true;
-  }
-  // Overlapping stack regions indicate that a JNI thread failed to
-  // detach from the VM before exiting. This leaks the JavaThread object.
-  if (CheckJNICalls)  {
-      guarantee(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) != mtThreadStack ||
-         !reserved_region->overlaps_region(rec),
-         "Attached JNI thread exited without being detached");
-  }
-  // otherwise, we should not have overlapping reserved regions
-  assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack ||
-    reserved_region->base() > rec->addr(), "Just check: locate()");
-  assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack ||
-    !reserved_region->overlaps_region(rec), "overlapping reserved regions");
-
-  return insert_record(rec);
-}
-
-// we do consolidate committed regions
-bool VMMemPointerIterator::add_committed_region(MemPointerRecord* rec) {
-  assert(rec->is_commit_record(), "Sanity check");
-  VMMemRegion* reserved_rgn = (VMMemRegion*)current();
-  assert(reserved_rgn->is_reserved_region() && reserved_rgn->contains_region(rec),
-    "Sanity check");
-
-  // thread's native stack is always marked as "committed", ignore
-  // the "commit" operation for creating stack guard pages
-  if (FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) == mtThreadStack &&
-      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
-    return true;
-  }
-
-  // if the reserved region has any committed regions
-  VMMemRegion* committed_rgn  = (VMMemRegion*)next();
-  while (committed_rgn != NULL && committed_rgn->is_committed_region()) {
-    // duplicated commit records
-    if(committed_rgn->contains_region(rec)) {
-      return true;
-    } else if (committed_rgn->overlaps_region(rec)) {
-      // overlaps front part
-      if (rec->addr() < committed_rgn->addr()) {
-        committed_rgn->expand_region(rec->addr(),
-          committed_rgn->addr() - rec->addr());
-      } else {
-        // overlaps tail part
-        address committed_rgn_end = committed_rgn->addr() +
-              committed_rgn->size();
-        assert(committed_rgn_end < rec->addr() + rec->size(),
-             "overlap tail part");
-        committed_rgn->expand_region(committed_rgn_end,
-          (rec->addr() + rec->size()) - committed_rgn_end);
-      }
-    } else if (committed_rgn->base() + committed_rgn->size() == rec->addr()) {
-      // adjacent to each other
-      committed_rgn->expand_region(rec->addr(), rec->size());
-      VMMemRegion* next_reg = (VMMemRegion*)next();
-      // see if we can consolidate next committed region
-      if (next_reg != NULL && next_reg->is_committed_region() &&
-        next_reg->base() == committed_rgn->base() + committed_rgn->size()) {
-          committed_rgn->expand_region(next_reg->base(), next_reg->size());
-          // delete merged region
-          remove();
-      }
-      return true;
-    } else if (committed_rgn->base() > rec->addr()) {
-      // found the location, insert this committed region
-      return insert_record(rec);
-    }
-    committed_rgn = (VMMemRegion*)next();
-  }
-  return insert_record(rec);
-}
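// Illustrative sketch, not part of the changeset: the consolidation rule applied by
// add_committed_region() above, reduced to plain address ranges. The Range type and
// try_merge() helper are hypothetical names introduced only for this example
// (assumes <stdint.h> / <stddef.h> for uintptr_t and size_t).
struct Range { uintptr_t base; size_t size; };

static bool try_merge(Range& existing, const Range& incoming) {
  uintptr_t existing_end = existing.base + existing.size;
  uintptr_t incoming_end = incoming.base + incoming.size;
  if (incoming.base > existing_end || incoming_end < existing.base) {
    return false;                     // disjoint and not adjacent: keep as separate regions
  }
  uintptr_t lo = existing.base < incoming.base ? existing.base : incoming.base;
  uintptr_t hi = existing_end > incoming_end ? existing_end : incoming_end;
  existing.base = lo;                 // expand the existing committed region
  existing.size = hi - lo;            // so that it covers both ranges
  return true;
}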
-
-bool VMMemPointerIterator::remove_uncommitted_region(MemPointerRecord* rec) {
-  assert(rec->is_uncommit_record(), "sanity check");
-  VMMemRegion* cur;
-  cur = (VMMemRegion*)current();
-  assert(cur->is_reserved_region() && cur->contains_region(rec),
-    "Sanity check");
-  // thread's native stack is always marked as "committed", ignore
-  // the "commit" operation for creating stack guard pages
-  if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
-      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
-    return true;
-  }
-
-  cur = (VMMemRegion*)next();
-  while (cur != NULL && cur->is_committed_region()) {
-    // region already uncommitted, must be due to duplicated record
-    if (cur->addr() >= rec->addr() + rec->size()) {
-      break;
-    } else if (cur->contains_region(rec)) {
-      // uncommit whole region
-      if (cur->is_same_region(rec)) {
-        remove();
-        break;
-      } else if (rec->addr() == cur->addr() ||
-        rec->addr() + rec->size() == cur->addr() + cur->size()) {
-        // uncommitted from either end of current memory region.
-        cur->exclude_region(rec->addr(), rec->size());
-        break;
-      } else { // split the committed region and release the middle
-        address high_addr = cur->addr() + cur->size();
-        size_t sz = high_addr - rec->addr();
-        cur->exclude_region(rec->addr(), sz);
-        sz = high_addr - (rec->addr() + rec->size());
-        if (MemTracker::track_callsite()) {
-          MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
-             ((VMMemRegionEx*)cur)->pc());
-          return insert_record_after(&tmp);
-        } else {
-          MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
-          return insert_record_after(&tmp);
-        }
-      }
-    }
-    cur = (VMMemRegion*)next();
-  }
-
-  // we may not find committed record due to duplicated records
-  return true;
-}
-
-bool VMMemPointerIterator::remove_released_region(MemPointerRecord* rec) {
-  assert(rec->is_deallocation_record(), "Sanity check");
-  VMMemRegion* cur = (VMMemRegion*)current();
-  assert(cur->is_reserved_region() && cur->contains_region(rec),
-    "Sanity check");
-  if (rec->is_same_region(cur)) {
-
-    // In the snapshot, the virtual memory records are sorted in the following order:
-    // 1. virtual memory's base address
-    // 2. virtual memory reservation record, followed by the commit records within this reservation.
-    //    The commit records are also in base address order.
-    // When a reserved region is released, we want to remove the reservation record and all
-    // commit records following it.
-#ifdef ASSERT
-    address low_addr = cur->addr();
-    address high_addr = low_addr + cur->size();
-#endif
-    // remove virtual memory reservation record
-    remove();
-    // remove committed regions within above reservation
-    VMMemRegion* next_region = (VMMemRegion*)current();
-    while (next_region != NULL && next_region->is_committed_region()) {
-      assert(next_region->addr() >= low_addr &&
-             next_region->addr() + next_region->size() <= high_addr,
-            "Range check");
-      remove();
-      next_region = (VMMemRegion*)current();
-    }
-  } else if (rec->addr() == cur->addr() ||
-    rec->addr() + rec->size() == cur->addr() + cur->size()) {
-    // released region is at either end of this region
-    cur->exclude_region(rec->addr(), rec->size());
-    assert(check_reserved_region(), "Integrity check");
-  } else { // split the reserved region and release the middle
-    address high_addr = cur->addr() + cur->size();
-    size_t sz = high_addr - rec->addr();
-    cur->exclude_region(rec->addr(), sz);
-    sz = high_addr - rec->addr() - rec->size();
-    if (MemTracker::track_callsite()) {
-      MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
-        ((VMMemRegionEx*)cur)->pc());
-      bool ret = insert_reserved_region(&tmp);
-      assert(!ret || check_reserved_region(), "Integrity check");
-      return ret;
-    } else {
-      MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
-      bool ret = insert_reserved_region(&tmp);
-      assert(!ret || check_reserved_region(), "Integrity check");
-      return ret;
-    }
-  }
-  return true;
-}
-
-bool VMMemPointerIterator::insert_reserved_region(MemPointerRecord* rec) {
-  // skip all 'commit' records associated with previous reserved region
-  VMMemRegion* p = (VMMemRegion*)next();
-  while (p != NULL && p->is_committed_region() &&
-         p->base() + p->size() < rec->addr()) {
-    p = (VMMemRegion*)next();
-  }
-  return insert_record(rec);
-}
-
-bool VMMemPointerIterator::split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size) {
-  assert(rgn->contains_region(new_rgn_addr, new_rgn_size), "Not fully contained");
-  address pc = (MemTracker::track_callsite() ? ((VMMemRegionEx*)rgn)->pc() : NULL);
-  if (rgn->base() == new_rgn_addr) { // new region is at the beginning of the region
-    size_t sz = rgn->size() - new_rgn_size;
-    // the original region becomes 'new' region
-    rgn->exclude_region(new_rgn_addr + new_rgn_size, sz);
-     // remaining becomes next region
-    MemPointerRecordEx next_rgn(new_rgn_addr + new_rgn_size, rgn->flags(), sz, pc);
-    return insert_reserved_region(&next_rgn);
-  } else if (rgn->base() + rgn->size() == new_rgn_addr + new_rgn_size) {
-    rgn->exclude_region(new_rgn_addr, new_rgn_size);
-    MemPointerRecordEx next_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
-    return insert_reserved_region(&next_rgn);
-  } else {
-    // the original region will be split into three
-    address rgn_high_addr = rgn->base() + rgn->size();
-    // first region
-    rgn->exclude_region(new_rgn_addr, (rgn_high_addr - new_rgn_addr));
-    // the second region is the new region
-    MemPointerRecordEx new_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
-    if (!insert_reserved_region(&new_rgn)) return false;
-    // the remaining region
-    MemPointerRecordEx rem_rgn(new_rgn_addr + new_rgn_size, rgn->flags(),
-      rgn_high_addr - (new_rgn_addr + new_rgn_size), pc);
-    return insert_reserved_region(&rem_rgn);
-  }
-}
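// Illustrative sketch, not part of the changeset: the possible outcomes of
// split_reserved_region() above, computed on plain [base, base + size) intervals.
// The Piece type and split_pieces() helper are hypothetical; as the assert above
// requires, the new region is assumed to lie entirely inside the reserved region.
struct Piece { uintptr_t base; size_t size; };

static int split_pieces(uintptr_t rgn_base, size_t rgn_size,
                        uintptr_t new_base, size_t new_size, Piece out[3]) {
  int n = 0;
  if (new_base > rgn_base) {                       // leading remainder, if any
    out[n].base = rgn_base;
    out[n].size = (size_t)(new_base - rgn_base);
    n++;
  }
  out[n].base = new_base;                          // the newly carved-out region
  out[n].size = new_size;
  n++;
  uintptr_t new_end = new_base + new_size;
  uintptr_t rgn_end = rgn_base + rgn_size;
  if (new_end < rgn_end) {                         // trailing remainder, if any
    out[n].base = new_end;
    out[n].size = (size_t)(rgn_end - new_end);
    n++;
  }
  return n;                                        // two pieces at an edge, three in the middle
}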
-
-static int sort_in_seq_order(const void* p1, const void* p2) {
-  assert(p1 != NULL && p2 != NULL, "Sanity check");
-  const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
-  const MemPointerRecord* mp2 = (MemPointerRecord*)p2;
-  return (mp1->seq() - mp2->seq());
-}
-
-bool StagingArea::init() {
-  if (MemTracker::track_callsite()) {
-    _malloc_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
-    _vm_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
-  } else {
-    _malloc_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
-    _vm_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
-  }
-
-  if (_malloc_data != NULL && _vm_data != NULL &&
-      !_malloc_data->out_of_memory() &&
-      !_vm_data->out_of_memory()) {
-    return true;
-  } else {
-    if (_malloc_data != NULL) delete _malloc_data;
-    if (_vm_data != NULL) delete _vm_data;
-    _malloc_data = NULL;
-    _vm_data = NULL;
-    return false;
-  }
-}
-
-
-VMRecordIterator StagingArea::virtual_memory_record_walker() {
-  MemPointerArray* arr = vm_data();
-  // sort into seq number order
-  arr->sort((FN_SORT)sort_in_seq_order);
-  return VMRecordIterator(arr);
-}
-
-
-MemSnapshot::MemSnapshot() {
-  if (MemTracker::track_callsite()) {
-    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecordEx>();
-    _vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegionEx>(64, true);
-  } else {
-    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecord>();
-    _vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegion>(64, true);
-  }
-
-  _staging_area.init();
-  _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
-  NOT_PRODUCT(_untracked_count = 0;)
-  _number_of_classes = 0;
-}
-
-MemSnapshot::~MemSnapshot() {
-  assert(MemTracker::shutdown_in_progress(), "native memory tracking still on");
-  {
-    MutexLockerEx locker(_lock);
-    if (_alloc_ptrs != NULL) {
-      delete _alloc_ptrs;
-      _alloc_ptrs = NULL;
-    }
-
-    if (_vm_ptrs != NULL) {
-      delete _vm_ptrs;
-      _vm_ptrs = NULL;
-    }
-  }
-
-  if (_lock != NULL) {
-    delete _lock;
-    _lock = NULL;
-  }
-}
-
-
-void MemSnapshot::copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
-  assert(dest != NULL && src != NULL, "Just check");
-  assert(dest->addr() == src->addr(), "Just check");
-  assert(dest->seq() > 0 && src->seq() > 0, "not sequenced");
-
-  if (MemTracker::track_callsite()) {
-    *(SeqMemPointerRecordEx*)dest = *(SeqMemPointerRecordEx*)src;
-  } else {
-    *(SeqMemPointerRecord*)dest = *(SeqMemPointerRecord*)src;
-  }
-}
-
-void MemSnapshot::assign_pointer(MemPointerRecord*dest, const MemPointerRecord* src) {
-  assert(src != NULL && dest != NULL, "Just check");
-  assert(dest->seq() == 0 && src->seq() >0, "cast away sequence");
-
-  if (MemTracker::track_callsite()) {
-    *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
-  } else {
-    *(MemPointerRecord*)dest = *(MemPointerRecord*)src;
-  }
-}
-
-// merge a recorder to the staging area
-bool MemSnapshot::merge(MemRecorder* rec) {
-  assert(rec != NULL && !rec->out_of_memory(), "Just check");
-
-  SequencedRecordIterator itr(rec->pointer_itr());
-
-  MutexLockerEx lock(_lock, true);
-  MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
-  MemPointerRecord* incoming_rec = (MemPointerRecord*) itr.current();
-  MemPointerRecord* matched_rec;
-
-  while (incoming_rec != NULL) {
-    if (incoming_rec->is_vm_pointer()) {
-      // we don't do anything with virtual memory records during merge
-      if (!_staging_area.vm_data()->append(incoming_rec)) {
-        return false;
-      }
-    } else {
-      // locate matched record and/or also position the iterator to proper
-      // location for this incoming record.
-      matched_rec = (MemPointerRecord*)malloc_staging_itr.locate(incoming_rec->addr());
-      // we have not seen this memory block in this generation,
-      // so just add to staging area
-      if (matched_rec == NULL) {
-        if (!malloc_staging_itr.insert(incoming_rec)) {
-          return false;
-        }
-      } else if (incoming_rec->addr() == matched_rec->addr()) {
-        // whoever has higher sequence number wins
-        if (incoming_rec->seq() > matched_rec->seq()) {
-          copy_seq_pointer(matched_rec, incoming_rec);
-        }
-      } else if (incoming_rec->addr() < matched_rec->addr()) {
-        if (!malloc_staging_itr.insert(incoming_rec)) {
-          return false;
-        }
-      } else {
-        ShouldNotReachHere();
-      }
-    }
-    incoming_rec = (MemPointerRecord*)itr.next();
-  }
-  NOT_PRODUCT(void check_staging_data();)
-  return true;
-}
-
-
-// promote data to next generation
-bool MemSnapshot::promote(int number_of_classes) {
-  assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
-  assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL,
-         "Just check");
-  MutexLockerEx lock(_lock, true);
-
-  MallocRecordIterator  malloc_itr = _staging_area.malloc_record_walker();
-  bool promoted = false;
-  if (promote_malloc_records(&malloc_itr)) {
-    VMRecordIterator vm_itr = _staging_area.virtual_memory_record_walker();
-    if (promote_virtual_memory_records(&vm_itr)) {
-      promoted = true;
-    }
-  }
-
-  NOT_PRODUCT(check_malloc_pointers();)
-  _staging_area.clear();
-  _number_of_classes = number_of_classes;
-  return promoted;
-}
-
-bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
-  MemPointerIterator malloc_snapshot_itr(_alloc_ptrs);
-  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
-  MemPointerRecord* matched_rec;
-  while (new_rec != NULL) {
-    matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
-    // found matched memory block
-    if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
-      // snapshot already contains 'live' records
-      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_memory_record(),
-             "Sanity check");
-      // update block states
-      if (new_rec->is_allocation_record()) {
-        assign_pointer(matched_rec, new_rec);
-      } else if (new_rec->is_arena_memory_record()) {
-        if (new_rec->size() == 0) {
-          // remove size record once size drops to 0
-          malloc_snapshot_itr.remove();
-        } else {
-          assign_pointer(matched_rec, new_rec);
-        }
-      } else {
-        // a deallocation record
-        assert(new_rec->is_deallocation_record(), "Sanity check");
-        // an arena record can be followed by a size record, we need to remove both
-        if (matched_rec->is_arena_record()) {
-          MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
-          if (next != NULL && next->is_arena_memory_record() &&
-              next->is_memory_record_of_arena(matched_rec)) {
-            malloc_snapshot_itr.remove();
-          }
-        }
-        // the memory is deallocated, remove related record(s)
-        malloc_snapshot_itr.remove();
-      }
-    } else {
-      // don't insert size 0 record
-      if (new_rec->is_arena_memory_record() && new_rec->size() == 0) {
-        new_rec = NULL;
-      }
-
-      if (new_rec != NULL) {
-        if  (new_rec->is_allocation_record() || new_rec->is_arena_memory_record()) {
-          if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
-            if (!malloc_snapshot_itr.insert_after(new_rec)) {
-              return false;
-            }
-          } else {
-            if (!malloc_snapshot_itr.insert(new_rec)) {
-              return false;
-            }
-          }
-        }
-#ifndef PRODUCT
-        else if (!has_allocation_record(new_rec->addr())) {
-          // NMT can not track some startup memory, which is allocated before NMT is on
-          _untracked_count ++;
-        }
-#endif
-      }
-    }
-    new_rec = (MemPointerRecord*)itr->next();
-  }
-  return true;
-}
-
-bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
-  VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
-  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
-  VMMemRegion*  reserved_rec;
-  while (new_rec != NULL) {
-    assert(new_rec->is_vm_pointer(), "Sanity check");
-
-    // locate a reserved region that contains the specified address, or
-    // the nearest reserved region whose base address is just above the specified
-    // address
-    reserved_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
-    if (reserved_rec != NULL && reserved_rec->contains_region(new_rec)) {
-      // snapshot can only have 'live' records
-      assert(reserved_rec->is_reserved_region(), "Sanity check");
-      if (new_rec->is_allocation_record()) {
-        if (!reserved_rec->is_same_region(new_rec)) {
-          // only deal with splitting a bigger reserved region into smaller regions.
-          // So far, CDS is the only use case.
-          if (!vm_snapshot_itr.split_reserved_region(reserved_rec, new_rec->addr(), new_rec->size())) {
-            return false;
-          }
-        }
-      } else if (new_rec->is_uncommit_record()) {
-        if (!vm_snapshot_itr.remove_uncommitted_region(new_rec)) {
-          return false;
-        }
-      } else if (new_rec->is_commit_record()) {
-        // insert or expand existing committed region to cover this
-        // newly committed region
-        if (!vm_snapshot_itr.add_committed_region(new_rec)) {
-          return false;
-        }
-      } else if (new_rec->is_deallocation_record()) {
-        // release part or all memory region
-        if (!vm_snapshot_itr.remove_released_region(new_rec)) {
-          return false;
-        }
-      } else if (new_rec->is_type_tagging_record()) {
-        // tag this reserved virtual memory range with a memory type. A memory range
-        // cannot be re-tagged with a different type.
-        assert(FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == mtNone ||
-               FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_rec->flags()),
-               "Sanity check");
-        reserved_rec->tag(new_rec->flags());
-      } else {
-        ShouldNotReachHere();
-      }
-    } else {
-      /*
-       * An assertion failure here indicates mismatched virtual memory records. The likely
-       * scenario is that some virtual memory operations do not go through the os::xxxx_memory()
-       * API and therefore have to be tracked manually (perfMemory is an example).
-       */
-      assert(new_rec->is_allocation_record(), "Sanity check");
-      if (!vm_snapshot_itr.add_reserved_region(new_rec)) {
-        return false;
-      }
-    }
-    new_rec = (MemPointerRecord*)itr->next();
-  }
-  return true;
-}
-
-#ifndef PRODUCT
-void MemSnapshot::print_snapshot_stats(outputStream* st) {
-  st->print_cr("Snapshot:");
-  st->print_cr("\tMalloced: %d/%d [%5.2f%%]  %dKB", _alloc_ptrs->length(), _alloc_ptrs->capacity(),
-    (100.0 * (float)_alloc_ptrs->length()) / (float)_alloc_ptrs->capacity(), _alloc_ptrs->instance_size()/K);
-
-  st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(),
-    (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K);
-
-  st->print_cr("\tMalloc staging Area:     %d/%d [%5.2f%%] %dKB", _staging_area.malloc_data()->length(),
-    _staging_area.malloc_data()->capacity(),
-    (100.0 * (float)_staging_area.malloc_data()->length()) / (float)_staging_area.malloc_data()->capacity(),
-    _staging_area.malloc_data()->instance_size()/K);
-
-  st->print_cr("\tVirtual memory staging Area:     %d/%d [%5.2f%%] %dKB", _staging_area.vm_data()->length(),
-    _staging_area.vm_data()->capacity(),
-    (100.0 * (float)_staging_area.vm_data()->length()) / (float)_staging_area.vm_data()->capacity(),
-    _staging_area.vm_data()->instance_size()/K);
-
-  st->print_cr("\tUntracked allocation: %d", _untracked_count);
-}
-
-void MemSnapshot::check_malloc_pointers() {
-  MemPointerArrayIteratorImpl mItr(_alloc_ptrs);
-  MemPointerRecord* p = (MemPointerRecord*)mItr.current();
-  MemPointerRecord* prev = NULL;
-  while (p != NULL) {
-    if (prev != NULL) {
-      assert(p->addr() >= prev->addr(), "sorting order");
-    }
-    prev = p;
-    p = (MemPointerRecord*)mItr.next();
-  }
-}
-
-bool MemSnapshot::has_allocation_record(address addr) {
-  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
-  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
-  while (cur != NULL) {
-    if (cur->addr() == addr && cur->is_allocation_record()) {
-      return true;
-    }
-    cur = (MemPointerRecord*)itr.next();
-  }
-  return false;
-}
-#endif // PRODUCT
-
-#ifdef ASSERT
-void MemSnapshot::check_staging_data() {
-  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
-  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
-  MemPointerRecord* next = (MemPointerRecord*)itr.next();
-  while (next != NULL) {
-    assert((next->addr() > cur->addr()) ||
-      ((next->flags() & MemPointerRecord::tag_masks) >
-       (cur->flags() & MemPointerRecord::tag_masks)),
-       "sorting order");
-    cur = next;
-    next = (MemPointerRecord*)itr.next();
-  }
-
-  MemPointerArrayIteratorImpl vm_itr(_staging_area.vm_data());
-  cur = (MemPointerRecord*)vm_itr.current();
-  while (cur != NULL) {
-    assert(cur->is_vm_pointer(), "virtual memory pointer only");
-    cur = (MemPointerRecord*)vm_itr.next();
-  }
-}
-
-void MemSnapshot::dump_all_vm_pointers() {
-  MemPointerArrayIteratorImpl itr(_vm_ptrs);
-  VMMemRegion* ptr = (VMMemRegion*)itr.current();
-  tty->print_cr("dump virtual memory pointers:");
-  while (ptr != NULL) {
-    if (ptr->is_committed_region()) {
-      tty->print("\t");
-    }
-    tty->print("[" PTR_FORMAT " - " PTR_FORMAT "] [%x]", ptr->addr(),
-      (ptr->addr() + ptr->size()), ptr->flags());
-
-    if (MemTracker::track_callsite()) {
-      VMMemRegionEx* ex = (VMMemRegionEx*)ptr;
-      if (ex->pc() != NULL) {
-        char buf[1024];
-        if (os::dll_address_to_function_name(ex->pc(), buf, sizeof(buf), NULL)) {
-          tty->print_cr("\t%s", buf);
-        } else {
-          tty->cr();
-        }
-      }
-    }
-
-    ptr = (VMMemRegion*)itr.next();
-  }
-  tty->flush();
-}
-#endif // ASSERT
-
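The removed merge()/promote() pair formed a two-stage pipeline: per-thread recorders were first merged into the staging area, and the staged data was then promoted into the live snapshot arrays. A minimal sketch of that flow, using only calls visible in the removed code (the process_generation wrapper is hypothetical, and recorder release/ownership is omitted):

    // Sketch only: stage one generation of recorders, then promote the staged data.
    static bool process_generation(MemSnapshot* snapshot, MemRecorder* recorders, int number_of_classes) {
      for (MemRecorder* rec = recorders; rec != NULL; rec = rec->next()) {
        if (!snapshot->merge(rec)) {
          return false;                            // out of memory while staging
        }
      }
      return snapshot->promote(number_of_classes); // fold staged records into the snapshot
    }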
--- a/src/share/vm/services/memSnapshot.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,408 +0,0 @@
-/*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
-#define SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
-
-#include "memory/allocation.hpp"
-#include "runtime/mutex.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "services/memBaseline.hpp"
-#include "services/memPtrArray.hpp"
-
-// Snapshot pointer array iterator
-
-// The pointer array contains malloc-ed pointers
-class MemPointerIterator : public MemPointerArrayIteratorImpl {
- public:
-  MemPointerIterator(MemPointerArray* arr):
-    MemPointerArrayIteratorImpl(arr) {
-    assert(arr != NULL, "null array");
-  }
-
-#ifdef ASSERT
-  virtual bool is_dup_pointer(const MemPointer* ptr1,
-    const MemPointer* ptr2) const {
-    MemPointerRecord* p1 = (MemPointerRecord*)ptr1;
-    MemPointerRecord* p2 = (MemPointerRecord*)ptr2;
-
-    if (p1->addr() != p2->addr()) return false;
-    if ((p1->flags() & MemPointerRecord::tag_masks) !=
-        (p2->flags() & MemPointerRecord::tag_masks)) {
-      return false;
-    }
-    // we do see multiple commit/uncommit operations on the same memory; that is ok
-    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
-           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
-  }
-
-  virtual bool insert(MemPointer* ptr) {
-    if (_pos > 0) {
-      MemPointer* p1 = (MemPointer*)ptr;
-      MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
-      assert(!is_dup_pointer(p1, p2),
-        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
-    }
-     if (_pos < _array->length() -1) {
-      MemPointer* p1 = (MemPointer*)ptr;
-      MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
-      assert(!is_dup_pointer(p1, p2),
-        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
-     }
-    return _array->insert_at(ptr, _pos);
-  }
-
-  virtual bool insert_after(MemPointer* ptr) {
-    if (_pos > 0) {
-      MemPointer* p1 = (MemPointer*)ptr;
-      MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
-      assert(!is_dup_pointer(p1, p2),
-        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
-    }
-    if (_pos < _array->length() - 1) {
-      MemPointer* p1 = (MemPointer*)ptr;
-      MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
-
-      assert(!is_dup_pointer(p1, p2),
-        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
-     }
-    if (_array->insert_at(ptr, _pos + 1)) {
-      _pos ++;
-      return true;
-    }
-    return false;
-  }
-#endif
-
-  virtual MemPointer* locate(address addr) {
-    MemPointer* cur = current();
-    while (cur != NULL && cur->addr() < addr) {
-      cur = next();
-    }
-    return cur;
-  }
-};
-
-class VMMemPointerIterator : public MemPointerIterator {
- public:
-  VMMemPointerIterator(MemPointerArray* arr):
-      MemPointerIterator(arr) {
-  }
-
-  // locate an existing reserved memory region that contains specified address,
-  // or the reserved region just above this address, where the incoming
-  // reserved region should be inserted.
-  virtual MemPointer* locate(address addr) {
-    reset();
-    VMMemRegion* reg = (VMMemRegion*)current();
-    while (reg != NULL) {
-      if (reg->is_reserved_region()) {
-        if (reg->contains_address(addr) || addr < reg->base()) {
-          return reg;
-        }
-      }
-      reg = (VMMemRegion*)next();
-    }
-    return NULL;
-  }
-
-  // following methods update virtual memory in the context
-  // of 'current' position, which is properly positioned by
-  // callers via locate method.
-  bool add_reserved_region(MemPointerRecord* rec);
-  bool add_committed_region(MemPointerRecord* rec);
-  bool remove_uncommitted_region(MemPointerRecord* rec);
-  bool remove_released_region(MemPointerRecord* rec);
-
-  // split a reserved region to create a new memory region with specified base and size
-  bool split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size);
- private:
-  bool insert_record(MemPointerRecord* rec);
-  bool insert_record_after(MemPointerRecord* rec);
-
-  bool insert_reserved_region(MemPointerRecord* rec);
-
-  // reset current position
-  inline void reset() { _pos = 0; }
-#ifdef ASSERT
-  // check integrity of records on current reserved memory region.
-  bool check_reserved_region() {
-    VMMemRegion* reserved_region = (VMMemRegion*)current();
-    assert(reserved_region != NULL && reserved_region->is_reserved_region(),
-          "Sanity check");
-    // all committed regions that follow current reserved region, should all
-    // belong to the reserved region.
-    VMMemRegion* next_region = (VMMemRegion*)next();
-    for (; next_region != NULL && next_region->is_committed_region();
-         next_region = (VMMemRegion*)next() ) {
-      if(!reserved_region->contains_region(next_region)) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  virtual bool is_dup_pointer(const MemPointer* ptr1,
-    const MemPointer* ptr2) const {
-    VMMemRegion* p1 = (VMMemRegion*)ptr1;
-    VMMemRegion* p2 = (VMMemRegion*)ptr2;
-
-    if (p1->addr() != p2->addr()) return false;
-    if ((p1->flags() & MemPointerRecord::tag_masks) !=
-        (p2->flags() & MemPointerRecord::tag_masks)) {
-      return false;
-    }
-    // we do see multiple commit/uncommit operations on the same memory; that is ok
-    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
-           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
-  }
-#endif
-};
-
-class MallocRecordIterator : public MemPointerArrayIterator {
- private:
-  MemPointerArrayIteratorImpl  _itr;
-
-
-
- public:
-  MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
-  }
-
-  virtual MemPointer* current() const {
-#ifdef ASSERT
-    MemPointer* cur_rec = _itr.current();
-    if (cur_rec != NULL) {
-      MemPointer* prev_rec = _itr.peek_prev();
-      MemPointer* next_rec = _itr.peek_next();
-      assert(prev_rec == NULL || prev_rec->addr() < cur_rec->addr(), "Sorting order");
-      assert(next_rec == NULL || next_rec->addr() > cur_rec->addr(), "Sorting order");
-    }
-#endif
-    return _itr.current();
-  }
-  virtual MemPointer* next() {
-    MemPointerRecord* next_rec = (MemPointerRecord*)_itr.next();
-    // an arena memory record is a special case: we have to compare its
-    // sequence number against that of its associated arena record.
-    if (next_rec != NULL && next_rec->is_arena_memory_record()) {
-      MemPointerRecord* prev_rec = (MemPointerRecord*)_itr.peek_prev();
-      // if there is an associated arena record, it has to be the previous
-      // record because of the sorting order (by address) - NMT generates a pseudo address
-      // for an arena's size record by offsetting the arena's address, which guarantees
-      // the ordering of an arena record and its size record.
-      if (prev_rec != NULL && prev_rec->is_arena_record() &&
-        next_rec->is_memory_record_of_arena(prev_rec)) {
-        if (prev_rec->seq() > next_rec->seq()) {
-          // Skip this arena memory record.
-          // Two scenarios:
-          //   - if the arena record is an allocation record, this early
-          //     size record must be left over from a previous arena,
-          //     and the last size record should have size = 0.
-          //   - if the arena record is a deallocation record, this
-          //     size record should be its cleanup record, which should
-          //     also have size = 0. In other words, an arena always resets
-          //     its size before it is destroyed (see Arena's destructor).
-          assert(next_rec->size() == 0, "size not reset");
-          return _itr.next();
-        } else {
-          assert(prev_rec->is_allocation_record(),
-            "Arena size record ahead of allocation record");
-        }
-      }
-    }
-    return next_rec;
-  }
-
-  MemPointer* peek_next() const      { ShouldNotReachHere(); return NULL; }
-  MemPointer* peek_prev() const      { ShouldNotReachHere(); return NULL; }
-  void remove()                      { ShouldNotReachHere(); }
-  bool insert(MemPointer* ptr)       { ShouldNotReachHere(); return false; }
-  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
-};
-
-// Collapse duplicated records. Eliminating duplicated records here is much
-// cheaper than doing so during the promotion phase. However, it has a limitation:
-// it can only eliminate duplicated records within a generation, so there is
-// still a chance of seeing duplicated records during promotion.
-// We want to use the record with the higher sequence number, because it has
-// a more accurate callsite pc.
-class VMRecordIterator : public MemPointerArrayIterator {
- private:
-  MemPointerArrayIteratorImpl  _itr;
-
- public:
-  VMRecordIterator(MemPointerArray* arr) : _itr(arr) {
-    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
-    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
-    while (next != NULL) {
-      assert(cur != NULL, "Sanity check");
-      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
-        "pre-sort order");
-
-      if (is_duplicated_record(cur, next)) {
-        _itr.next();
-        next = (MemPointerRecord*)_itr.peek_next();
-      } else {
-        break;
-      }
-    }
-  }
-
-  virtual MemPointer* current() const {
-    return _itr.current();
-  }
-
-  // get next record, but skip the duplicated records
-  virtual MemPointer* next() {
-    MemPointerRecord* cur = (MemPointerRecord*)_itr.next();
-    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
-    while (next != NULL) {
-      assert(cur != NULL, "Sanity check");
-      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
-        "pre-sort order");
-
-      if (is_duplicated_record(cur, next)) {
-        _itr.next();
-        cur = next;
-        next = (MemPointerRecord*)_itr.peek_next();
-      } else {
-        break;
-      }
-    }
-    return cur;
-  }
-
-  MemPointer* peek_next() const      { ShouldNotReachHere(); return NULL; }
-  MemPointer* peek_prev() const      { ShouldNotReachHere(); return NULL; }
-  void remove()                      { ShouldNotReachHere(); }
-  bool insert(MemPointer* ptr)       { ShouldNotReachHere(); return false; }
-  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
-
- private:
-  bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const {
-    bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() && p1->flags() == p2->flags());
-    assert(!(ret && FLAGS_TO_MEMORY_TYPE(p1->flags()) == mtThreadStack), "dup on stack record");
-    return ret;
-  }
-};
-
-class StagingArea VALUE_OBJ_CLASS_SPEC {
- private:
-  MemPointerArray*   _malloc_data;
-  MemPointerArray*   _vm_data;
-
- public:
-  StagingArea() : _malloc_data(NULL), _vm_data(NULL) {
-    init();
-  }
-
-  ~StagingArea() {
-    if (_malloc_data != NULL) delete _malloc_data;
-    if (_vm_data != NULL) delete _vm_data;
-  }
-
-  MallocRecordIterator malloc_record_walker() {
-    return MallocRecordIterator(malloc_data());
-  }
-
-  VMRecordIterator virtual_memory_record_walker();
-
-  bool init();
-  void clear() {
-    assert(_malloc_data != NULL && _vm_data != NULL, "Just check");
-    _malloc_data->shrink();
-    _malloc_data->clear();
-    _vm_data->clear();
-  }
-
-  inline MemPointerArray* malloc_data() { return _malloc_data; }
-  inline MemPointerArray* vm_data()     { return _vm_data; }
-};
-
-class MemBaseline;
-class MemSnapshot : public CHeapObj<mtNMT> {
- private:
-  // the following two arrays contain records of all known lived memory blocks
-  // live malloc-ed memory pointers
-  MemPointerArray*      _alloc_ptrs;
-  // live virtual memory pointers
-  MemPointerArray*      _vm_ptrs;
-
-  StagingArea           _staging_area;
-
-  // the lock to protect this snapshot
-  Monitor*              _lock;
-
-  // the number of instance classes
-  int                   _number_of_classes;
-
-  NOT_PRODUCT(size_t    _untracked_count;)
-  friend class MemBaseline;
-
- public:
-  MemSnapshot();
-  virtual ~MemSnapshot();
-
-  // if we are running out of native memory
-  bool out_of_memory() {
-    return (_alloc_ptrs == NULL ||
-      _staging_area.malloc_data() == NULL ||
-      _staging_area.vm_data() == NULL ||
-      _vm_ptrs == NULL || _lock == NULL ||
-      _alloc_ptrs->out_of_memory() ||
-      _vm_ptrs->out_of_memory());
-  }
-
-  // merge a per-thread memory recorder into staging area
-  bool merge(MemRecorder* rec);
-  // promote staged data to snapshot
-  bool promote(int number_of_classes);
-
-  int  number_of_classes() const { return _number_of_classes; }
-
-  void wait(long timeout) {
-    assert(_lock != NULL, "Just check");
-    MonitorLockerEx locker(_lock);
-    locker.wait(true, timeout);
-  }
-
-  NOT_PRODUCT(void print_snapshot_stats(outputStream* st);)
-  NOT_PRODUCT(void check_staging_data();)
-  NOT_PRODUCT(void check_malloc_pointers();)
-  NOT_PRODUCT(bool has_allocation_record(address addr);)
-  // dump all virtual memory pointers in snapshot
-  DEBUG_ONLY( void dump_all_vm_pointers();)
-
- private:
-   // copy sequenced pointer from src to dest
-   void copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
-   // assign a sequenced pointer to non-sequenced pointer
-   void assign_pointer(MemPointerRecord*dest, const MemPointerRecord* src);
-
-   bool promote_malloc_records(MemPointerArrayIterator* itr);
-   bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
-};
-
-#endif // SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
--- a/src/share/vm/services/memTrackWorker.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,212 +0,0 @@
-/*
- * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/threadCritical.hpp"
-#include "services/memTracker.hpp"
-#include "services/memTrackWorker.hpp"
-#include "utilities/decoder.hpp"
-#include "utilities/vmError.hpp"
-
-
-void GenerationData::reset() {
-  _number_of_classes = 0;
-  while (_recorder_list != NULL) {
-    MemRecorder* tmp = _recorder_list;
-    _recorder_list = _recorder_list->next();
-    MemTracker::release_thread_recorder(tmp);
-  }
-}
-
-MemTrackWorker::MemTrackWorker(MemSnapshot* snapshot): _snapshot(snapshot) {
-  // the created thread uses the cgc thread type for now. We should revisit
-  // this choice, or create a new thread type.
-  _has_error = !os::create_thread(this, os::cgc_thread);
-  set_name("MemTrackWorker");
-
-  // initialize the generation circular buffer
-  if (!has_error()) {
-    _head = _tail = 0;
-    for(int index = 0; index < MAX_GENERATIONS; index ++) {
-      ::new ((void*)&_gen[index]) GenerationData();
-    }
-  }
-  NOT_PRODUCT(_sync_point_count = 0;)
-  NOT_PRODUCT(_merge_count = 0;)
-  NOT_PRODUCT(_last_gen_in_use = 0;)
-}
-
-MemTrackWorker::~MemTrackWorker() {
-  for (int index = 0; index < MAX_GENERATIONS; index ++) {
-    _gen[index].reset();
-  }
-}
-
-void* MemTrackWorker::operator new(size_t size) throw() {
-  assert(false, "use nothrow version");
-  return NULL;
-}
-
-void* MemTrackWorker::operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
-  return allocate(size, false, mtNMT);
-}
-
-void MemTrackWorker::start() {
-  os::start_thread(this);
-}
-
-/*
- * Native memory tracking worker thread loop:
- *   1. merge one generation of memory recorders into the staging area
- *   2. promote the staged data to the memory snapshot
- *
- * This thread can run through safepoints.
- */
-
-void MemTrackWorker::run() {
-  assert(MemTracker::is_on(), "native memory tracking is off");
-  this->initialize_thread_local_storage();
-  this->record_stack_base_and_size();
-  assert(_snapshot != NULL, "Worker should not be started");
-  MemRecorder* rec;
-  unsigned long processing_generation = 0;
-  bool          worker_idle = false;
-
-  while (!MemTracker::shutdown_in_progress()) {
-    NOT_PRODUCT(_last_gen_in_use = generations_in_use();)
-    {
-      // take a recorder from earliest generation in buffer
-      ThreadCritical tc;
-      rec = _gen[_head].next_recorder();
-    }
-    if (rec != NULL) {
-      if (rec->get_generation() != processing_generation || worker_idle) {
-        processing_generation = rec->get_generation();
-        worker_idle = false;
-        MemTracker::set_current_processing_generation(processing_generation);
-      }
-
-      // merge the recorder into staging area
-      if (!_snapshot->merge(rec)) {
-        MemTracker::shutdown(MemTracker::NMT_out_of_memory);
-      } else {
-        NOT_PRODUCT(_merge_count ++;)
-      }
-      MemTracker::release_thread_recorder(rec);
-    } else {
-      // no more recorder to merge, promote staging area
-      // to snapshot
-      if (_head != _tail) {
-        long number_of_classes;
-        {
-          ThreadCritical tc;
-          if (_gen[_head].has_more_recorder() || _head == _tail) {
-            continue;
-          }
-          number_of_classes = _gen[_head].number_of_classes();
-          _gen[_head].reset();
-
-          // done with this generation, increment _head pointer
-          _head = (_head + 1) % MAX_GENERATIONS;
-        }
-        // promote this generation data to snapshot
-        if (!_snapshot->promote(number_of_classes)) {
-          // failed to promote, means out of memory
-          MemTracker::shutdown(MemTracker::NMT_out_of_memory);
-        }
-      } else {
-        // worker thread is idle
-        worker_idle = true;
-        MemTracker::report_worker_idle();
-        _snapshot->wait(1000);
-        ThreadCritical tc;
-        // check if more data arrived
-        if (!_gen[_head].has_more_recorder()) {
-          _gen[_head].add_recorders(MemTracker::get_pending_recorders());
-        }
-      }
-    }
-  }
-  assert(MemTracker::shutdown_in_progress(), "just check");
-
-  // transition to the final shutdown
-  MemTracker::final_shutdown();
-}
-
-// At a synchronization point, 'safepoint visible' Java threads are blocked
-// at a safepoint, and the rest of the threads are blocked on the ThreadCritical lock.
-// The caller, MemTracker::sync(), already holds ThreadCritical before calling this
-// method.
-//
-// The following tasks are performed:
-//   1. add all recorders in the pending queue to the current generation
-//   2. advance to the next generation
-
-void MemTrackWorker::at_sync_point(MemRecorder* rec, int number_of_classes) {
-  NOT_PRODUCT(_sync_point_count ++;)
-  assert(count_recorder(rec) <= MemRecorder::_instance_count,
-    "pending queue has infinite loop");
-
-  bool out_of_generation_buffer = false;
-  // check shutdown state inside ThreadCritical
-  if (MemTracker::shutdown_in_progress()) return;
-
-  _gen[_tail].set_number_of_classes(number_of_classes);
-  // append the recorders to the end of the generation
-  _gen[_tail].add_recorders(rec);
-  assert(count_recorder(_gen[_tail].peek()) <= MemRecorder::_instance_count,
-    "after add to current generation has infinite loop");
-  // we have collected all recorders for this generation. If there is data,
-  // we need to increment _tail to start a new generation.
-  if (_gen[_tail].has_more_recorder()  || _head == _tail) {
-    _tail = (_tail + 1) % MAX_GENERATIONS;
-    out_of_generation_buffer = (_tail == _head);
-  }
-
-  if (out_of_generation_buffer) {
-    MemTracker::shutdown(MemTracker::NMT_out_of_generation);
-  }
-}
-
-#ifndef PRODUCT
-int MemTrackWorker::count_recorder(const MemRecorder* head) {
-  int count = 0;
-  while(head != NULL) {
-    count ++;
-    head = head->next();
-  }
-  return count;
-}
-
-int MemTrackWorker::count_pending_recorders() const {
-  int count = 0;
-  for (int index = 0; index < MAX_GENERATIONS; index ++) {
-    MemRecorder* head = _gen[index].peek();
-    if (head != NULL) {
-      count += count_recorder(head);
-    }
-  }
-  return count;
-}
-#endif
--- a/src/share/vm/services/memTrackWorker.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,118 +0,0 @@
-/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SERVICES_MEM_TRACK_WORKER_HPP
-#define SHARE_VM_SERVICES_MEM_TRACK_WORKER_HPP
-
-#include "memory/allocation.hpp"
-#include "runtime/thread.hpp"
-#include "services/memRecorder.hpp"
-
-// At most MAX_GENERATIONS generations of data can be tracked.
-#define MAX_GENERATIONS  512
-
-class GenerationData VALUE_OBJ_CLASS_SPEC {
- private:
-  int           _number_of_classes;
-  MemRecorder*  _recorder_list;
-
- public:
-  GenerationData(): _number_of_classes(0), _recorder_list(NULL) { }
-
-  inline int  number_of_classes() const { return _number_of_classes; }
-  inline void set_number_of_classes(long num) { _number_of_classes = num; }
-
-  inline MemRecorder* next_recorder() {
-    if (_recorder_list == NULL) {
-      return NULL;
-    } else {
-      MemRecorder* tmp = _recorder_list;
-      _recorder_list = _recorder_list->next();
-      return tmp;
-    }
-  }
-
-  inline bool has_more_recorder() const {
-    return (_recorder_list != NULL);
-  }
-
-  // add recorders to this generation
-  void add_recorders(MemRecorder* head) {
-    if (head != NULL) {
-      if (_recorder_list == NULL) {
-        _recorder_list = head;
-      } else {
-        MemRecorder* tmp = _recorder_list;
-        for (; tmp->next() != NULL; tmp = tmp->next());
-        tmp->set_next(head);
-      }
-    }
-  }
-
-  void reset();
-
-  NOT_PRODUCT(MemRecorder* peek() const { return _recorder_list; })
-};
-
-class MemTrackWorker : public NamedThread {
- private:
-  // circular buffer. This buffer contains generation data to be merged into the global
-  // snapshot.
-  // Each slot holds one generation.
-  GenerationData  _gen[MAX_GENERATIONS];
-  int             _head, _tail; // head and tail pointers to above circular buffer
-
-  bool            _has_error;
-
-  MemSnapshot*    _snapshot;
-
- public:
-  MemTrackWorker(MemSnapshot* snapshot);
-  ~MemTrackWorker();
-  _NOINLINE_ void* operator new(size_t size) throw();
-  _NOINLINE_ void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw();
-
-  void start();
-  void run();
-
-  inline bool has_error() const { return _has_error; }
-
-  // task at synchronization point
-  void at_sync_point(MemRecorder* pending_recorders, int number_of_classes);
-
-  // for debugging purposes; these methods are not thread safe.
-  NOT_PRODUCT(static int count_recorder(const MemRecorder* head);)
-  NOT_PRODUCT(int count_pending_recorders() const;)
-
-  NOT_PRODUCT(int _sync_point_count;)
-  NOT_PRODUCT(int _merge_count;)
-  NOT_PRODUCT(int _last_gen_in_use;)
-
-  // how many generations are queued
-  inline int generations_in_use() const {
-    return (_tail >= _head ? (_tail - _head + 1) : (MAX_GENERATIONS - (_head - _tail) + 1));
-  }
-};
-
-#endif // SHARE_VM_SERVICES_MEM_TRACK_WORKER_HPP
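The removed worker tracked generations in a fixed ring of MAX_GENERATIONS slots indexed by _head and _tail; generations_in_use() above computed the occupancy with wrap-around. The same arithmetic on plain integers, shown only as a worked illustration (the free-standing helper is hypothetical):

    // head and tail are slot indices into a ring of max_generations slots (both in use).
    static int ring_generations_in_use(int head, int tail, int max_generations) {
      return (tail >= head) ? (tail - head + 1)
                            : (max_generations - (head - tail) + 1);
    }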
--- a/src/share/vm/services/memTracker.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/services/memTracker.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,861 +23,304 @@
  */
 #include "precompiled.hpp"
 
-#include "oops/instanceKlass.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/interfaceSupport.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/safepoint.hpp"
-#include "runtime/threadCritical.hpp"
-#include "runtime/vm_operations.hpp"
-#include "services/memPtr.hpp"
+#include "runtime/mutex.hpp"
+#include "services/memBaseline.hpp"
 #include "services/memReporter.hpp"
+#include "services/mallocTracker.inline.hpp"
 #include "services/memTracker.hpp"
-#include "utilities/decoder.hpp"
 #include "utilities/defaultStream.hpp"
-#include "utilities/globalDefinitions.hpp"
 
-bool NMT_track_callsite = false;
+#ifdef SOLARIS
+  volatile bool NMT_stack_walkable = false;
+#else
+  volatile bool NMT_stack_walkable = true;
+#endif
 
-// walk all 'known' threads at NMT sync point, and collect their recorders
-void SyncThreadRecorderClosure::do_thread(Thread* thread) {
-  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
-  if (thread->is_Java_thread()) {
-    JavaThread* javaThread = (JavaThread*)thread;
-    MemRecorder* recorder = javaThread->get_recorder();
-    if (recorder != NULL) {
-      MemTracker::enqueue_pending_recorder(recorder);
-      javaThread->set_recorder(NULL);
-    }
-  }
-  _thread_count ++;
-}
+volatile NMT_TrackingLevel MemTracker::_tracking_level = NMT_unknown;
+NMT_TrackingLevel MemTracker::_cmdline_tracking_level = NMT_unknown;
+
+MemBaseline MemTracker::_baseline;
+Mutex*      MemTracker::_query_lock = NULL;
+bool MemTracker::_is_nmt_env_valid = true;
 
 
-MemRecorder* volatile           MemTracker::_global_recorder = NULL;
-MemSnapshot*                    MemTracker::_snapshot = NULL;
-MemBaseline                     MemTracker::_baseline;
-Mutex*                          MemTracker::_query_lock = NULL;
-MemRecorder* volatile           MemTracker::_merge_pending_queue = NULL;
-MemRecorder* volatile           MemTracker::_pooled_recorders = NULL;
-MemTrackWorker*                 MemTracker::_worker_thread = NULL;
-int                             MemTracker::_sync_point_skip_count = 0;
-MemTracker::NMTLevel            MemTracker::_tracking_level = MemTracker::NMT_off;
-volatile MemTracker::NMTStates  MemTracker::_state = NMT_uninited;
-MemTracker::ShutdownReason      MemTracker::_reason = NMT_shutdown_none;
-int                             MemTracker::_thread_count = 255;
-volatile jint                   MemTracker::_pooled_recorder_count = 0;
-volatile unsigned long          MemTracker::_processing_generation = 0;
-volatile bool                   MemTracker::_worker_thread_idle = false;
-volatile jint                   MemTracker::_pending_op_count = 0;
-volatile bool                   MemTracker::_slowdown_calling_thread = false;
-debug_only(intx                 MemTracker::_main_thread_tid = 0;)
-NOT_PRODUCT(volatile jint       MemTracker::_pending_recorder_count = 0;)
-
-void MemTracker::init_tracking_options(const char* option_line) {
-  _tracking_level = NMT_off;
-  if (strcmp(option_line, "=summary") == 0) {
-    _tracking_level = NMT_summary;
-  } else if (strcmp(option_line, "=detail") == 0) {
-    // detail relies on a stack-walking ability that may not
-    // be available depending on platform and/or compiler flags
+NMT_TrackingLevel MemTracker::init_tracking_level() {
+  NMT_TrackingLevel level = NMT_off;
+  char buf[64];
+  char nmt_option[64];
+  jio_snprintf(buf, sizeof(buf), "NMT_LEVEL_%d", os::current_process_id());
+  if (os::getenv(buf, nmt_option, sizeof(nmt_option))) {
+    if (strcmp(nmt_option, "summary") == 0) {
+      level = NMT_summary;
+    } else if (strcmp(nmt_option, "detail") == 0) {
 #if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
-      _tracking_level = NMT_detail;
+      level = NMT_detail;
 #else
-      jio_fprintf(defaultStream::error_stream(),
-        "NMT detail is not supported on this platform.  Using NMT summary instead.\n");
-      _tracking_level = NMT_summary;
-#endif
-  } else if (strcmp(option_line, "=off") != 0) {
-    vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
-  }
-}
-
-// first phase of bootstrapping, when VM is still in single-threaded mode.
-void MemTracker::bootstrap_single_thread() {
-  if (_tracking_level > NMT_off) {
-    assert(_state == NMT_uninited, "wrong state");
-
-    // NMT is not supported when UseMallocOnly is on. NMT can NOT
-    // handle the amount of malloc data without significantly impacting
-    // runtime performance when this flag is on.
-    if (UseMallocOnly) {
-      shutdown(NMT_use_malloc_only);
-      return;
+      level = NMT_summary;
+#endif // PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
+    } else if (strcmp(nmt_option, "off") != 0) {
+      // The option value is invalid
+      _is_nmt_env_valid = false;
     }
 
-    _query_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf, "NMT_queryLock");
-    if (_query_lock == NULL) {
-      shutdown(NMT_out_of_memory);
-      return;
-    }
+    // Remove the environment variable to avoid leaking to child processes
+    os::unsetenv(buf);
+  }
 
-    debug_only(_main_thread_tid = os::current_thread_id();)
-    _state = NMT_bootstrapping_single_thread;
-    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
+  // Construct NativeCallStack::EMPTY_STACK. It may get constructed twice,
+  // but that is benign; the results are the same.
+  ::new ((void*)&NativeCallStack::EMPTY_STACK) NativeCallStack(0, false);
+
+  if (!MallocTracker::initialize(level) ||
+      !VirtualMemoryTracker::initialize(level)) {
+    level = NMT_off;
   }
+  return level;
 }
 
-// second phase of bootstrapping, when VM is about to enter or has already entered multi-threaded mode.
-void MemTracker::bootstrap_multi_thread() {
-  if (_tracking_level > NMT_off && _state == NMT_bootstrapping_single_thread) {
-    // create nmt lock for multi-thread execution
-    assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
-    _state = NMT_bootstrapping_multi_thread;
-    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
-  }
-}
-
-// fully start nmt
-void MemTracker::start() {
-  // Native memory tracking is off from command line option
-  if (_tracking_level == NMT_off || shutdown_in_progress()) return;
-
-  assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
-  assert(_state == NMT_bootstrapping_multi_thread, "wrong state");
-
-  _snapshot = new (std::nothrow)MemSnapshot();
-  if (_snapshot != NULL) {
-    if (!_snapshot->out_of_memory() && start_worker(_snapshot)) {
-      _state = NMT_started;
-      NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
+void MemTracker::init() {
+  NMT_TrackingLevel level = tracking_level();
+  if (level >= NMT_summary) {
+    if (!VirtualMemoryTracker::late_initialize(level)) {
+      shutdown();
       return;
     }
-
-    delete _snapshot;
-    _snapshot = NULL;
-  }
-
-  // fail to start native memory tracking, shut it down
-  shutdown(NMT_initialization);
-}
-
-/**
- * Shutting down native memory tracking.
- * We can not shut down native memory tracking immediately, so we just
- * set a shutdown-pending flag; every native memory tracking component
- * should then shut itself down in an orderly fashion.
- *
- * The shutdown sequence:
- *  1. MemTracker::shutdown() sets MemTracker to shutdown pending state
- *  2. Worker thread calls MemTracker::final_shutdown(), which transitions
- *     MemTracker to final shutdown state.
- *  3. At sync point, MemTracker does final cleanup, before setting the memory
- *     tracking level to off to complete shutdown.
- */
-void MemTracker::shutdown(ShutdownReason reason) {
-  if (_tracking_level == NMT_off) return;
-
-  if (_state <= NMT_bootstrapping_single_thread) {
-    // we are still in single-thread mode, so there is no contention
-    _state = NMT_shutdown_pending;
-    _reason = reason;
-  } else {
-    // we want to know who initiated the shutdown
-    if ((jint)NMT_started == Atomic::cmpxchg((jint)NMT_shutdown_pending,
-                                       (jint*)&_state, (jint)NMT_started)) {
-        _reason = reason;
-    }
-  }
-}
-
-// final phase of shutdown
-void MemTracker::final_shutdown() {
-  // delete all pending recorders and pooled recorders
-  delete_all_pending_recorders();
-  delete_all_pooled_recorders();
-
-  {
-    // shared baseline and snapshot are the only objects needed to
-    // create query results
-    MutexLockerEx locker(_query_lock, true);
-    // cleanup baseline data and snapshot
-    _baseline.clear();
-    delete _snapshot;
-    _snapshot = NULL;
-  }
-
-  // shutdown shared decoder instance, since it is only
-  // used by native memory tracking so far.
-  Decoder::shutdown();
-
-  MemTrackWorker* worker = NULL;
-  {
-    ThreadCritical tc;
-    // can not delete worker inside the thread critical
-    if (_worker_thread != NULL && Thread::current() == _worker_thread) {
-      worker = _worker_thread;
-      _worker_thread = NULL;
-    }
-  }
-  if (worker != NULL) {
-    delete worker;
-  }
-  _state = NMT_final_shutdown;
-}
-
-// delete all pooled recorders
-void MemTracker::delete_all_pooled_recorders() {
-  // free all pooled recorders
-  MemRecorder* volatile cur_head = _pooled_recorders;
-  if (cur_head != NULL) {
-    MemRecorder* null_ptr = NULL;
-    while (cur_head != NULL && (void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr,
-      (void*)&_pooled_recorders, (void*)cur_head)) {
-      cur_head = _pooled_recorders;
-    }
-    if (cur_head != NULL) {
-      delete cur_head;
-      _pooled_recorder_count = 0;
+    _query_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf, "NMT_queryLock");
+    // Already OOM. It is unlikely, but we still have to handle it.
+    if (_query_lock == NULL) {
+      shutdown();
     }
   }
 }
 
-// delete all recorders in pending queue
-void MemTracker::delete_all_pending_recorders() {
-  // free all pending recorders
-  MemRecorder* pending_head = get_pending_recorders();
-  if (pending_head != NULL) {
-    delete pending_head;
+bool MemTracker::check_launcher_nmt_support(const char* value) {
+  if (strcmp(value, "=detail") == 0) {
+#if !PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
+    jio_fprintf(defaultStream::error_stream(),
+      "NMT detail is not supported on this platform.  Using NMT summary instead.\n");
+    if (MemTracker::tracking_level() != NMT_summary) {
+      return false;
   }
-}
-
-/*
- * retrieve per-thread recorder of specified thread.
- * if thread == NULL, it means global recorder
- */
-MemRecorder* MemTracker::get_thread_recorder(JavaThread* thread) {
-  if (shutdown_in_progress()) return NULL;
-
-  MemRecorder* rc;
-  if (thread == NULL) {
-    rc = _global_recorder;
+#else
+    if (MemTracker::tracking_level() != NMT_detail) {
+      return false;
+    }
+#endif
+  } else if (strcmp(value, "=summary") == 0) {
+    if (MemTracker::tracking_level() != NMT_summary) {
+      return false;
+    }
+  } else if (strcmp(value, "=off") == 0) {
+    if (MemTracker::tracking_level() != NMT_off) {
+      return false;
+    }
   } else {
-    rc = thread->get_recorder();
-  }
-
-  if (rc != NULL && rc->is_full()) {
-    enqueue_pending_recorder(rc);
-    rc = NULL;
+    _is_nmt_env_valid = false;
   }
 
-  if (rc == NULL) {
-    rc = get_new_or_pooled_instance();
-    if (thread == NULL) {
-      _global_recorder = rc;
-    } else {
-      thread->set_recorder(rc);
-    }
-  }
-  return rc;
+  return true;
+}
+
+bool MemTracker::verify_nmt_option() {
+  return _is_nmt_env_valid;
+}
+
+void* MemTracker::malloc_base(void* memblock) {
+  return MallocTracker::get_base(memblock);
 }
 
-/*
- * get a per-thread recorder from pool, or create a new one if
- * there is not one available.
- */
-MemRecorder* MemTracker::get_new_or_pooled_instance() {
-   MemRecorder* cur_head = const_cast<MemRecorder*> (_pooled_recorders);
-   if (cur_head == NULL) {
-     MemRecorder* rec = new (std::nothrow)MemRecorder();
-     if (rec == NULL || rec->out_of_memory()) {
-       shutdown(NMT_out_of_memory);
-       if (rec != NULL) {
-         delete rec;
-         rec = NULL;
-       }
-     }
-     return rec;
-   } else {
-     MemRecorder* next_head = cur_head->next();
-     if ((void*)cur_head != Atomic::cmpxchg_ptr((void*)next_head, (void*)&_pooled_recorders,
-       (void*)cur_head)) {
-       return get_new_or_pooled_instance();
-     }
-     cur_head->set_next(NULL);
-     Atomic::dec(&_pooled_recorder_count);
-     cur_head->set_generation();
-     return cur_head;
+void Tracker::record(address addr, size_t size) {
+  if (MemTracker::tracking_level() < NMT_summary) return;
+  switch(_type) {
+    case uncommit:
+      VirtualMemoryTracker::remove_uncommitted_region(addr, size);
+      break;
+    case release:
+      VirtualMemoryTracker::remove_released_region(addr, size);
+      break;
+    default:
+      ShouldNotReachHere();
   }
 }
 
-/*
- * retrieve all recorders in pending queue, and empty the queue
- */
-MemRecorder* MemTracker::get_pending_recorders() {
-  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
-  MemRecorder* null_ptr = NULL;
-  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, (void*)&_merge_pending_queue,
-    (void*)cur_head)) {
-    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
-  }
-  NOT_PRODUCT(Atomic::store(0, &_pending_recorder_count));
-  return cur_head;
-}
 
-/*
- * release a recorder to recorder pool.
- */
-void MemTracker::release_thread_recorder(MemRecorder* rec) {
-  assert(rec != NULL, "null recorder");
-  // we don't want to pool too many recorders
-  rec->set_next(NULL);
-  if (shutdown_in_progress() || _pooled_recorder_count > _thread_count * 2) {
-    delete rec;
-    return;
-  }
-
-  rec->clear();
-  MemRecorder* cur_head = const_cast<MemRecorder*>(_pooled_recorders);
-  rec->set_next(cur_head);
-  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_pooled_recorders,
-    (void*)cur_head)) {
-    cur_head = const_cast<MemRecorder*>(_pooled_recorders);
-    rec->set_next(cur_head);
-  }
-  Atomic::inc(&_pooled_recorder_count);
-}
-
-// Write a record to the proper recorder. No lock can be taken from this method
-// downward.
-void MemTracker::write_tracking_record(address addr, MEMFLAGS flags,
-    size_t size, jint seq, address pc, JavaThread* thread) {
-
-    MemRecorder* rc = get_thread_recorder(thread);
-    if (rc != NULL) {
-      rc->record(addr, flags, size, seq, pc);
-    }
-}
-
-/**
- * enqueue a recorder to pending queue
- */
-void MemTracker::enqueue_pending_recorder(MemRecorder* rec) {
-  assert(rec != NULL, "null recorder");
-
-  // we are shutting down, so just delete it
-  if (shutdown_in_progress()) {
-    rec->set_next(NULL);
-    delete rec;
-    return;
-  }
-
-  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
-  rec->set_next(cur_head);
-  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_merge_pending_queue,
-    (void*)cur_head)) {
-    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
-    rec->set_next(cur_head);
-  }
-  NOT_PRODUCT(Atomic::inc(&_pending_recorder_count);)
-}
-
-/*
- * The method is called at a global safepoint
- * during its synchronization process.
- *   1. enqueue all JavaThreads' per-thread recorders
- *   2. enqueue global recorder
- *   3. retrieve all pending recorders
- *   4. reset global sequence number generator
- *   5. call worker's sync
- */
-#define MAX_SAFEPOINTS_TO_SKIP     128
-#define SAFE_SEQUENCE_THRESHOLD    30
-#define HIGH_GENERATION_THRESHOLD  60
-#define MAX_RECORDER_THREAD_RATIO  30
-#define MAX_RECORDER_PER_THREAD    100
-
-void MemTracker::sync() {
-  assert(_tracking_level > NMT_off, "NMT is not enabled");
-  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
-
-  // Some GC tests hit a large number of safepoints in a short period of time
-  // without meaningful activity. We should avoid going to the sync point in
-  // these cases, which can potentially exhaust the generation buffer.
-  // The factors that determine whether we should go into the sync point are:
-  // 1. whether the sequence number is about to overflow
-  // 2. whether we are in danger of overflowing the generation buffer
-  // 3. how many safepoints have already skipped the sync point
-  if (_state == NMT_started) {
-    // worker thread is not ready, no one can manage generation
-    // buffer, so skip this safepoint
-    if (_worker_thread == NULL) return;
-
-    if (_sync_point_skip_count < MAX_SAFEPOINTS_TO_SKIP) {
-      int per_seq_in_use = SequenceGenerator::peek() * 100 / max_jint;
-      int per_gen_in_use = _worker_thread->generations_in_use() * 100 / MAX_GENERATIONS;
-      if (per_seq_in_use < SAFE_SEQUENCE_THRESHOLD && per_gen_in_use >= HIGH_GENERATION_THRESHOLD) {
-        _sync_point_skip_count ++;
-        return;
-      }
-    }
-    {
-      // This method runs at a safepoint, with the ThreadCritical lock held;
-      // it should guarantee that NMT is fully sync-ed.
-      ThreadCritical tc;
-
-      // We can NOT execute NMT sync-point if there are pending tracking ops.
-      if (_pending_op_count == 0) {
-        SequenceGenerator::reset();
-        _sync_point_skip_count = 0;
-
-        // walk all JavaThreads to collect recorders
-        SyncThreadRecorderClosure stc;
-        Threads::threads_do(&stc);
-
-        _thread_count = stc.get_thread_count();
-        MemRecorder* pending_recorders = get_pending_recorders();
-
-        if (_global_recorder != NULL) {
-          _global_recorder->set_next(pending_recorders);
-          pending_recorders = _global_recorder;
-          _global_recorder = NULL;
-        }
-
-        // See if NMT has too many outstanding recorder instances; it usually
-        // means that the worker thread is lagging behind in processing them.
-        if (!AutoShutdownNMT) {
-          _slowdown_calling_thread = (MemRecorder::_instance_count > MAX_RECORDER_THREAD_RATIO * _thread_count);
-        } else {
-          // If auto shutdown is on, enforce MAX_RECORDER_PER_THREAD threshold to prevent OOM
-          if (MemRecorder::_instance_count >= _thread_count * MAX_RECORDER_PER_THREAD) {
-            shutdown(NMT_out_of_memory);
-          }
-        }
-
-        // check _worker_thread with the lock to avoid a race condition
-        if (_worker_thread != NULL) {
-          _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes());
-        }
-        assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point");
-      } else {
-        _sync_point_skip_count ++;
-      }
-    }
-  }
-
-  // now it is time to shut the whole thing off
-  if (_state == NMT_final_shutdown) {
-    // walk all JavaThreads to delete all recorders
-    SyncThreadRecorderClosure stc;
-    Threads::threads_do(&stc);
-    // delete global recorder
-    {
-      ThreadCritical tc;
-      if (_global_recorder != NULL) {
-        delete _global_recorder;
-        _global_recorder = NULL;
-      }
-    }
-    MemRecorder* pending_recorders = get_pending_recorders();
-    if (pending_recorders != NULL) {
-      delete pending_recorders;
-    }
-    // try at a later sync point to ensure the MemRecorder instance count drops to zero to
-    // completely shut down NMT
-    if (MemRecorder::_instance_count == 0) {
-      _state = NMT_shutdown;
-      _tracking_level = NMT_off;
-    }
+// Shutdown can only be issued via JCmd, and NMT JCmd is serialized by a lock
+void MemTracker::shutdown() {
+  // We can only shut down NMT to the minimal tracking level if it was ever on.
+  if (tracking_level() > NMT_minimal) {
+    transition_to(NMT_minimal);
   }
 }
 
-/*
- * Start worker thread.
- */
-bool MemTracker::start_worker(MemSnapshot* snapshot) {
-  assert(_worker_thread == NULL && _snapshot != NULL, "Just Check");
-  _worker_thread = new (std::nothrow) MemTrackWorker(snapshot);
-  if (_worker_thread == NULL) {
-    return false;
-  } else if (_worker_thread->has_error()) {
-    delete _worker_thread;
-    _worker_thread = NULL;
-    return false;
+bool MemTracker::transition_to(NMT_TrackingLevel level) {
+  NMT_TrackingLevel current_level = tracking_level();
+
+  assert(level != NMT_off || current_level == NMT_off, "Cannot transition NMT to off");
+
+  if (current_level == level) {
+    return true;
+  } else if (current_level > level) {
+    // Downgrade tracking level; we want to lower the tracking level first
+    _tracking_level = level;
+    // Make _tracking_level visible immediately.
+    OrderAccess::fence();
+    VirtualMemoryTracker::transition(current_level, level);
+    MallocTracker::transition(current_level, level);
+  } else {
+    // Upgrading tracking level is not supported and has never been supported.
+    // Allocating and deallocating malloc tracking structures is not thread safe and
+    // leads to inconsistencies unless much coarser locks are added.
   }
-  _worker_thread->start();
   return true;
 }
 
-/*
- * We need to collect a JavaThread's per-thread recorder
- * before it exits.
- */
-void MemTracker::thread_exiting(JavaThread* thread) {
-  if (is_on()) {
-    MemRecorder* rec = thread->get_recorder();
-    if (rec != NULL) {
-      enqueue_pending_recorder(rec);
-      thread->set_recorder(NULL);
+void MemTracker::report(bool summary_only, outputStream* output) {
+  assert(output != NULL, "No output stream");
+  MemBaseline baseline;
+  if (baseline.baseline(summary_only)) {
+    if (summary_only) {
+      MemSummaryReporter rpt(baseline, output);
+      rpt.report();
+    } else {
+      MemDetailReporter rpt(baseline, output);
+      rpt.report();
     }
   }
 }
 
-// baseline current memory snapshot
-bool MemTracker::baseline() {
-  MutexLocker lock(_query_lock);
-  MemSnapshot* snapshot = get_snapshot();
-  if (snapshot != NULL) {
-    return _baseline.baseline(*snapshot, false);
+// This is a walker to gather malloc site hashtable statistics;
+// the result is used for tuning.
+class StatisticsWalker : public MallocSiteWalker {
+ private:
+  enum Threshold {
+    // aggregates statistics over this threshold into one
+    // line item.
+    report_threshold = 20
+  };
+
+ private:
+  // Number of allocation sites that have all memory freed
+  int   _empty_entries;
+  // Total number of allocation sites, including empty sites
+  int   _total_entries;
+  // Distribution of captured call stack depths
+  int   _stack_depth_distribution[NMT_TrackingStackDepth];
+  // Hash distribution
+  int   _hash_distribution[report_threshold];
+  // Number of hash buckets that have entries over the threshold
+  int   _bucket_over_threshold;
+
+  // The hash bucket that the walker is currently walking
+  int   _current_hash_bucket;
+  // The length of the current hash bucket
+  int   _current_bucket_length;
+  // Number of hash buckets that are not empty
+  int   _used_buckets;
+  // Longest hash bucket length
+  int   _longest_bucket_length;
+
+ public:
+  StatisticsWalker() : _empty_entries(0), _total_entries(0) {
+    int index = 0;
+    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
+      _stack_depth_distribution[index] = 0;
+    }
+    for (index = 0; index < report_threshold; index ++) {
+      _hash_distribution[index] = 0;
+    }
+    _bucket_over_threshold = 0;
+    _longest_bucket_length = 0;
+    _current_hash_bucket = -1;
+    _current_bucket_length = 0;
+    _used_buckets = 0;
   }
-  return false;
-}
+
+  virtual bool at(const MallocSite* e) {
+    if (e->size() == 0) _empty_entries ++;
+    _total_entries ++;
+
+    // stack depth distribution
+    int frames = e->call_stack()->frames();
+    _stack_depth_distribution[frames - 1] ++;
 
-// print memory usage from current snapshot
-bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
-  MemBaseline  baseline;
-  MutexLocker  lock(_query_lock);
-  MemSnapshot* snapshot = get_snapshot();
-  if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
-    BaselineReporter reporter(out, unit);
-    reporter.report_baseline(baseline, summary_only);
+    // hash distribution
+    int hash_bucket = e->hash() % MallocSiteTable::hash_buckets();
+    if (_current_hash_bucket == -1) {
+      _current_hash_bucket = hash_bucket;
+      _current_bucket_length = 1;
+    } else if (_current_hash_bucket == hash_bucket) {
+      _current_bucket_length ++;
+    } else {
+      record_bucket_length(_current_bucket_length);
+      _current_hash_bucket = hash_bucket;
+      _current_bucket_length = 1;
+    }
     return true;
   }
-  return false;
-}
+
+  // walk completed
+  void completed() {
+    record_bucket_length(_current_bucket_length);
+  }
 
-// Whitebox API for blocking until the current generation of NMT data has been merged
-bool MemTracker::wbtest_wait_for_data_merge() {
-  // NMT can't be shutdown while we're holding _query_lock
-  MutexLocker lock(_query_lock);
-  assert(_worker_thread != NULL, "Invalid query");
-  // the generation at query time, so NMT will spin till this generation is processed
-  unsigned long generation_at_query_time = SequenceGenerator::current_generation();
-  unsigned long current_processing_generation = _processing_generation;
-  // if generation counter overflown
-  bool generation_overflown = (generation_at_query_time < current_processing_generation);
-  long generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
-  // spin
-  while (!shutdown_in_progress()) {
-    if (!generation_overflown) {
-      if (current_processing_generation > generation_at_query_time) {
-        return true;
-      }
-    } else {
-      assert(generations_to_wrap >= 0, "Sanity check");
-      long current_generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
-      assert(current_generations_to_wrap >= 0, "Sanity check");
-      // overflowing an unsigned long should take a long time, so the to_wrap check should be sufficient
-      if (current_generations_to_wrap > generations_to_wrap &&
-          current_processing_generation > generation_at_query_time) {
-        return true;
+  void report_statistics(outputStream* out) {
+    int index;
+    out->print_cr("Malloc allocation site table:");
+    out->print_cr("\tTotal entries: %d", _total_entries);
+    out->print_cr("\tEmpty entries: %d (%2.2f%%)", _empty_entries, ((float)_empty_entries * 100) / _total_entries);
+    out->print_cr(" ");
+    out->print_cr("Hash distribution:");
+    if (_used_buckets < MallocSiteTable::hash_buckets()) {
+      out->print_cr("empty bucket: %d", (MallocSiteTable::hash_buckets() - _used_buckets));
+    }
+    for (index = 0; index < report_threshold; index ++) {
+      if (_hash_distribution[index] != 0) {
+        if (index == 0) {
+          out->print_cr("  %d    entry: %d", 1, _hash_distribution[0]);
+        } else if (index < 9) { // single digit
+          out->print_cr("  %d  entries: %d", (index + 1), _hash_distribution[index]);
+        } else {
+          out->print_cr(" %d entries: %d", (index + 1), _hash_distribution[index]);
+        }
       }
     }
-
-    // if the worker thread is idle but the generation is not advancing, that means
-    // there is no safepoint to let NMT advance the generation, so force one.
-    if (_worker_thread_idle) {
-      VM_ForceSafepoint vfs;
-      VMThread::execute(&vfs);
-    }
-    MemSnapshot* snapshot = get_snapshot();
-    if (snapshot == NULL) {
-      return false;
-    }
-    snapshot->wait(1000);
-    current_processing_generation = _processing_generation;
-  }
-  // We end up here if NMT is shutting down before our data has been merged
-  return false;
-}
-
-// compare memory usage between current snapshot and baseline
-bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
-  MutexLocker lock(_query_lock);
-  if (_baseline.baselined()) {
-    MemBaseline baseline;
-    MemSnapshot* snapshot = get_snapshot();
-    if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
-      BaselineReporter reporter(out, unit);
-      reporter.diff_baselines(baseline, _baseline, summary_only);
-      return true;
-    }
-  }
-  return false;
-}
-
-#ifndef PRODUCT
-void MemTracker::walk_stack(int toSkip, char* buf, int len) {
-  int cur_len = 0;
-  char tmp[1024];
-  address pc;
-
-  while (cur_len < len) {
-    pc = os::get_caller_pc(toSkip + 1);
-    if (pc != NULL && os::dll_address_to_function_name(pc, tmp, sizeof(tmp), NULL)) {
-      jio_snprintf(&buf[cur_len], (len - cur_len), "%s\n", tmp);
-      cur_len = (int)strlen(buf);
-    } else {
-      buf[cur_len] = '\0';
-      break;
+    if (_bucket_over_threshold > 0) {
+      out->print_cr(" >%d entries: %d", report_threshold,  _bucket_over_threshold);
     }
-    toSkip ++;
-  }
-}
-
-void MemTracker::print_tracker_stats(outputStream* st) {
-  st->print_cr("\nMemory Tracker Stats:");
-  st->print_cr("\tMax sequence number = %d", SequenceGenerator::max_seq_num());
-  st->print_cr("\tthead count = %d", _thread_count);
-  st->print_cr("\tArena instance = %d", Arena::_instance_count);
-  st->print_cr("\tpooled recorder count = %d", _pooled_recorder_count);
-  st->print_cr("\tqueued recorder count = %d", _pending_recorder_count);
-  st->print_cr("\tmemory recorder instance count = %d", MemRecorder::_instance_count);
-  if (_worker_thread != NULL) {
-    st->print_cr("\tWorker thread:");
-    st->print_cr("\t\tSync point count = %d", _worker_thread->_sync_point_count);
-    st->print_cr("\t\tpending recorder count = %d", _worker_thread->count_pending_recorders());
-    st->print_cr("\t\tmerge count = %d", _worker_thread->_merge_count);
-  } else {
-    st->print_cr("\tWorker thread is not started");
-  }
-  st->print_cr(" ");
-
-  if (_snapshot != NULL) {
-    _snapshot->print_snapshot_stats(st);
-  } else {
-    st->print_cr("No snapshot");
-  }
-}
-#endif
-
-
-// Tracker Implementation
-
-/*
- * Create a tracker.
- * This is a fairly complicated constructor, as it has to make two important decisions:
- *   1) Does it need to take the ThreadCritical lock to write the tracking record?
- *   2) Does it need to pre-reserve a sequence number for the tracking record?
- *
- * The rules to determine if ThreadCritical is needed:
- *   1. When nmt is in single-threaded bootstrapping mode, no lock is needed as
- *      the VM is still in single-thread mode.
- *   2. For all threads other than JavaThread, ThreadCritical is needed
- *      to write records to the global recorder.
- *   3. For JavaThreads that are no longer visible to safepoints, ThreadCritical
- *      also needs to be taken, and records are written to the global
- *      recorder, since these threads are NOT walked by Threads.do_thread().
- *   4. JavaThreads that are running in safepoint-safe states do not stop
- *      for safepoints, so the ThreadCritical lock should be taken to write
- *      memory records.
- *   5. JavaThreads that are running in VM state do not need any lock and
- *      records are written to per-thread recorders.
- *   6. Threads that have yet to attach to a VM 'Thread' need to take
- *      ThreadCritical to write to the global recorder.
- *
- *  The memory operations that need pre-reserved sequence numbers:
- *    Memory operations that "release" memory blocks and that can fail
- *    need to pre-reserve a sequence number. They are realloc, uncommit
- *    and release.
- *
- *  The reason for pre-reserving a sequence number is to prevent a race condition:
- *    Thread 1                      Thread 2
- *    <release>
- *                                  <allocate>
- *                                  <write allocate record>
- *   <write release record>
- *   if Thread 2 happens to obtain the memory address Thread 1 just released,
- *   then NMT can mistakenly report the memory is free.
- *
- *  Noticeably, free() does not need a pre-reserved sequence number, because the call
- *  does not fail, so we can always write the "release" record before the memory is actually
- *  freed.
- *
- *  For realloc, uncommit and release, following coding pattern should be used:
- *
- *     MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
- *     ptr = ::realloc(...);
- *     if (ptr == NULL) {
- *       tkr.record(...)
- *     } else {
- *       tkr.discard();
- *     }
- *
- *     MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
- *     if (uncommit(...)) {
- *       tkr.record(...);
- *     } else {
- *       tkr.discard();
- *     }
- *
- *     MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
- *     if (release(...)) {
- *       tkr.record(...);
- *     } else {
- *       tkr.discard();
- *     }
- *
- * Since a pre-reserved sequence number is only good for the generation in which it is acquired,
- * when there is a pending Tracker that has reserved a sequence number, the NMT sync-point has
- * to be skipped to prevent advancing the generation. This is done by incrementing and decrementing
- * MemTracker::_pending_op_count; when MemTracker::_pending_op_count > 0, the NMT sync-point is skipped.
- * Not all pre-reservations of sequence numbers increment the pending op count. For JavaThreads
- * that honor safepoints, a safepoint can not occur during the memory operations, so the
- * pre-reserved sequence number won't cross the generation boundary.
- */
-MemTracker::Tracker::Tracker(MemoryOperation op, Thread* thr) {
-  _op = NoOp;
-  _seq = 0;
-  if (MemTracker::is_on()) {
-    _java_thread = NULL;
-    _op = op;
-
-    // figure out if ThreadCritical lock is needed to write this operation
-    // to MemTracker
-    if (MemTracker::is_single_threaded_bootstrap()) {
-      thr = NULL;
-    } else if (thr == NULL) {
-      // don't use Thread::current(), since it is possible that
-      // the calling thread has yet to attach to VM 'Thread',
-      // which will result in an assertion failure
-      thr = ThreadLocalStorage::thread();
-    }
-
-    if (thr != NULL) {
-      // Check NMT load
-      MemTracker::check_NMT_load(thr);
-
-      if (thr->is_Java_thread() && ((JavaThread*)thr)->is_safepoint_visible()) {
-        _java_thread = (JavaThread*)thr;
-        JavaThreadState  state = _java_thread->thread_state();
-        // JavaThreads that are safepoint safe can run through safepoints,
-        // so ThreadCritical is needed to ensure no threads at safepoint create
-        // new records while the records are being gathered and the sequence number is changing
-        _need_thread_critical_lock =
-          SafepointSynchronize::safepoint_safe(_java_thread, state);
-      } else {
-        _need_thread_critical_lock = true;
-      }
-    } else {
-       _need_thread_critical_lock
-         = !MemTracker::is_single_threaded_bootstrap();
-    }
-
-    // see if we need to pre-reserve sequence number for this operation
-    if (_op == Realloc || _op == Uncommit || _op == Release) {
-      if (_need_thread_critical_lock) {
-        ThreadCritical tc;
-        MemTracker::inc_pending_op_count();
-        _seq = SequenceGenerator::next();
-      } else {
-        // for the threads that honor safepoints, no safepoint can occur
-        // during the lifespan of the tracker, so we don't need to increase
-        // the pending op count.
-        _seq = SequenceGenerator::next();
+    out->print_cr("most entries: %d", _longest_bucket_length);
+    out->print_cr(" ");
+    out->print_cr("Call stack depth distribution:");
+    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
+      if (_stack_depth_distribution[index] > 0) {
+        out->print_cr("\t%d: %d", index + 1, _stack_depth_distribution[index]);
       }
     }
   }
-}
 
-void MemTracker::Tracker::discard() {
-  if (MemTracker::is_on() && _seq != 0) {
-    if (_need_thread_critical_lock) {
-      ThreadCritical tc;
-      MemTracker::dec_pending_op_count();
+ private:
+  void record_bucket_length(int length) {
+    _used_buckets ++;
+    if (length <= report_threshold) {
+      _hash_distribution[length - 1] ++;
+    } else {
+      _bucket_over_threshold ++;
     }
-    _seq = 0;
+    _longest_bucket_length = MAX2(_longest_bucket_length, length);
   }
-}
+};
 
 
-void MemTracker::Tracker::record(address old_addr, address new_addr, size_t size,
-  MEMFLAGS flags, address pc) {
-  assert(old_addr != NULL && new_addr != NULL, "Sanity check");
-  assert(_op == Realloc || _op == NoOp, "Wrong call");
-  if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) {
-    assert(_seq > 0, "Need pre-reserve sequence number");
-    if (_need_thread_critical_lock) {
-      ThreadCritical tc;
-      // free old address, use pre-reserved sequence number
-      MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(),
-        0, _seq, pc, _java_thread);
-      MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(),
-        size, SequenceGenerator::next(), pc, _java_thread);
-      // decrement MemTracker pending_op_count
-      MemTracker::dec_pending_op_count();
-    } else {
-      // free old address, use pre-reserved sequence number
-      MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(),
-        0, _seq, pc, _java_thread);
-      MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(),
-        size, SequenceGenerator::next(), pc, _java_thread);
-    }
-    _seq = 0;
-  }
+void MemTracker::tuning_statistics(outputStream* out) {
+  // NMT statistics
+  StatisticsWalker walker;
+  MallocSiteTable::walk_malloc_site(&walker);
+  walker.completed();
+
+  out->print_cr("Native Memory Tracking Statistics:");
+  out->print_cr("Malloc allocation site table size: %d", MallocSiteTable::hash_buckets());
+  out->print_cr("             Tracking stack depth: %d", NMT_TrackingStackDepth);
+  NOT_PRODUCT(out->print_cr("Peak concurrent access: %d", MallocSiteTable::access_peak_count());)
+  out->print_cr(" ");
+  walker.report_statistics(out);
 }
 
-void MemTracker::Tracker::record(address addr, size_t size, MEMFLAGS flags, address pc) {
-  // OOM already?
-  if (addr == NULL) return;
-
-  if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) {
-    bool pre_reserved_seq = (_seq != 0);
-    address  pc = CALLER_CALLER_PC;
-    MEMFLAGS orig_flags = flags;
-
-    // OR in the tagging flags
-    switch(_op) {
-      case Malloc:
-        flags |= MemPointerRecord::malloc_tag();
-        break;
-      case Free:
-        flags = MemPointerRecord::free_tag();
-        break;
-      case Realloc:
-        fatal("Use the other Tracker::record()");
-        break;
-      case Reserve:
-      case ReserveAndCommit:
-        flags |= MemPointerRecord::virtual_memory_reserve_tag();
-        break;
-      case Commit:
-        flags = MemPointerRecord::virtual_memory_commit_tag();
-        break;
-      case Type:
-        flags |= MemPointerRecord::virtual_memory_type_tag();
-        break;
-      case Uncommit:
-        assert(pre_reserved_seq, "Need pre-reserve sequence number");
-        flags = MemPointerRecord::virtual_memory_uncommit_tag();
-        break;
-      case Release:
-        assert(pre_reserved_seq, "Need pre-reserve sequence number");
-        flags = MemPointerRecord::virtual_memory_release_tag();
-        break;
-      case ArenaSize:
-        // a bit of a hack here: add a small positive offset to the arena
-        // address for its size record, so the size record is sorted
-        // right after the arena record.
-        flags = MemPointerRecord::arena_size_tag();
-        addr += sizeof(void*);
-        break;
-      case StackRelease:
-        flags = MemPointerRecord::virtual_memory_release_tag();
-        break;
-      default:
-        ShouldNotReachHere();
-    }
-
-    // write memory tracking record
-    if (_need_thread_critical_lock) {
-      ThreadCritical tc;
-      if (_seq == 0) _seq = SequenceGenerator::next();
-      MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread);
-      if (_op == ReserveAndCommit) {
-        MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(),
-          size, SequenceGenerator::next(), pc, _java_thread);
-      }
-      if (pre_reserved_seq) MemTracker::dec_pending_op_count();
-    } else {
-      if (_seq == 0) _seq = SequenceGenerator::next();
-      MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread);
-      if (_op == ReserveAndCommit) {
-        MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(),
-          size, SequenceGenerator::next(), pc, _java_thread);
-      }
-    }
-    _seq = 0;
-  }
-}
-
--- a/src/share/vm/services/memTracker.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/services/memTracker.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,574 +25,297 @@
 #ifndef SHARE_VM_SERVICES_MEM_TRACKER_HPP
 #define SHARE_VM_SERVICES_MEM_TRACKER_HPP
 
-#include "utilities/macros.hpp"
+#include "services/nmtCommon.hpp"
+#include "utilities/nativeCallStack.hpp"
+
 
 #if !INCLUDE_NMT
 
-#include "utilities/ostream.hpp"
+#define CURRENT_PC   NativeCallStack::EMPTY_STACK
+#define CALLER_PC    NativeCallStack::EMPTY_STACK
 
-class BaselineOutputer : public StackObj {
-
+class Tracker : public StackObj {
+ public:
+  Tracker() { }
+  void record(address addr, size_t size) { }
 };
 
-class BaselineTTYOutputer : public BaselineOutputer {
-  public:
-    BaselineTTYOutputer(outputStream* st) { }
+class MemTracker : AllStatic {
+ public:
+  static inline NMT_TrackingLevel tracking_level() { return NMT_off; }
+  static inline void shutdown() { }
+  static inline void init() { }
+  static bool check_launcher_nmt_support(const char* value) { return true; }
+  static bool verify_nmt_option() { return true; }
+
+  static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag,
+    const NativeCallStack& stack, NMT_TrackingLevel level) { return mem_base; }
+  static inline size_t malloc_header_size(NMT_TrackingLevel level) { return 0; }
+  static inline size_t malloc_header_size(void* memblock) { return 0; }
+  static inline void* malloc_base(void* memblock) { return memblock; }
+  static inline void* record_free(void* memblock) { return memblock; }
+
+  static inline void record_new_arena(MEMFLAGS flag) { }
+  static inline void record_arena_free(MEMFLAGS flag) { }
+  static inline void record_arena_size_change(int diff, MEMFLAGS flag) { }
+  static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
+                       MEMFLAGS flag = mtNone) { }
+  static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
+    const NativeCallStack& stack, MEMFLAGS flag = mtNone) { }
+  static inline void record_virtual_memory_commit(void* addr, size_t size, const NativeCallStack& stack) { }
+  static inline Tracker get_virtual_memory_uncommit_tracker() { return Tracker(); }
+  static inline Tracker get_virtual_memory_release_tracker() { return Tracker(); }
+  static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) { }
+  static inline void record_thread_stack(void* addr, size_t size) { }
+  static inline void release_thread_stack(void* addr, size_t size) { }
+
+  static void final_report(outputStream*) { }
+  static void error_report(outputStream*) { }
+};
+
+#else
+
+#include "runtime/atomic.hpp"
+#include "runtime/threadCritical.hpp"
+#include "services/mallocTracker.hpp"
+#include "services/virtualMemoryTracker.hpp"
+
+extern volatile bool NMT_stack_walkable;
+
+#define CURRENT_PC ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ? \
+                    NativeCallStack(0, true) : NativeCallStack::EMPTY_STACK)
+#define CALLER_PC  ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ?  \
+                    NativeCallStack(1, true) : NativeCallStack::EMPTY_STACK)
+
+class MemBaseline;
+class Mutex;
+
+// Tracker is used for guarding 'release' semantics of virtual memory operations, to prevent
+// another thread from obtaining and recording the same region that was just 'released' by the
+// current thread before the current thread can record the operation.
+class Tracker : public StackObj {
+ public:
+  enum TrackerType {
+     uncommit,
+     release
+  };
+
+ public:
+  Tracker(enum TrackerType type) : _type(type) { }
+  void record(address addr, size_t size);
+ private:
+  enum TrackerType  _type;
+  // Virtual memory tracking data structures are protected by ThreadCritical lock.
+  ThreadCritical    _tc;
 };
 
 class MemTracker : AllStatic {
-  public:
-   enum ShutdownReason {
-      NMT_shutdown_none,     // no shutdown requested
-      NMT_shutdown_user,     // user requested shutdown
-      NMT_normal,            // normal shutdown, process exit
-      NMT_out_of_memory,     // shutdown due to out of memory
-      NMT_initialization,    // shutdown due to initialization failure
-      NMT_use_malloc_only,   // can not combine NMT with UseMallocOnly flag
-      NMT_error_reporting,   // shutdown by vmError::report_and_die()
-      NMT_out_of_generation, // running out of generation queue
-      NMT_sequence_overflow  // overflow the sequence number
-   };
-
-  class Tracker {
-   public:
-    void discard() { }
-
-    void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL) { }
-    void record(address old_addr, address new_addr, size_t size,
-      MEMFLAGS flags, address pc = NULL) { }
-  };
-
-  private:
-   static Tracker  _tkr;
-
-
-  public:
-   static inline void init_tracking_options(const char* option_line) { }
-   static inline bool is_on()   { return false; }
-   static const char* reason()  { return "Native memory tracking is not implemented"; }
-   static inline bool can_walk_stack() { return false; }
-
-   static inline void bootstrap_single_thread() { }
-   static inline void bootstrap_multi_thread() { }
-   static inline void start() { }
-
-   static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
-        address pc = 0, Thread* thread = NULL) { }
-   static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) { }
-   static inline void record_arena_size(address addr, size_t size) { }
-   static inline void record_virtual_memory_reserve(address addr, size_t size,
-        MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
-   static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size,
-        MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
-   static inline void record_virtual_memory_commit(address addr, size_t size,
-        address pc = 0, Thread* thread = NULL) { }
-   static inline void record_virtual_memory_release(address addr, size_t size,
-        Thread* thread = NULL) { }
-   static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
-        Thread* thread = NULL) { }
-   static inline Tracker get_realloc_tracker() { return _tkr; }
-   static inline Tracker get_virtual_memory_uncommit_tracker() { return _tkr; }
-   static inline Tracker get_virtual_memory_release_tracker()  { return _tkr; }
-   static inline bool baseline() { return false; }
-   static inline bool has_baseline() { return false; }
-
-   static inline void set_autoShutdown(bool value) { }
-   static void shutdown(ShutdownReason reason) { }
-   static inline bool shutdown_in_progress() { return false; }
-   static bool print_memory_usage(BaselineOutputer& out, size_t unit,
-            bool summary_only = true) { return false; }
-   static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
-            bool summary_only = true) { return false; }
-
-   static bool wbtest_wait_for_data_merge() { return false; }
-
-   static inline void sync() { }
-   static inline void thread_exiting(JavaThread* thread) { }
-};
-
-
-#else // !INCLUDE_NMT
-
-#include "memory/allocation.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/mutex.hpp"
-#include "runtime/os.hpp"
-#include "runtime/thread.hpp"
-#include "services/memPtr.hpp"
-#include "services/memRecorder.hpp"
-#include "services/memSnapshot.hpp"
-#include "services/memTrackWorker.hpp"
-
-extern bool NMT_track_callsite;
-
-#ifndef MAX_UNSIGNED_LONG
-#define MAX_UNSIGNED_LONG    (unsigned long)(-1)
-#endif
-
-#ifdef ASSERT
-  #define DEBUG_CALLER_PC  (NMT_track_callsite ? os::get_caller_pc(2) : 0)
-#else
-  #define DEBUG_CALLER_PC  0
-#endif
-
-// The thread closure walks threads to collect per-thread
-// memory recorders at NMT sync point
-class SyncThreadRecorderClosure : public ThreadClosure {
- private:
-  int _thread_count;
-
  public:
-  SyncThreadRecorderClosure() {
-    _thread_count =0;
-  }
-
-  void do_thread(Thread* thread);
-  int  get_thread_count() const {
-    return _thread_count;
-  }
-};
-
-class BaselineOutputer;
-class MemSnapshot;
-class MemTrackWorker;
-class Thread;
-/*
- * MemTracker is the 'gate' class to native memory tracking runtime.
- */
-class MemTracker : AllStatic {
-  friend class GenerationData;
-  friend class MemTrackWorker;
-  friend class MemSnapshot;
-  friend class SyncThreadRecorderClosure;
-
-  // NMT state
-  enum NMTStates {
-    NMT_uninited,                        // not yet initialized
-    NMT_bootstrapping_single_thread,     // bootstrapping, VM is in single thread mode
-    NMT_bootstrapping_multi_thread,      // bootstrapping, VM is about to enter multi-thread mode
-    NMT_started,                         // NMT fully started
-    NMT_shutdown_pending,                // shutdown pending
-    NMT_final_shutdown,                  // in final phase of shutdown
-    NMT_shutdown                         // shutdown
-  };
-
- public:
-  class Tracker : public StackObj {
-    friend class MemTracker;
-   public:
-    enum MemoryOperation {
-      NoOp,                   // no op
-      Malloc,                 // malloc
-      Realloc,                // realloc
-      Free,                   // free
-      Reserve,                // virtual memory reserve
-      Commit,                 // virtual memory commit
-      ReserveAndCommit,       // virtual memory reserve and commit
-      StackAlloc = ReserveAndCommit, // allocate thread stack
-      Type,                   // assign virtual memory type
-      Uncommit,               // virtual memory uncommit
-      Release,                // virtual memory release
-      ArenaSize,              // set arena size
-      StackRelease            // release thread stack
-    };
-
-
-   protected:
-    Tracker(MemoryOperation op, Thread* thr = NULL);
-
-   public:
-    void discard();
-
-    void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL);
-    void record(address old_addr, address new_addr, size_t size,
-      MEMFLAGS flags, address pc = NULL);
-
-   private:
-    bool            _need_thread_critical_lock;
-    JavaThread*     _java_thread;
-    MemoryOperation _op;          // memory operation
-    jint            _seq;         // reserved sequence number
-  };
-
-
- public:
-  // native memory tracking level
-  enum NMTLevel {
-    NMT_off,              // native memory tracking is off
-    NMT_summary,          // don't track callsite
-    NMT_detail            // track callsite also
-  };
-
-   enum ShutdownReason {
-     NMT_shutdown_none,     // no shutdown requested
-     NMT_shutdown_user,     // user requested shutdown
-     NMT_normal,            // normal shutdown, process exit
-     NMT_out_of_memory,     // shutdown due to out of memory
-     NMT_initialization,    // shutdown due to initialization failure
-     NMT_use_malloc_only,   // can not combine NMT with UseMallocOnly flag
-     NMT_error_reporting,   // shutdown by vmError::report_and_die()
-     NMT_out_of_generation, // running out of generation queue
-     NMT_sequence_overflow  // overflow the sequence number
-   };
-
- public:
-  // initialize NMT tracking level from command line options, called
-  // from VM command line parsing code
-  static void init_tracking_options(const char* option_line);
-
-  // if NMT is enabled to record memory activities
-  static inline bool is_on() {
-    return (_tracking_level >= NMT_summary &&
-      _state >= NMT_bootstrapping_single_thread);
-  }
-
-  static inline enum NMTLevel tracking_level() {
+  static inline NMT_TrackingLevel tracking_level() {
+    if (_tracking_level == NMT_unknown) {
+      // No fencing is needed here, since JVM is in single-threaded
+      // mode.
+      _tracking_level = init_tracking_level();
+      _cmdline_tracking_level = _tracking_level;
+    }
     return _tracking_level;
   }
 
-  // user readable reason for shutting down NMT
-  static const char* reason() {
-    switch(_reason) {
-      case NMT_shutdown_none:
-        return "Native memory tracking is not enabled";
-      case NMT_shutdown_user:
-        return "Native memory tracking has been shutdown by user";
-      case NMT_normal:
-        return "Native memory tracking has been shutdown due to process exiting";
-      case NMT_out_of_memory:
-        return "Native memory tracking has been shutdown due to out of native memory";
-      case NMT_initialization:
-        return "Native memory tracking failed to initialize";
-      case NMT_error_reporting:
-        return "Native memory tracking has been shutdown due to error reporting";
-      case NMT_out_of_generation:
-        return "Native memory tracking has been shutdown due to running out of generation buffer";
-      case NMT_sequence_overflow:
-        return "Native memory tracking has been shutdown due to overflow the sequence number";
-      case NMT_use_malloc_only:
-        return "Native memory tracking is not supported when UseMallocOnly is on";
-      default:
-        ShouldNotReachHere();
-        return NULL;
-    }
+  // A late initialization, for work that can not be
+  // done in init_tracking_level(), which is not allowed
+  // to malloc any memory.
+  static void init();
+
+  // Shutdown native memory tracking
+  static void shutdown();
+
+  // Verify native memory tracking command line option.
+  // This check allows the JVM to detect whether a compatible
+  // launcher is used.
+  // If an incompatible launcher is used, NMT may not be
+  // able to start, even if it is enabled by command line option.
+  // A warning message should be given if this is encountered.
+  static bool check_launcher_nmt_support(const char* value);
+
+  // This method checks the native memory tracking environment
+  // variable value passed by the launcher.
+  // The launcher is only obligated to pass the native memory
+  // tracking option value, not to validate it, and the launcher
+  // may discard the native memory tracking option from the
+  // command line once it has set up the environment variable,
+  // so NMT has to catch a bad value here.
+  static bool verify_nmt_option();
+
+  // Transition the tracking level to specified level
+  static bool transition_to(NMT_TrackingLevel level);
+
+  static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag,
+    const NativeCallStack& stack, NMT_TrackingLevel level) {
+    return MallocTracker::record_malloc(mem_base, size, flag, stack, level);
+  }
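+  // A minimal call-site sketch for the method above (illustrative; 'outer_ptr',
+  // 'size' and 'memflags' are placeholders, not names from this file):
+  //   void* p = MemTracker::record_malloc(outer_ptr, size, memflags,
+  //                                       CALLER_PC, MemTracker::tracking_level());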
+
+  static inline size_t malloc_header_size(NMT_TrackingLevel level) {
+    return MallocTracker::malloc_header_size(level);
   }
 
-  // test if we can walk native stack
-  static bool can_walk_stack() {
-  // native stack is not walkable during bootstrapping on sparc
-#if defined(SPARC)
-    return (_state == NMT_started);
-#else
-    return (_state >= NMT_bootstrapping_single_thread && _state  <= NMT_started);
-#endif
+  static size_t malloc_header_size(void* memblock) {
+    if (tracking_level() != NMT_off) {
+      return MallocTracker::get_header_size(memblock);
+    }
+    return 0;
+  }
+
+  // Returns the malloc base address, which is the starting address
+  // of the malloc tracking header if tracking is enabled.
+  // Otherwise, it returns the same address.
+  static void* malloc_base(void* memblock);
+
+  // Record malloc free and return malloc base address
+  static inline void* record_free(void* memblock) {
+    return MallocTracker::record_free(memblock);
   }
 
-  // if native memory tracking tracks callsite
-  static inline bool track_callsite() { return _tracking_level == NMT_detail; }
+
+  // Record creation of an arena
+  static inline void record_new_arena(MEMFLAGS flag) {
+    if (tracking_level() < NMT_summary) return;
+    MallocTracker::record_new_arena(flag);
+  }
+
+  // Record destruction of an arena
+  static inline void record_arena_free(MEMFLAGS flag) {
+    if (tracking_level() < NMT_summary) return;
+    MallocTracker::record_arena_free(flag);
+  }
 
-  // NMT automatically shuts itself down under extreme situations by default.
-  // When the value is set to false, NMT will try its best to stay alive,
-  // even if it has to slow down the VM.
-  static inline void set_autoShutdown(bool value) {
-    AutoShutdownNMT = value;
-    if (AutoShutdownNMT && _slowdown_calling_thread) {
-      _slowdown_calling_thread = false;
+  // Record arena size change. Arena size is the size of all arena
+  // chunks that back the arena.
+  static inline void record_arena_size_change(int diff, MEMFLAGS flag) {
+    if (tracking_level() < NMT_summary) return;
+    MallocTracker::record_arena_size_change(diff, flag);
+  }
+
+  static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
+    MEMFLAGS flag = mtNone) {
+    if (tracking_level() < NMT_summary) return;
+    if (addr != NULL) {
+      ThreadCritical tc;
+      // Recheck to avoid potential racing during NMT shutdown
+      if (tracking_level() < NMT_summary) return;
+      VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag);
     }
   }
 
-  // shutdown native memory tracking capability. Native memory tracking
-  // can be shutdown by VM when it encounters low memory scenarios.
-  // Memory tracker should gracefully shutdown itself, and preserve the
-  // latest memory statistics for post morten diagnosis.
-  static void shutdown(ShutdownReason reason);
-
-  // if there is shutdown requested
-  static inline bool shutdown_in_progress() {
-    return (_state >= NMT_shutdown_pending);
-  }
-
-  // bootstrap native memory tracking, so it can start to collect raw data
-  // before worker thread can start
-
-  // the first phase of bootstrapping, when VM still in single-threaded mode
-  static void bootstrap_single_thread();
-  // the second phase of bootstrapping, VM is about or already in multi-threaded mode
-  static void bootstrap_multi_thread();
-
-
-  // start() has to be called when VM still in single thread mode, but after
-  // command line option parsing is done.
-  static void start();
-
-  // record a 'malloc' call
-  static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
-                            address pc = 0, Thread* thread = NULL) {
-    Tracker tkr(Tracker::Malloc, thread);
-    tkr.record(addr, size, flags, pc);
-  }
-  // record a 'free' call
-  static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) {
-    Tracker tkr(Tracker::Free, thread);
-    tkr.record(addr, 0, flags, DEBUG_CALLER_PC);
+  static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
+    const NativeCallStack& stack, MEMFLAGS flag = mtNone) {
+    if (tracking_level() < NMT_summary) return;
+    if (addr != NULL) {
+      ThreadCritical tc;
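+      // Recheck to avoid potential racing during NMT shutdown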
+      if (tracking_level() < NMT_summary) return;
+      VirtualMemoryTracker::add_reserved_region((address)addr, size,
+        stack, flag, true);
+    }
   }
 
-  static inline void record_arena_size(address addr, size_t size) {
-    Tracker tkr(Tracker::ArenaSize);
-    tkr.record(addr, size);
-  }
-
-  // record a virtual memory 'reserve' call
-  static inline void record_virtual_memory_reserve(address addr, size_t size,
-                     MEMFLAGS flags, address pc = 0, Thread* thread = NULL) {
-    assert(size > 0, "Sanity check");
-    Tracker tkr(Tracker::Reserve, thread);
-    tkr.record(addr, size, flags, pc);
-  }
-
-  static inline void record_thread_stack(address addr, size_t size, Thread* thr,
-                           address pc = 0) {
-    Tracker tkr(Tracker::StackAlloc, thr);
-    tkr.record(addr, size, mtThreadStack, pc);
-  }
-
-  static inline void release_thread_stack(address addr, size_t size, Thread* thr) {
-    Tracker tkr(Tracker::StackRelease, thr);
-    tkr.record(addr, size, mtThreadStack, DEBUG_CALLER_PC);
-  }
-
-  // record a virtual memory 'commit' call
-  static inline void record_virtual_memory_commit(address addr, size_t size,
-                            address pc, Thread* thread = NULL) {
-    Tracker tkr(Tracker::Commit, thread);
-    tkr.record(addr, size, mtNone, pc);
-  }
-
-  static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size,
-    MEMFLAGS flags, address pc, Thread* thread = NULL) {
-    Tracker tkr(Tracker::ReserveAndCommit, thread);
-    tkr.record(addr, size, flags, pc);
-  }
-
-  static inline void record_virtual_memory_release(address addr, size_t size,
-      Thread* thread = NULL) {
-    if (is_on()) {
-      Tracker tkr(Tracker::Release, thread);
-      tkr.record(addr, size);
+  static inline void record_virtual_memory_commit(void* addr, size_t size,
+    const NativeCallStack& stack) {
+    if (tracking_level() < NMT_summary) return;
+    if (addr != NULL) {
+      ThreadCritical tc;
+      if (tracking_level() < NMT_summary) return;
+      VirtualMemoryTracker::add_committed_region((address)addr, size, stack);
     }
   }
 
-  // record memory type on virtual memory base address
-  static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
-                            Thread* thread = NULL) {
-    Tracker tkr(Tracker::Type);
-    tkr.record(base, 0, flags);
-  }
-
-  // Get memory trackers for memory operations that can result race conditions.
-  // The memory tracker has to be obtained before realloc, virtual memory uncommit
-  // and virtual memory release, and call tracker.record() method if operation
-  // succeeded, or tracker.discard() to abort the tracking.
-  static inline Tracker get_realloc_tracker() {
-    return Tracker(Tracker::Realloc);
-  }
-
   static inline Tracker get_virtual_memory_uncommit_tracker() {
-    return Tracker(Tracker::Uncommit);
+    assert(tracking_level() >= NMT_summary, "Check by caller");
+    return Tracker(Tracker::uncommit);
   }
 
   static inline Tracker get_virtual_memory_release_tracker() {
-    return Tracker(Tracker::Release);
+    assert(tracking_level() >= NMT_summary, "Check by caller");
+    return Tracker(Tracker::release);
+  }
+
+  static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) {
+    if (tracking_level() < NMT_summary) return;
+    if (addr != NULL) {
+      ThreadCritical tc;
+      if (tracking_level() < NMT_summary) return;
+      VirtualMemoryTracker::set_reserved_region_type((address)addr, flag);
+    }
+  }
+
+  static inline void record_thread_stack(void* addr, size_t size) {
+    if (tracking_level() < NMT_summary) return;
+    if (addr != NULL) {
+      // uses the thread stack malloc slot to keep track of the number of threads
+      MallocMemorySummary::record_malloc(0, mtThreadStack);
+      record_virtual_memory_reserve_and_commit(addr, size, CALLER_PC, mtThreadStack);
+    }
+  }
+
+  static inline void release_thread_stack(void* addr, size_t size) {
+    if (tracking_level() < NMT_summary) return;
+    if (addr != NULL) {
+      // uses the thread stack malloc slot to keep track of the number of threads
+      MallocMemorySummary::record_free(0, mtThreadStack);
+      ThreadCritical tc;
+      if (tracking_level() < NMT_summary) return;
+      VirtualMemoryTracker::remove_released_region((address)addr, size);
+    }
+  }
+
+  // The query lock is used to synchronize access to the tracking data.
+  // So far, it is only used by the JCmd query, but it may be used by
+  // other tools.
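+  // Callers serialize queries by holding it, e.g. MutexLocker locker(MemTracker::query_lock()) in nmtDCmd.cpp.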
+  static inline Mutex* query_lock() { return _query_lock; }
+
+  // Make a final report, or a report for the hs_err file.
+  static void error_report(outputStream* output) {
+    if (tracking_level() >= NMT_summary) {
+      report(true, output);  // just print summary for error case.
+    }
+  }
+
+  static void final_report(outputStream* output) {
+    NMT_TrackingLevel level = tracking_level();
+    if (level >= NMT_summary) {
+      report(level == NMT_summary, output);
+    }
   }
 
 
-  // create memory baseline of current memory snapshot
-  static bool baseline();
-  // is there a memory baseline
-  static bool has_baseline() {
-    return _baseline.baselined();
-  }
-
-  // print memory usage from current snapshot
-  static bool print_memory_usage(BaselineOutputer& out, size_t unit,
-           bool summary_only = true);
-  // compare memory usage between current snapshot and baseline
-  static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
-           bool summary_only = true);
-
-  // the version for whitebox testing support, it ensures that all memory
-  // activities before this method call, are reflected in the snapshot
-  // database.
-  static bool wbtest_wait_for_data_merge();
-
-  // sync is called within global safepoint to synchronize nmt data
-  static void sync();
-
-  // called when a thread is about to exit
-  static void thread_exiting(JavaThread* thread);
-
-  // retrieve global snapshot
-  static MemSnapshot* get_snapshot() {
-    if (shutdown_in_progress()) {
-      return NULL;
-    }
-    return _snapshot;
+  // Stored baseline
+  static inline MemBaseline& get_baseline() {
+    return _baseline;
   }
 
-  // print tracker stats
-  NOT_PRODUCT(static void print_tracker_stats(outputStream* st);)
-  NOT_PRODUCT(static void walk_stack(int toSkip, char* buf, int len);)
-
- private:
-  // start native memory tracking worker thread
-  static bool start_worker(MemSnapshot* snapshot);
-
-  // called by worker thread to complete shutdown process
-  static void final_shutdown();
-
- protected:
-  // retrieve per-thread recorder of the specified thread.
-  // if the recorder is full, it will be enqueued to overflow
-  // queue, a new recorder is acquired from recorder pool or a
-  // new instance is created.
-  // when thread == NULL, it means global recorder
-  static MemRecorder* get_thread_recorder(JavaThread* thread);
-
-  // per-thread recorder pool
-  static void release_thread_recorder(MemRecorder* rec);
-  static void delete_all_pooled_recorders();
-
-  // pending recorder queue. Recorders are queued to pending queue
-  // when they are overflowed or collected at nmt sync point.
-  static void enqueue_pending_recorder(MemRecorder* rec);
-  static MemRecorder* get_pending_recorders();
-  static void delete_all_pending_recorders();
-
-  // write a memory tracking record in recorder
-  static void write_tracking_record(address addr, MEMFLAGS type,
-    size_t size, jint seq, address pc, JavaThread* thread);
-
-  static bool is_single_threaded_bootstrap() {
-    return _state == NMT_bootstrapping_single_thread;
-  }
-
-  static void check_NMT_load(Thread* thr) {
-    assert(thr != NULL, "Sanity check");
-    if (_slowdown_calling_thread && thr != _worker_thread) {
-#ifdef _WINDOWS
-      // On Windows, os::NakedYield() does not work as well
-      // as os::yield_all()
-      os::yield_all();
-#else
-     // On Solaris, os::yield_all() depends on os::sleep()
-     // which requires JavaTherad in _thread_in_vm state.
-     // Transits thread to _thread_in_vm state can be dangerous
-     // if caller holds lock, as it may deadlock with Threads_lock.
-     // So use NaKedYield instead.
-     //
-     // Linux and BSD, NakedYield() and yield_all() implementations
-     // are the same.
-      os::NakedYield();
-#endif
-    }
-  }
-
-  static void inc_pending_op_count() {
-    Atomic::inc(&_pending_op_count);
+  static NMT_TrackingLevel cmdline_tracking_level() {
+    return _cmdline_tracking_level;
   }
 
-  static void dec_pending_op_count() {
-    Atomic::dec(&_pending_op_count);
-    assert(_pending_op_count >= 0, "Sanity check");
-  }
-
+  static void tuning_statistics(outputStream* out);
 
  private:
-  // retrieve a pooled memory record or create new one if there is not
-  // one available
-  static MemRecorder* get_new_or_pooled_instance();
-  static void create_memory_record(address addr, MEMFLAGS type,
-                   size_t size, address pc, Thread* thread);
-  static void create_record_in_recorder(address addr, MEMFLAGS type,
-                   size_t size, address pc, JavaThread* thread);
-
-  static void set_current_processing_generation(unsigned long generation) {
-    _worker_thread_idle = false;
-    _processing_generation = generation;
-  }
-
-  static void report_worker_idle() {
-    _worker_thread_idle = true;
-  }
+  static NMT_TrackingLevel init_tracking_level();
+  static void report(bool summary_only, outputStream* output);
 
  private:
-  // global memory snapshot
-  static MemSnapshot*     _snapshot;
-
-  // a memory baseline of snapshot
+  // Tracking level
+  static volatile NMT_TrackingLevel   _tracking_level;
+  // If NMT option value passed by launcher through environment
+  // variable is valid
+  static bool                         _is_nmt_env_valid;
+  // command line tracking level
+  static NMT_TrackingLevel            _cmdline_tracking_level;
+  // Stored baseline
   static MemBaseline      _baseline;
-
-  // query lock
+  // Query lock
   static Mutex*           _query_lock;
-
-  // a thread can start to allocate memory before it is attached
-  // to VM 'Thread', those memory activities are recorded here.
-  // ThreadCritical is required to guard this global recorder.
-  static MemRecorder* volatile _global_recorder;
-
-  // main thread id
-  debug_only(static intx   _main_thread_tid;)
-
-  // pending recorders to be merged
-  static MemRecorder* volatile     _merge_pending_queue;
-
-  NOT_PRODUCT(static volatile jint   _pending_recorder_count;)
-
-  // pooled memory recorders
-  static MemRecorder* volatile     _pooled_recorders;
-
-  // memory recorder pool management, uses following
-  // counter to determine if a released memory recorder
-  // should be pooled
-
-  // latest thread count
-  static int               _thread_count;
-  // pooled recorder count
-  static volatile jint     _pooled_recorder_count;
-
-
-  // worker thread to merge pending recorders into snapshot
-  static MemTrackWorker*  _worker_thread;
-
-  // how many safepoints we skipped without entering sync point
-  static int              _sync_point_skip_count;
-
-  // if the tracker is properly intialized
-  static bool             _is_tracker_ready;
-  // tracking level (off, summary and detail)
-  static enum NMTLevel    _tracking_level;
-
-  // current nmt state
-  static volatile enum NMTStates   _state;
-  // the reason for shutting down nmt
-  static enum ShutdownReason       _reason;
-  // the generation that NMT is processing
-  static volatile unsigned long    _processing_generation;
-  // although NMT is still procesing current generation, but
-  // there is not more recorder to process, set idle state
-  static volatile bool             _worker_thread_idle;
-
-  // if NMT should slow down calling thread to allow
-  // worker thread to catch up
-  static volatile bool             _slowdown_calling_thread;
-
-  // pending memory op count.
-  // Certain memory ops need to pre-reserve sequence number
-  // before memory operation can happen to avoid race condition.
-  // See MemTracker::Tracker for detail
-  static volatile jint             _pending_op_count;
 };
 
-#endif // !INCLUDE_NMT
+#endif // INCLUDE_NMT
 
 #endif // SHARE_VM_SERVICES_MEM_TRACKER_HPP
+
--- a/src/share/vm/services/memoryManager.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/services/memoryManager.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -28,6 +28,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaCalls.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "services/lowMemoryDetector.hpp"
 #include "services/management.hpp"
 #include "services/memoryManager.hpp"
--- a/src/share/vm/services/memoryPool.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/services/memoryPool.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -29,6 +29,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaCalls.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "services/lowMemoryDetector.hpp"
 #include "services/management.hpp"
 #include "services/memoryManager.hpp"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/services/nmtCommon.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+#include "services/nmtCommon.hpp"
+
+const char* NMTUtil::_memory_type_names[] = {
+  "Java Heap",
+  "Class",
+  "Thread",
+  "Thread Stack",
+  "Code",
+  "GC",
+  "Compiler",
+  "Internal",
+  "Other",
+  "Symbol",
+  "Native Memory Tracking",
+  "Shared class space",
+  "Arena Chunk",
+  "Test",
+  "Tracing",
+  "Unknown"
+};
+
+
+const char* NMTUtil::scale_name(size_t scale) {
+  switch(scale) {
+    case K: return "KB";
+    case M: return "MB";
+    case G: return "GB";
+  }
+  ShouldNotReachHere();
+  return NULL;
+}
+
+size_t NMTUtil::scale_from_name(const char* scale) {
+  assert(scale != NULL, "Null pointer check");
+  if (strncmp(scale, "KB", 2) == 0 ||
+      strncmp(scale, "kb", 2) == 0) {
+    return K;
+  } else if (strncmp(scale, "MB", 2) == 0 ||
+             strncmp(scale, "mb", 2) == 0) {
+    return M;
+  } else if (strncmp(scale, "GB", 2) == 0 ||
+             strncmp(scale, "gb", 2) == 0) {
+    return G;
+  } else {
+    return 0; // Invalid value
+  }
+  return K;
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/services/nmtCommon.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_NMT_COMMON_HPP
+#define SHARE_VM_SERVICES_NMT_COMMON_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
+
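+// Number of 'type'-sized elements needed to hold an 'obj' (the object size is rounded up to a multiple of sizeof(type)).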
+#define CALC_OBJ_SIZE_IN_TYPE(obj, type) (align_size_up_(sizeof(obj), sizeof(type))/sizeof(type))
+
+// Data type for memory counters
+#ifdef _LP64
+  typedef jlong    MemoryCounterType;
+#else
+  typedef jint     MemoryCounterType;
+#endif
+
+// Native memory tracking level
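+// (NMT_minimal is used as the post-shutdown state once NMT has been enabled; see the NMT_minimal handling in nmtDCmd.cpp)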
+enum NMT_TrackingLevel {
+  NMT_unknown = 0xFF,
+  NMT_off     = 0x00,
+  NMT_minimal = 0x01,
+  NMT_summary = 0x02,
+  NMT_detail  = 0x03
+};
+
+// Number of stack frames to capture. This is a
+// build time decision.
+const int NMT_TrackingStackDepth = 4;
+
+// A few common utilities for native memory tracking
+class NMTUtil : AllStatic {
+ public:
+  // Map memory type to index
+  static inline int flag_to_index(MEMFLAGS flag) {
+    return (flag & 0xff);
+  }
+
+  // Map memory type to human readable name
+  static const char* flag_to_name(MEMFLAGS flag) {
+    return _memory_type_names[flag_to_index(flag)];
+  }
+
+  // Map an index to memory type
+  static MEMFLAGS index_to_flag(int index) {
+    return (MEMFLAGS)index;
+  }
+
+  // Memory size scale
+  static const char* scale_name(size_t scale);
+  static size_t scale_from_name(const char* scale);
+
+  // Translate memory size in specified scale
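+  // (rounds to the nearest unit, e.g. amount_in_scale(1536, K) == 2)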
+  static size_t amount_in_scale(size_t amount, size_t scale) {
+    return (amount + scale / 2) / scale;
+  }
+ private:
+  static const char* _memory_type_names[mt_number_of_types];
+};
+
+
+#endif
--- a/src/share/vm/services/nmtDCmd.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/services/nmtDCmd.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -22,6 +22,8 @@
  *
  */
 #include "precompiled.hpp"
+
+#include "runtime/mutexLocker.hpp"
 #include "services/nmtDCmd.hpp"
 #include "services/memReporter.hpp"
 #include "services/memTracker.hpp"
@@ -49,13 +51,8 @@
   _shutdown("shutdown", "request runtime to shutdown itself and free the " \
             "memory used by runtime.",
             "BOOLEAN", false, "false"),
-  _auto_shutdown("autoShutdown", "automatically shutdown itself under "    \
-            "stress situation",
-            "BOOLEAN", true, "true"),
-#ifndef PRODUCT
-  _debug("debug", "print tracker statistics. Debug only, not thread safe", \
+  _statistics("statistics", "print tracker statistics for tuning purpose.", \
             "BOOLEAN", false, "false"),
-#endif
   _scale("scale", "Memory usage in which scale, KB, MB or GB",
        "STRING", false, "KB") {
   _dcmdparser.add_dcmd_option(&_summary);
@@ -64,25 +61,30 @@
   _dcmdparser.add_dcmd_option(&_summary_diff);
   _dcmdparser.add_dcmd_option(&_detail_diff);
   _dcmdparser.add_dcmd_option(&_shutdown);
-  _dcmdparser.add_dcmd_option(&_auto_shutdown);
-#ifndef PRODUCT
-  _dcmdparser.add_dcmd_option(&_debug);
-#endif
+  _dcmdparser.add_dcmd_option(&_statistics);
   _dcmdparser.add_dcmd_option(&_scale);
 }
 
+
+size_t NMTDCmd::get_scale(const char* scale) const {
+  if (scale == NULL) return 0;
+  return NMTUtil::scale_from_name(scale);
+}
+
 void NMTDCmd::execute(DCmdSource source, TRAPS) {
+  // Check NMT state
+  //  native memory tracking has to be on
+  if (MemTracker::tracking_level() == NMT_off) {
+    output()->print_cr("Native memory tracking is not enabled");
+    return;
+  } else if (MemTracker::tracking_level() == NMT_minimal) {
+     output()->print_cr("Native memory tracking has been shutdown");
+     return;
+  }
+
   const char* scale_value = _scale.value();
-  size_t scale_unit;
-  if (strcmp(scale_value, "KB") == 0 || strcmp(scale_value, "kb") == 0) {
-    scale_unit = K;
-  } else if (strcmp(scale_value, "MB") == 0 ||
-             strcmp(scale_value, "mb") == 0) {
-    scale_unit = M;
-  } else if (strcmp(scale_value, "GB") == 0 ||
-             strcmp(scale_value, "gb") == 0) {
-    scale_unit = G;
-  } else {
+  size_t scale_unit = get_scale(scale_value);
+  if (scale_unit == 0) {
     output()->print_cr("Incorrect scale value: %s", scale_value);
     return;
   }
@@ -94,19 +96,11 @@
   if (_summary_diff.is_set() && _summary_diff.value()) { ++nopt; }
   if (_detail_diff.is_set() && _detail_diff.value()) { ++nopt; }
   if (_shutdown.is_set() && _shutdown.value()) { ++nopt; }
-  if (_auto_shutdown.is_set()) { ++nopt; }
-
-#ifndef PRODUCT
-  if (_debug.is_set() && _debug.value()) { ++nopt; }
-#endif
+  if (_statistics.is_set() && _statistics.value()) { ++nopt; }
 
   if (nopt > 1) {
       output()->print_cr("At most one of the following option can be specified: " \
-        "summary, detail, baseline, summary.diff, detail.diff, shutdown"
-#ifndef PRODUCT
-        ", debug"
-#endif
-      );
+        "summary, detail, baseline, summary.diff, detail.diff, shutdown");
       return;
   } else if (nopt == 0) {
     if (_summary.is_set()) {
@@ -117,53 +111,47 @@
     }
   }
 
-#ifndef PRODUCT
-  if (_debug.value()) {
-    output()->print_cr("debug command is NOT thread-safe, may cause crash");
-    MemTracker::print_tracker_stats(output());
-    return;
-  }
-#endif
-
-  // native memory tracking has to be on
-  if (!MemTracker::is_on() || MemTracker::shutdown_in_progress()) {
-    // if it is not on, what's the reason?
-    output()->print_cr("%s", MemTracker::reason());
-    return;
-  }
+  // Serialize NMT query
+  MutexLocker locker(MemTracker::query_lock());
 
   if (_summary.value()) {
-    BaselineTTYOutputer outputer(output());
-    MemTracker::print_memory_usage(outputer, scale_unit, true);
+    report(true, scale_unit);
   } else if (_detail.value()) {
-    BaselineTTYOutputer outputer(output());
-    MemTracker::print_memory_usage(outputer, scale_unit, false);
+    if (!check_detail_tracking_level(output())) {
+      return;
+    }
+    report(false, scale_unit);
   } else if (_baseline.value()) {
-    if (MemTracker::baseline()) {
-      output()->print_cr("Successfully baselined.");
+    MemBaseline& baseline = MemTracker::get_baseline();
+    if (!baseline.baseline(MemTracker::tracking_level() != NMT_detail)) {
+      output()->print_cr("Baseline failed");
     } else {
-      output()->print_cr("Baseline failed.");
+      output()->print_cr("Baseline succeeded");
     }
   } else if (_summary_diff.value()) {
-    if (MemTracker::has_baseline()) {
-      BaselineTTYOutputer outputer(output());
-      MemTracker::compare_memory_usage(outputer, scale_unit, true);
+    MemBaseline& baseline = MemTracker::get_baseline();
+    if (baseline.baseline_type() >= MemBaseline::Summary_baselined) {
+      report_diff(true, scale_unit);
     } else {
-      output()->print_cr("No baseline to compare, run 'baseline' command first");
+      output()->print_cr("No baseline for comparison");
     }
   } else if (_detail_diff.value()) {
-    if (MemTracker::has_baseline()) {
-      BaselineTTYOutputer outputer(output());
-      MemTracker::compare_memory_usage(outputer, scale_unit, false);
+    if (!check_detail_tracking_level(output())) {
+      return;
+    }
+    MemBaseline& baseline = MemTracker::get_baseline();
+    if (baseline.baseline_type() == MemBaseline::Detail_baselined) {
+      report_diff(false, scale_unit);
     } else {
-      output()->print_cr("No baseline to compare to, run 'baseline' command first");
+      output()->print_cr("No detail baseline for comparison");
     }
   } else if (_shutdown.value()) {
-    MemTracker::shutdown(MemTracker::NMT_shutdown_user);
-    output()->print_cr("Shutdown is in progress, it will take a few moments to " \
-      "completely shutdown");
-  } else if (_auto_shutdown.is_set()) {
-    MemTracker::set_autoShutdown(_auto_shutdown.value());
+    MemTracker::shutdown();
+    output()->print_cr("Native memory tracking has been turned off");
+  } else if (_statistics.value()) {
+    if (check_detail_tracking_level(output())) {
+      MemTracker::tuning_statistics(output());
+    }
   } else {
     ShouldNotReachHere();
     output()->print_cr("Unknown command");
@@ -181,3 +169,46 @@
   }
 }
 
+void NMTDCmd::report(bool summaryOnly, size_t scale_unit) {
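+  // Take a fresh baseline of the current native memory usage and report it at the requested scale.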
+  MemBaseline baseline;
+  if (baseline.baseline(summaryOnly)) {
+    if (summaryOnly) {
+      MemSummaryReporter rpt(baseline, output(), scale_unit);
+      rpt.report();
+    } else {
+      MemDetailReporter rpt(baseline, output(), scale_unit);
+      rpt.report();
+    }
+  }
+}
+
+void NMTDCmd::report_diff(bool summaryOnly, size_t scale_unit) {
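+  // Compare the previously stored baseline against a fresh baseline taken now.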
+  MemBaseline& early_baseline = MemTracker::get_baseline();
+  assert(early_baseline.baseline_type() != MemBaseline::Not_baselined,
+    "Not yet baselined");
+  assert(summaryOnly || early_baseline.baseline_type() == MemBaseline::Detail_baselined,
+    "Not a detail baseline");
+
+  MemBaseline baseline;
+  if (baseline.baseline(summaryOnly)) {
+    if (summaryOnly) {
+      MemSummaryDiffReporter rpt(early_baseline, baseline, output(), scale_unit);
+      rpt.report_diff();
+    } else {
+      MemDetailDiffReporter rpt(early_baseline, baseline, output(), scale_unit);
+      rpt.report_diff();
+    }
+  }
+}
+
+bool NMTDCmd::check_detail_tracking_level(outputStream* out) {
+  if (MemTracker::tracking_level() == NMT_detail) {
+    return true;
+  } else if (MemTracker::cmdline_tracking_level() == NMT_detail) {
+    out->print_cr("Tracking level has been downgraded due to lack of resources");
+    return false;
+  } else {
+    out->print_cr("Detail tracking is not enabled");
+    return false;
+  }
+}
--- a/src/share/vm/services/nmtDCmd.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/services/nmtDCmd.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,12 @@
 #ifndef SHARE_VM_SERVICES_NMT_DCMD_HPP
 #define SHARE_VM_SERVICES_NMT_DCMD_HPP
 
+#if INCLUDE_NMT
+
 #include "services/diagnosticArgument.hpp"
 #include "services/diagnosticFramework.hpp"
+#include "services/memBaseline.hpp"
+#include "services/mallocTracker.hpp"
 
 /**
  * Native memory tracking DCmd implementation
@@ -39,10 +43,7 @@
   DCmdArgument<bool>  _summary_diff;
   DCmdArgument<bool>  _detail_diff;
   DCmdArgument<bool>  _shutdown;
-  DCmdArgument<bool>  _auto_shutdown;
-#ifndef PRODUCT
-  DCmdArgument<bool>  _debug;
-#endif
+  DCmdArgument<bool>  _statistics;
   DCmdArgument<char*> _scale;
 
  public:
@@ -61,6 +62,17 @@
   }
   static int num_arguments();
   virtual void execute(DCmdSource source, TRAPS);
+
+ private:
+  void report(bool summaryOnly, size_t scale);
+  void report_diff(bool summaryOnly, size_t scale);
+
+  size_t get_scale(const char* scale) const;
+
+  // check if NMT running at detail tracking level
+  bool check_detail_tracking_level(outputStream* out);
 };
 
+#endif // INCLUDE_NMT
+
 #endif // SHARE_VM_SERVICES_NMT_DCMD_HPP
--- a/src/share/vm/services/runtimeService.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/services/runtimeService.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -46,6 +46,7 @@
 PerfCounter*  RuntimeService::_thread_interrupt_signaled_count = NULL;
 PerfCounter*  RuntimeService::_interrupted_before_count = NULL;
 PerfCounter*  RuntimeService::_interrupted_during_count = NULL;
+double RuntimeService::_last_safepoint_sync_time_sec = 0.0;
 
 void RuntimeService::init() {
   // Make sure the VM version is initialized
@@ -128,6 +129,7 @@
 
   // update the time stamp to begin recording safepoint time
   _safepoint_timer.update();
+  _last_safepoint_sync_time_sec = 0.0;
   if (UsePerfData) {
     _total_safepoints->inc();
     if (_app_timer.is_updated()) {
@@ -140,6 +142,9 @@
   if (UsePerfData) {
     _sync_time_ticks->inc(_safepoint_timer.ticks_since_update());
   }
+  if (PrintGCApplicationStoppedTime) {
+    _last_safepoint_sync_time_sec = last_safepoint_time_sec();
+  }
 }
 
 void RuntimeService::record_safepoint_end() {
@@ -155,8 +160,10 @@
     gclog_or_tty->date_stamp(PrintGCDateStamps);
     gclog_or_tty->stamp(PrintGCTimeStamps);
     gclog_or_tty->print_cr("Total time for which application threads "
-                           "were stopped: %3.7f seconds",
-                           last_safepoint_time_sec());
+                           "were stopped: %3.7f seconds, "
+                           "Stopping threads took: %3.7f seconds",
+                           last_safepoint_time_sec(),
+                           _last_safepoint_sync_time_sec);
   }
 
   // update the time stamp to begin recording app time
--- a/src/share/vm/services/runtimeService.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/services/runtimeService.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -40,6 +40,7 @@
 
   static TimeStamp _safepoint_timer;
   static TimeStamp _app_timer;
+  static double _last_safepoint_sync_time_sec;
 
 public:
   static void init();
--- a/src/share/vm/services/threadService.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/services/threadService.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -33,6 +33,7 @@
 #include "runtime/init.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/vframe.hpp"
+#include "runtime/thread.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "runtime/vm_operations.hpp"
 #include "services/threadService.hpp"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/services/virtualMemoryTracker.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,477 @@
+/*
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+
+#include "runtime/threadCritical.hpp"
+#include "services/virtualMemoryTracker.hpp"
+
+size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
+
+void VirtualMemorySummary::initialize() {
+  assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check");
+  // Use placement operator new to initialize static data area.
+  ::new ((void*)_snapshot) VirtualMemorySnapshot();
+}
+
+SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;
+
+int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
+  return r1.compare(r2);
+}
+
+int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
+  return r1.compare(r2);
+}
+
+bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
+  assert(addr != NULL, "Invalid address");
+  assert(size > 0, "Invalid size");
+  assert(contain_region(addr, size), "Not contain this region");
+
+  if (all_committed()) return true;
+
+  CommittedMemoryRegion committed_rgn(addr, size, stack);
+  LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.find_node(committed_rgn);
+  if (node != NULL) {
+    CommittedMemoryRegion* rgn = node->data();
+    if (rgn->same_region(addr, size)) {
+      return true;
+    }
+
+    if (rgn->adjacent_to(addr, size)) {
+      // check if the next region covers this committed region;
+      // the regions may not have been merged due to different call stacks
+      LinkedListNode<CommittedMemoryRegion>* next =
+        node->next();
+      if (next != NULL && next->data()->contain_region(addr, size)) {
+        if (next->data()->same_region(addr, size)) {
+          next->data()->set_call_stack(stack);
+        }
+        return true;
+      }
+      if (rgn->call_stack()->equals(stack)) {
+        VirtualMemorySummary::record_uncommitted_memory(rgn->size(), flag());
+        // the two adjacent regions have the same call stack, merge them
+        rgn->expand_region(addr, size);
+        VirtualMemorySummary::record_committed_memory(rgn->size(), flag());
+        return true;
+      }
+      VirtualMemorySummary::record_committed_memory(size, flag());
+      if (rgn->base() > addr) {
+        return _committed_regions.insert_before(committed_rgn, node) != NULL;
+      } else {
+        return _committed_regions.insert_after(committed_rgn, node) != NULL;
+      }
+    }
+    assert(rgn->contain_region(addr, size), "Must cover this region");
+    return true;
+  } else {
+    // New committed region
+    VirtualMemorySummary::record_committed_memory(size, flag());
+    return add_committed_region(committed_rgn);
+  }
+}
+
+void ReservedMemoryRegion::set_all_committed(bool b) {
+  if (all_committed() != b) {
+    _all_committed = b;
+    if (b) {
+      VirtualMemorySummary::record_committed_memory(size(), flag());
+    }
+  }
+}
+
+bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
+  address addr, size_t size) {
+  assert(addr != NULL, "Invalid address");
+  assert(size > 0, "Invalid size");
+
+  CommittedMemoryRegion* rgn = node->data();
+  assert(rgn->contain_region(addr, size), "Has to be contained");
+  assert(!rgn->same_region(addr, size), "Can not be the same region");
+
+  if (rgn->base() == addr ||
+      rgn->end() == addr + size) {
+    rgn->exclude_region(addr, size);
+    return true;
+  } else {
+    // split this region
+    address top = rgn->end();
+    // use this region for lower part
+    size_t exclude_size = rgn->end() - addr;
+    rgn->exclude_region(addr, exclude_size);
+
+    // higher part
+    address high_base = addr + size;
+    size_t  high_size = top - high_base;
+
+    CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
+    LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
+    assert(high_node == NULL || node->next() == high_node, "Should be right after");
+    return (high_node != NULL);
+  }
+
+  return false;
+}
+
+bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
+  // uncommit stack guard pages
+  if (flag() == mtThreadStack && !same_region(addr, sz)) {
+    return true;
+  }
+
+  assert(addr != NULL, "Invalid address");
+  assert(sz > 0, "Invalid size");
+
+  if (all_committed()) {
+    assert(_committed_regions.is_empty(), "Sanity check");
+    assert(contain_region(addr, sz), "Reserved region does not contain this region");
+    set_all_committed(false);
+    VirtualMemorySummary::record_uncommitted_memory(sz, flag());
+    if (same_region(addr, sz)) {
+      return true;
+    } else {
+      CommittedMemoryRegion rgn(base(), size(), *call_stack());
+      if (rgn.base() == addr || rgn.end() == (addr + sz)) {
+        rgn.exclude_region(addr, sz);
+        return add_committed_region(rgn);
+      } else {
+        // split this region
+        // top of the whole region
+        address top = rgn.end();
+        // use this region for lower part
+        size_t exclude_size = rgn.end() - addr;
+        rgn.exclude_region(addr, exclude_size);
+        if (add_committed_region(rgn)) {
+          // higher part
+          address high_base = addr + sz;
+          size_t  high_size = top - high_base;
+          CommittedMemoryRegion high_rgn(high_base, high_size, NativeCallStack::EMPTY_STACK);
+          return add_committed_region(high_rgn);
+        } else {
+          return false;
+        }
+      }
+    }
+  } else {
+    // we have to walk the whole list to remove the committed regions
+    // in the specified range
+    LinkedListNode<CommittedMemoryRegion>* head =
+      _committed_regions.head();
+    LinkedListNode<CommittedMemoryRegion>* prev = NULL;
+    VirtualMemoryRegion uncommitted_rgn(addr, sz);
+
+    while (head != NULL && !uncommitted_rgn.is_empty()) {
+      CommittedMemoryRegion* crgn = head->data();
+      // this committed region overlaps the region to uncommit
+      if (crgn->overlap_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
+        if (crgn->same_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
+          // found the matching region; removing the node will do
+          VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
+          _committed_regions.remove_after(prev);
+          return true;
+        } else if (crgn->contain_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
+          // this committed region contains the whole uncommitted region
+          VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
+          return remove_uncommitted_region(head, uncommitted_rgn.base(), uncommitted_rgn.size());
+        } else if (uncommitted_rgn.contain_region(crgn->base(), crgn->size())) {
+          // this whole committed region falls within the range being uncommitted
+          size_t exclude_size = crgn->end() - uncommitted_rgn.base();
+          uncommitted_rgn.exclude_region(uncommitted_rgn.base(), exclude_size);
+          VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
+          LinkedListNode<CommittedMemoryRegion>* tmp = head;
+          head = head->next();
+          _committed_regions.remove_after(prev);
+          continue;
+        } else if (crgn->contain_address(uncommitted_rgn.base())) {
+          size_t toUncommitted = crgn->end() - uncommitted_rgn.base();
+          crgn->exclude_region(uncommitted_rgn.base(), toUncommitted);
+          uncommitted_rgn.exclude_region(uncommitted_rgn.base(), toUncommitted);
+          VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag());
+        } else if (uncommitted_rgn.contain_address(crgn->base())) {
+          size_t toUncommitted = uncommitted_rgn.end() - crgn->base();
+          crgn->exclude_region(crgn->base(), toUncommitted);
+          uncommitted_rgn.exclude_region(uncommitted_rgn.end() - toUncommitted,
+            toUncommitted);
+          VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag());
+        }
+      }
+      prev = head;
+      head = head->next();
+    }
+  }
+
+  return true;
+}
+
+void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
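+  // Detach the committed regions at or above 'addr' from this region and hand them over to 'rgn'.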
+  assert(addr != NULL, "Invalid address");
+
+  // split committed regions
+  LinkedListNode<CommittedMemoryRegion>* head =
+    _committed_regions.head();
+  LinkedListNode<CommittedMemoryRegion>* prev = NULL;
+
+  while (head != NULL) {
+    if (head->data()->base() >= addr) {
+      break;
+    }
+    prev = head;
+    head = head->next();
+  }
+
+  if (head != NULL) {
+    if (prev != NULL) {
+      prev->set_next(head->next());
+    } else {
+      _committed_regions.set_head(NULL);
+    }
+  }
+
+  rgn._committed_regions.set_head(head);
+}
+
+size_t ReservedMemoryRegion::committed_size() const {
+  if (all_committed()) {
+    return size();
+  } else {
+    size_t committed = 0;
+    LinkedListNode<CommittedMemoryRegion>* head =
+      _committed_regions.head();
+    while (head != NULL) {
+      committed += head->data()->size();
+      head = head->next();
+    }
+    return committed;
+  }
+}
+
+void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
+  assert((flag() == mtNone || flag() == f), "Overwrite memory type");
+  if (flag() != f) {
+    VirtualMemorySummary::move_reserved_memory(flag(), f, size());
+    VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
+    _flag = f;
+  }
+}
+
+bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
+  if (level >= NMT_summary) {
+    VirtualMemorySummary::initialize();
+  }
+  return true;
+}
+
+bool VirtualMemoryTracker::late_initialize(NMT_TrackingLevel level) {
+  if (level >= NMT_summary) {
+    _reserved_regions = new (std::nothrow, ResourceObj::C_HEAP, mtNMT)
+      SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
+    return (_reserved_regions != NULL);
+  }
+  return true;
+}
+
+bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
+   const NativeCallStack& stack, MEMFLAGS flag, bool all_committed) {
+  assert(base_addr != NULL, "Invalid address");
+  assert(size > 0, "Invalid size");
+  assert(_reserved_regions != NULL, "Sanity check");
+  ReservedMemoryRegion  rgn(base_addr, size, stack, flag);
+  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
+  LinkedListNode<ReservedMemoryRegion>* node;
+  if (reserved_rgn == NULL) {
+    VirtualMemorySummary::record_reserved_memory(size, flag);
+    node = _reserved_regions->add(rgn);
+    if (node != NULL) {
+      node->data()->set_all_committed(all_committed);
+      return true;
+    } else {
+      return false;
+    }
+  } else {
+    if (reserved_rgn->same_region(base_addr, size)) {
+      reserved_rgn->set_call_stack(stack);
+      reserved_rgn->set_flag(flag);
+      return true;
+    } else if (reserved_rgn->adjacent_to(base_addr, size)) {
+      VirtualMemorySummary::record_reserved_memory(size, flag);
+      reserved_rgn->expand_region(base_addr, size);
+      reserved_rgn->set_call_stack(stack);
+      return true;
+    } else {
+      // Overlapped reservation.
+      // This can happen when the regions are thread stacks, as a JNI
+      // thread that does not detach from the VM before it exits
+      // leaks its JavaThread object.
+      if (reserved_rgn->flag() == mtThreadStack) {
+        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
+        // Overwrite with new region
+
+        // Release old region
+        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
+        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());
+
+        // Add new region
+        VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);
+
+        *reserved_rgn = rgn;
+        return true;
+      }
+
+      // CDS mapping region.
+      // CDS reserves the whole region for mapping CDS archive, then maps each section into the region.
+      // NMT reports CDS as a whole.
+      if (reserved_rgn->flag() == mtClassShared) {
+        assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region");
+        return true;
+      }
+
+      ShouldNotReachHere();
+      return false;
+    }
+  }
+}
+
+void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
+  assert(addr != NULL, "Invalid address");
+  assert(_reserved_regions != NULL, "Sanity check");
+
+  ReservedMemoryRegion   rgn(addr, 1);
+  ReservedMemoryRegion*  reserved_rgn = _reserved_regions->find(rgn);
+  if (reserved_rgn != NULL) {
+    assert(reserved_rgn->contain_address(addr), "Containment");
+    if (reserved_rgn->flag() != flag) {
+      assert(reserved_rgn->flag() == mtNone, "Overwrite memory type");
+      reserved_rgn->set_flag(flag);
+    }
+  }
+}
+
+bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
+  const NativeCallStack& stack) {
+  assert(addr != NULL, "Invalid address");
+  assert(size > 0, "Invalid size");
+  assert(_reserved_regions != NULL, "Sanity check");
+
+  ReservedMemoryRegion  rgn(addr, size);
+  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
+
+  assert(reserved_rgn != NULL, "No reserved region");
+  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
+  return reserved_rgn->add_committed_region(addr, size, stack);
+}
+
+bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
+  assert(addr != NULL, "Invalid address");
+  assert(size > 0, "Invalid size");
+  assert(_reserved_regions != NULL, "Sanity check");
+
+  ReservedMemoryRegion  rgn(addr, size);
+  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
+  assert(reserved_rgn != NULL, "No reserved region");
+  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
+  return reserved_rgn->remove_uncommitted_region(addr, size);
+}
+
+bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
+  assert(addr != NULL, "Invalid address");
+  assert(size > 0, "Invalid size");
+  assert(_reserved_regions != NULL, "Sanity check");
+
+  ReservedMemoryRegion  rgn(addr, size);
+  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
+
+  assert(reserved_rgn != NULL, "No reserved region");
+
+  // uncommit regions within the released region
+  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
+    return false;
+  }
+
+
+  VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());
+
+  if (reserved_rgn->same_region(addr, size)) {
+    return _reserved_regions->remove(rgn);
+  } else {
+    assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
+    if (reserved_rgn->base() == addr ||
+        reserved_rgn->end() == addr + size) {
+      reserved_rgn->exclude_region(addr, size);
+      return true;
+    } else {
+      address top = reserved_rgn->end();
+      address high_base = addr + size;
+      ReservedMemoryRegion high_rgn(high_base, top - high_base,
+        *reserved_rgn->call_stack(), reserved_rgn->flag());
+
+      // use original region for lower region
+      reserved_rgn->exclude_region(addr, top - addr);
+      LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
+      if (new_rgn == NULL) {
+        return false;
+      } else {
+        reserved_rgn->move_committed_regions(addr, *new_rgn->data());
+        return true;
+      }
+    }
+  }
+}
+
+
+bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
+  assert(_reserved_regions != NULL, "Sanity check");
+  ThreadCritical tc;
+  // Check that the _reserved_regions haven't been deleted.
+  if (_reserved_regions != NULL) {
+    LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
+    while (head != NULL) {
+      const ReservedMemoryRegion* rgn = head->peek();
+      if (!walker->do_allocation_site(rgn)) {
+        return false;
+      }
+      head = head->next();
+    }
+  }
+  return true;
+}
+
+// Transition virtual memory tracking level.
+bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
+  assert (from != NMT_minimal, "cannot convert from the lowest tracking level to anything");
+  if (to == NMT_minimal) {
+    assert(from == NMT_summary || from == NMT_detail, "Just check");
+    // Clean up virtual memory tracking data structures.
+    ThreadCritical tc;
+    // Check for potential race with other thread calling transition
+    if (_reserved_regions != NULL) {
+      delete _reserved_regions;
+      _reserved_regions = NULL;
+    }
+  }
+
+  return true;
+}
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/services/virtualMemoryTracker.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,425 @@
+/*
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
+#define SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
+
+#if INCLUDE_NMT
+
+#include "memory/allocation.hpp"
+#include "services/allocationSite.hpp"
+#include "services/nmtCommon.hpp"
+#include "utilities/linkedlist.hpp"
+#include "utilities/nativeCallStack.hpp"
+#include "utilities/ostream.hpp"
+
+
+/*
+ * Virtual memory counter
+ */
+class VirtualMemory VALUE_OBJ_CLASS_SPEC {
+ private:
+  size_t     _reserved;
+  size_t     _committed;
+
+ public:
+  VirtualMemory() : _reserved(0), _committed(0) { }
+
+  inline void reserve_memory(size_t sz) { _reserved += sz; }
+  inline void commit_memory (size_t sz) {
+    _committed += sz;
+    assert(_committed <= _reserved, "Sanity check");
+  }
+
+  inline void release_memory (size_t sz) {
+    assert(_reserved >= sz, "Negative amount");
+    _reserved -= sz;
+  }
+
+  inline void uncommit_memory(size_t sz) {
+    assert(_committed >= sz, "Negative amount");
+    _committed -= sz;
+  }
+
+  inline size_t reserved()  const { return _reserved;  }
+  inline size_t committed() const { return _committed; }
+};
+
+// Virtual memory allocation site, keeps track where the virtual memory is reserved.
+class VirtualMemoryAllocationSite : public AllocationSite<VirtualMemory> {
+ public:
+  VirtualMemoryAllocationSite(const NativeCallStack& stack) :
+    AllocationSite<VirtualMemory>(stack) { }
+
+  inline void reserve_memory(size_t sz)  { data()->reserve_memory(sz);  }
+  inline void commit_memory (size_t sz)  { data()->commit_memory(sz);   }
+  inline void uncommit_memory(size_t sz) { data()->uncommit_memory(sz); }
+  inline void release_memory(size_t sz)  { data()->release_memory(sz);  }
+  inline size_t reserved() const  { return peek()->reserved(); }
+  inline size_t committed() const { return peek()->committed(); }
+};
+
+class VirtualMemorySummary;
+
+// This class represents a snapshot of virtual memory at a given time.
+// The latest snapshot is saved in a static area.
+class VirtualMemorySnapshot : public ResourceObj {
+  friend class VirtualMemorySummary;
+
+ private:
+  VirtualMemory  _virtual_memory[mt_number_of_types];
+
+ public:
+  inline VirtualMemory* by_type(MEMFLAGS flag) {
+    int index = NMTUtil::flag_to_index(flag);
+    return &_virtual_memory[index];
+  }
+
+  inline VirtualMemory* by_index(int index) {
+    assert(index >= 0, "Index out of bound");
+    assert(index < mt_number_of_types, "Index out of bound");
+    return &_virtual_memory[index];
+  }
+
+  inline size_t total_reserved() const {
+    size_t amount = 0;
+    for (int index = 0; index < mt_number_of_types; index ++) {
+      amount += _virtual_memory[index].reserved();
+    }
+    return amount;
+  }
+
+  inline size_t total_committed() const {
+    size_t amount = 0;
+    for (int index = 0; index < mt_number_of_types; index ++) {
+      amount += _virtual_memory[index].committed();
+    }
+    return amount;
+  }
+
+  void copy_to(VirtualMemorySnapshot* s) {
+    for (int index = 0; index < mt_number_of_types; index ++) {
+      s->_virtual_memory[index] = _virtual_memory[index];
+    }
+  }
+};
+
+class VirtualMemorySummary : AllStatic {
+ public:
+  static void initialize();
+
+  static inline void record_reserved_memory(size_t size, MEMFLAGS flag) {
+    as_snapshot()->by_type(flag)->reserve_memory(size);
+  }
+
+  static inline void record_committed_memory(size_t size, MEMFLAGS flag) {
+    as_snapshot()->by_type(flag)->commit_memory(size);
+  }
+
+  static inline void record_uncommitted_memory(size_t size, MEMFLAGS flag) {
+    as_snapshot()->by_type(flag)->uncommit_memory(size);
+  }
+
+  static inline void record_released_memory(size_t size, MEMFLAGS flag) {
+    as_snapshot()->by_type(flag)->release_memory(size);
+  }
+
+  // Move virtual memory from one memory type to another.
+  // Virtual memory can be reserved before it is associated with a memory type, and tagged
+  // as 'unknown'. Once the memory is tagged, the virtual memory will be moved from the
+  // 'unknown' type to the specified memory type.
+  static inline void move_reserved_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
+    as_snapshot()->by_type(from)->release_memory(size);
+    as_snapshot()->by_type(to)->reserve_memory(size);
+  }
+
+  static inline void move_committed_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
+    as_snapshot()->by_type(from)->uncommit_memory(size);
+    as_snapshot()->by_type(to)->commit_memory(size);
+  }
+
+  static inline void snapshot(VirtualMemorySnapshot* s) {
+    as_snapshot()->copy_to(s);
+  }
+
+  static VirtualMemorySnapshot* as_snapshot() {
+    return (VirtualMemorySnapshot*)_snapshot;
+  }
+
+ private:
+  static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
+};
+
+
+
+/*
+ * A virtual memory region
+ */
+class VirtualMemoryRegion VALUE_OBJ_CLASS_SPEC {
+ private:
+  address      _base_address;
+  size_t       _size;
+
+ public:
+  VirtualMemoryRegion(address addr, size_t size) :
+    _base_address(addr), _size(size) {
+     assert(addr != NULL, "Invalid address");
+     assert(size > 0, "Invalid size");
+   }
+
+  inline address base() const { return _base_address;   }
+  inline address end()  const { return base() + size(); }
+  inline size_t  size() const { return _size;           }
+
+  inline bool is_empty() const { return size() == 0; }
+
+  inline bool contain_address(address addr) const {
+    return (addr >= base() && addr < end());
+  }
+
+
+  inline bool contain_region(address addr, size_t size) const {
+    return contain_address(addr) && contain_address(addr + size - 1);
+  }
+
+  inline bool same_region(address addr, size_t sz) const {
+    return (addr == base() && sz == size());
+  }
+
+
+  inline bool overlap_region(address addr, size_t sz) const {
+    VirtualMemoryRegion rgn(addr, sz);
+    return contain_address(addr) ||
+           contain_address(addr + sz - 1) ||
+           rgn.contain_address(base()) ||
+           rgn.contain_address(end() - 1);
+  }
+
+  inline bool adjacent_to(address addr, size_t sz) const {
+    return (addr == end() || (addr + sz) == base());
+  }
+
+  void exclude_region(address addr, size_t sz) {
+    assert(contain_region(addr, sz), "Region not contained");
+    assert(addr == base() || addr + sz == end(), "Cannot exclude from the middle");
+    size_t new_size = size() - sz;
+
+    if (addr == base()) {
+      set_base(addr + sz);
+    }
+    set_size(new_size);
+  }
+
+  void expand_region(address addr, size_t sz) {
+    assert(adjacent_to(addr, sz), "Not adjacent regions");
+    if (base() == addr + sz) {
+      set_base(addr);
+    }
+    set_size(size() + sz);
+  }
+
+ protected:
+  void set_base(address base) {
+    assert(base != NULL, "Sanity check");
+    _base_address = base;
+  }
+
+  void set_size(size_t  size) {
+    assert(size > 0, "Sanity check");
+    _size = size;
+  }
+};
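A region can only grow or shrink at its edges, as the asserts above require. A small illustrative example (addresses made up, not part of this change):

    VirtualMemoryRegion rgn((address)0x1000, 0x2000);    // [0x1000, 0x3000)
    rgn.expand_region((address)0x3000, 0x1000);           // adjacent range: now [0x1000, 0x4000)
    rgn.exclude_region((address)0x1000, 0x800);           // cut from the front: now [0x1800, 0x4000)
    // exclude_region() on a range in the middle would trip the assert above.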
+
+
+class CommittedMemoryRegion : public VirtualMemoryRegion {
+ private:
+  NativeCallStack  _stack;
+
+ public:
+  CommittedMemoryRegion(address addr, size_t size, const NativeCallStack& stack) :
+    VirtualMemoryRegion(addr, size), _stack(stack) { }
+
+  inline int compare(const CommittedMemoryRegion& rgn) const {
+    if (overlap_region(rgn.base(), rgn.size()) ||
+        adjacent_to   (rgn.base(), rgn.size())) {
+      return 0;
+    } else {
+      if (base() == rgn.base()) {
+        return 0;
+      } else if (base() > rgn.base()) {
+        return 1;
+      } else {
+        return -1;
+      }
+    }
+  }
+
+  inline bool equals(const CommittedMemoryRegion& rgn) const {
+    return compare(rgn) == 0;
+  }
+
+  inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
+  inline const NativeCallStack* call_stack() const         { return &_stack; }
+};
+
+
+typedef LinkedListIterator<CommittedMemoryRegion> CommittedRegionIterator;
+
+int compare_committed_region(const CommittedMemoryRegion&, const CommittedMemoryRegion&);
+class ReservedMemoryRegion : public VirtualMemoryRegion {
+ private:
+  SortedLinkedList<CommittedMemoryRegion, compare_committed_region>
+    _committed_regions;
+
+  NativeCallStack  _stack;
+  MEMFLAGS         _flag;
+
+  bool             _all_committed;
+
+ public:
+  ReservedMemoryRegion(address base, size_t size, const NativeCallStack& stack,
+    MEMFLAGS flag = mtNone) :
+    VirtualMemoryRegion(base, size), _stack(stack), _flag(flag),
+    _all_committed(false) { }
+
+
+  ReservedMemoryRegion(address base, size_t size) :
+    VirtualMemoryRegion(base, size), _stack(NativeCallStack::EMPTY_STACK), _flag(mtNone),
+    _all_committed(false) { }
+
+  // Copy constructor
+  ReservedMemoryRegion(const ReservedMemoryRegion& rr) :
+    VirtualMemoryRegion(rr.base(), rr.size()) {
+    *this = rr;
+  }
+
+  inline void  set_call_stack(const NativeCallStack& stack) { _stack = stack; }
+  inline const NativeCallStack* call_stack() const          { return &_stack;  }
+
+  void  set_flag(MEMFLAGS flag);
+  inline MEMFLAGS flag() const            { return _flag;  }
+
+  inline int compare(const ReservedMemoryRegion& rgn) const {
+    if (overlap_region(rgn.base(), rgn.size())) {
+      return 0;
+    } else {
+      if (base() == rgn.base()) {
+        return 0;
+      } else if (base() > rgn.base()) {
+        return 1;
+      } else {
+        return -1;
+      }
+    }
+  }
+
+  inline bool equals(const ReservedMemoryRegion& rgn) const {
+    return compare(rgn) == 0;
+  }
+
+  bool    add_committed_region(address addr, size_t size, const NativeCallStack& stack);
+  bool    remove_uncommitted_region(address addr, size_t size);
+
+  size_t  committed_size() const;
+
+  // Move committed regions that are higher than the specified address to
+  // the new region.
+  void    move_committed_regions(address addr, ReservedMemoryRegion& rgn);
+
+  inline bool all_committed() const { return _all_committed; }
+  void        set_all_committed(bool b);
+
+  CommittedRegionIterator iterate_committed_regions() const {
+    return CommittedRegionIterator(_committed_regions.head());
+  }
+
+  ReservedMemoryRegion& operator= (const ReservedMemoryRegion& other) {
+    set_base(other.base());
+    set_size(other.size());
+
+    _stack =         *other.call_stack();
+    _flag  =         other.flag();
+    _all_committed = other.all_committed();
+    if (other.all_committed()) {
+      set_all_committed(true);
+    } else {
+      CommittedRegionIterator itr = other.iterate_committed_regions();
+      const CommittedMemoryRegion* rgn = itr.next();
+      while (rgn != NULL) {
+        _committed_regions.add(*rgn);
+        rgn = itr.next();
+      }
+    }
+    return *this;
+  }
+
+ private:
+  // The committed region contains the uncommitted region; subtract the uncommitted
+  // region from this committed region.
+  bool remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
+    address addr, size_t sz);
+
+  bool add_committed_region(const CommittedMemoryRegion& rgn) {
+    assert(rgn.base() != NULL, "Invalid base address");
+    assert(size() > 0, "Invalid size");
+    return _committed_regions.add(rgn) != NULL;
+  }
+};
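Illustrative sketch (not part of this change) of how committed sub-regions are tracked inside one reservation; the addresses, sizes and the mtThread flag are made up, and EMPTY_STACK stands in for a real call stack:

    ReservedMemoryRegion rmr((address)0x10000, 0x10000,
                             NativeCallStack::EMPTY_STACK, mtThread);
    rmr.add_committed_region((address)0x10000, 0x2000, NativeCallStack::EMPTY_STACK);
    rmr.add_committed_region((address)0x14000, 0x1000, NativeCallStack::EMPTY_STACK);
    assert(rmr.committed_size() == 0x3000, "two disjoint committed ranges");

    // Committed ranges are kept sorted; walk them with the iterator.
    CommittedRegionIterator itr = rmr.iterate_committed_regions();
    for (const CommittedMemoryRegion* c = itr.next(); c != NULL; c = itr.next()) {
      // c->base(), c->size(), c->call_stack() ...
    }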
+
+int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2);
+
+class VirtualMemoryWalker : public StackObj {
+ public:
+   virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) { return false; }
+};
+
+// Main class called from MemTracker to track virtual memory allocations, commits and releases.
+class VirtualMemoryTracker : AllStatic {
+ public:
+  static bool initialize(NMT_TrackingLevel level);
+
+  // Late phase initialization
+  static bool late_initialize(NMT_TrackingLevel level);
+
+  static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack,
+    MEMFLAGS flag = mtNone, bool all_committed = false);
+
+  static bool add_committed_region      (address base_addr, size_t size, const NativeCallStack& stack);
+  static bool remove_uncommitted_region (address base_addr, size_t size);
+  static bool remove_released_region    (address base_addr, size_t size);
+  static void set_reserved_region_type  (address addr, MEMFLAGS flag);
+
+  // Walk virtual memory data structure for creating baseline, etc.
+  static bool walk_virtual_memory(VirtualMemoryWalker* walker);
+
+  static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);
+
+ private:
+  static SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* _reserved_regions;
+};
+
+
+#endif // INCLUDE_NMT
+
+#endif // SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
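A sketch of how the static entry points above fit together (illustrative only; a real caller passes the actual reservation address and a captured NativeCallStack rather than EMPTY_STACK, and MemTracker normally mediates these calls):

    address base = (address)0x20000000;   // made-up reservation address

    VirtualMemoryTracker::add_reserved_region(base, 1 * M, NativeCallStack::EMPTY_STACK);
    VirtualMemoryTracker::set_reserved_region_type(base, mtThreadStack);
    VirtualMemoryTracker::add_committed_region(base, 64 * K, NativeCallStack::EMPTY_STACK);
    VirtualMemoryTracker::remove_uncommitted_region(base, 64 * K);
    VirtualMemoryTracker::remove_released_region(base, 1 * M);

    // Baseline code visits every reserved region through a VirtualMemoryWalker:
    class RegionSizeSummer : public VirtualMemoryWalker {
     public:
      size_t _total;
      RegionSizeSummer() : _total(0) { }
      bool do_allocation_site(const ReservedMemoryRegion* rgn) {
        _total += rgn->size();
        return true;   // returning false stops the walk
      }
    };
    RegionSizeSummer summer;
    VirtualMemoryTracker::walk_virtual_memory(&summer);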
--- a/src/share/vm/trace/noTraceBackend.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/trace/noTraceBackend.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -41,4 +41,4 @@
 
 typedef NoTraceBackend Tracing;
 
-#endif
+#endif // SHARE_VM_TRACE_NOTRACEBACKEND_HPP
--- a/src/share/vm/trace/traceBackend.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/trace/traceBackend.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -25,9 +25,7 @@
 #define SHARE_VM_TRACE_TRACEBACKEND_HPP
 
 #include "utilities/macros.hpp"
-
 #if INCLUDE_TRACE
-
 #include "runtime/globals.hpp"
 #include "runtime/os.hpp"
 #include "trace/traceTime.hpp"
@@ -58,9 +56,7 @@
 
 typedef TraceBackend Tracing;
 
-#else /* INCLUDE_TRACE */
-
+#else // !INCLUDE_TRACE
 #include "trace/noTraceBackend.hpp"
-
-#endif /* INCLUDE_TRACE */
-#endif /* SHARE_VM_TRACE_TRACEBACKEND_HPP */
+#endif // INCLUDE_TRACE
+#endif // SHARE_VM_TRACE_TRACEBACKEND_HPP
--- a/src/share/vm/trace/traceEvent.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/trace/traceEvent.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -33,7 +33,6 @@
 };
 
 #if INCLUDE_TRACE
-
 #include "trace/traceBackend.hpp"
 #include "trace/tracing.hpp"
 #include "tracefiles/traceEventIds.hpp"
@@ -154,6 +153,5 @@
   }
 };
 
-#endif /* INCLUDE_TRACE */
-
-#endif /* SHARE_VM_TRACE_TRACEEVENT_HPP */
+#endif // INCLUDE_TRACE
+#endif // SHARE_VM_TRACE_TRACEEVENT_HPP
--- a/src/share/vm/trace/traceEventClasses.xsl	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/trace/traceEventClasses.xsl	Fri Apr 10 16:58:26 2015 -0700
@@ -41,17 +41,14 @@
 #include "trace/traceEvent.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ticks.hpp"
-
 #if INCLUDE_TRACE
-
-
 #include "trace/traceStream.hpp"
 #include "utilities/ostream.hpp"
 
   <xsl:apply-templates select="trace/events/struct" mode="trace"/>
   <xsl:apply-templates select="trace/events/event" mode="trace"/>
 
-#else
+#else // !INCLUDE_TRACE
 
 class TraceEvent {
 public:
@@ -65,9 +62,8 @@
   <xsl:apply-templates select="trace/events/struct" mode="empty"/>
   <xsl:apply-templates select="trace/events/event" mode="empty"/>
 
-#endif
-
-#endif
+#endif // INCLUDE_TRACE
+#endif // TRACEFILES_TRACEEVENTCLASSES_HPP
 </xsl:template>
 
 <xsl:template match="struct" mode="trace">
--- a/src/share/vm/trace/traceEventIds.xsl	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/trace/traceEventIds.xsl	Fri Apr 10 16:58:26 2015 -0700
@@ -29,13 +29,11 @@
 <xsl:template match="/">
   <xsl:call-template name="file-header"/>
 
-#ifndef TRACEFILES_JFREVENTIDS_HPP
-#define TRACEFILES_JFREVENTIDS_HPP
+#ifndef TRACEFILES_TRACEEVENTIDS_HPP
+#define TRACEFILES_TRACEEVENTIDS_HPP
 
 #include "utilities/macros.hpp"
-
 #if INCLUDE_TRACE
-
 #include "trace/traceDataTypes.hpp"
 
 /**
@@ -67,8 +65,8 @@
 typedef enum TraceEventId  TraceEventId;
 typedef enum TraceStructId TraceStructId;
 
-#endif
-#endif
+#endif // INCLUDE_TRACE
+#endif // TRACEFILES_TRACEEVENTIDS_HPP
 </xsl:template>
 
 </xsl:stylesheet>
--- a/src/share/vm/trace/traceMacros.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/trace/traceMacros.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -22,8 +22,8 @@
  *
  */
 
-#ifndef SHARE_VM_TRACE_TRACE_MACRO_HPP
-#define SHARE_VM_TRACE_TRACE_MACRO_HPP
+#ifndef SHARE_VM_TRACE_TRACEMACROS_HPP
+#define SHARE_VM_TRACE_TRACEMACROS_HPP
 
 #define EVENT_THREAD_EXIT(thread)
 #define EVENT_THREAD_DESTRUCT(thread)
@@ -41,4 +41,4 @@
 #define TRACE_TEMPLATES(template)
 #define TRACE_INTRINSICS(do_intrinsic, do_class, do_name, do_signature, do_alias)
 
-#endif
+#endif // SHARE_VM_TRACE_TRACEMACROS_HPP
--- a/src/share/vm/trace/traceStream.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/trace/traceStream.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,9 +26,7 @@
 #define SHARE_VM_TRACE_TRACESTREAM_HPP
 
 #include "utilities/macros.hpp"
-
 #if INCLUDE_TRACE
-
 #include "oops/klass.hpp"
 #include "oops/method.hpp"
 #include "oops/symbol.hpp"
@@ -117,5 +115,5 @@
   }
 };
 
-#endif /* INCLUDE_TRACE */
-#endif /* SHARE_VM_TRACE_TRACESTREAM_HPP */
+#endif // INCLUDE_TRACE
+#endif // SHARE_VM_TRACE_TRACESTREAM_HPP
--- a/src/share/vm/trace/traceTypes.xsl	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/trace/traceTypes.xsl	Fri Apr 10 16:58:26 2015 -0700
@@ -29,15 +29,14 @@
 <xsl:template match="/">
   <xsl:call-template name="file-header"/>
 
-#ifndef TRACEFILES_JFRTYPES_HPP
-#define TRACEFILES_JFRTYPES_HPP
+#ifndef TRACEFILES_TRACETYPES_HPP
+#define TRACEFILES_TRACETYPES_HPP
 
 #include "oops/symbol.hpp"
 #include "trace/traceDataTypes.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/ticks.hpp"
 
-
 enum JVMContentType {
   _not_a_content_type = (JVM_CONTENT_TYPES_START - 1),
   
@@ -58,7 +57,7 @@
 };
 
 /**
- * Create typedefs for the JRA types:
+ * Create typedefs for the TRACE types:
  *   typedef s8 TYPE_LONG;
  *   typedef s4 TYPE_INTEGER;
  *   typedef const char * TYPE_STRING;
@@ -68,7 +67,7 @@
 typedef <xsl:value-of select="@type"/>  TYPE_<xsl:value-of select="@symbol"/>;
 </xsl:for-each>
 
-#endif // JFRFILES_JFRTYPES_HPP
+#endif // TRACEFILES_TRACETYPES_HPP
 </xsl:template>
 
 </xsl:stylesheet>
--- a/src/share/vm/trace/tracetypes.xml	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/trace/tracetypes.xml	Fri Apr 10 16:58:26 2015 -0700
@@ -98,6 +98,7 @@
       <value type="SYMBOL" field="name" label="Name"/>
       <value type="SYMBOL" field="signature" label="Signature"/>
       <value type="SHORT" field="modifiers" label="Access modifiers"/>
+      <value type="BOOLEAN" field="hidden" label="Hidden"/>
     </content_type>
 
     <content_type id="UTFConstant" hr_name="UTF constant"
--- a/src/share/vm/trace/tracing.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/trace/tracing.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -28,4 +28,4 @@
 #include "tracefiles/traceEventClasses.hpp"
 #include "tracefiles/traceEventIds.hpp"
 
-#endif
+#endif // SHARE_VM_TRACE_TRACING_HPP
--- a/src/share/vm/utilities/accessFlags.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/utilities/accessFlags.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -62,6 +62,21 @@
   } while(f != old_flags);
 }
 
+// Returns true iff this thread succeeded in setting the bit.
+bool AccessFlags::atomic_set_one_bit(jint bit) {
+  // Atomically update the flags with the bit given
+  jint old_flags, new_flags, f;
+  bool is_setting_bit = false;
+  do {
+    old_flags = _flags;
+    new_flags = old_flags | bit;
+    is_setting_bit = old_flags != new_flags;
+    f = Atomic::cmpxchg(new_flags, &_flags, old_flags);
+  } while(f != old_flags);
+
+  return is_setting_bit;
+}
+
 #if !defined(PRODUCT) || INCLUDE_JVMTI
 
 void AccessFlags::print_on(outputStream* st) const {
--- a/src/share/vm/utilities/accessFlags.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/utilities/accessFlags.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -170,6 +170,7 @@
 
   // Atomic update of flags
   void atomic_set_bits(jint bits);
+  bool atomic_set_one_bit(jint bit);
   void atomic_clear_bits(jint bits);
 
  private:
@@ -230,12 +231,13 @@
                                          atomic_set_bits(JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE);
                                        }
 
-  void set_on_stack(const bool value)
+  bool set_on_stack(const bool value)
                                        {
                                          if (value) {
-                                           atomic_set_bits(JVM_ACC_ON_STACK);
+                                           return atomic_set_one_bit(JVM_ACC_ON_STACK);
                                          } else {
                                            atomic_clear_bits(JVM_ACC_ON_STACK);
+                                           return true; // Ignored
                                          }
                                        }
   // Conversion
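The bool return value exists so that racing threads can tell which of them actually set JVM_ACC_ON_STACK and perform a once-only action. A hedged sketch (not part of this change; the flags word is initialized via set_flags purely for illustration):

    AccessFlags flags;
    flags.set_flags(0);                        // start with no bits set
    bool first  = flags.set_on_stack(true);    // true: this call set the bit
    bool second = flags.set_on_stack(true);    // false: the bit was already set
    flags.set_on_stack(false);                 // clearing always reports true (ignored)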
--- a/src/share/vm/utilities/array.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/utilities/array.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -28,6 +28,7 @@
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/metaspace.hpp"
+#include "runtime/orderAccess.hpp"
 
 // correct linkage required to compile w/o warnings
 // (must be on file level - cannot be local)
@@ -304,6 +305,7 @@
   friend class MetadataFactory;
   friend class VMStructs;
   friend class MethodHandleCompiler;           // special case
+  friend class WhiteBox;
 protected:
   int _length;                                 // the number of array elements
   T   _data[1];                                // the array memory
@@ -325,6 +327,31 @@
 
   static size_t byte_sizeof(int length) { return sizeof(Array<T>) + MAX2(length - 1, 0) * sizeof(T); }
 
+  // WhiteBox API helper.
+  // Can't distinguish between an array of length 0 and an array of length 1;
+  // it returns 0 in both of those cases.
+  static int bytes_to_length(size_t bytes)       {
+    assert(is_size_aligned(bytes, BytesPerWord), "Must be, for now");
+
+    if (sizeof(Array<T>) >= bytes) {
+      return 0;
+    }
+
+    size_t left = bytes - sizeof(Array<T>);
+    assert(is_size_aligned(left, sizeof(T)), "Must be");
+
+    size_t elements = left / sizeof(T);
+    assert(elements <= (size_t)INT_MAX, err_msg("number of elements " SIZE_FORMAT " doesn't fit into an int.", elements));
+
+    int length = (int)elements;
+
+    assert((size_t)size(length) * BytesPerWord == bytes,
+        err_msg("Expected: " SIZE_FORMAT " got: " SIZE_FORMAT,
+                bytes, (size_t)size(length) * BytesPerWord));
+
+    return length;
+  }
+
   explicit Array(int length) : _length(length) {
     assert(length >= 0, "illegal length");
   }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/chunkedList.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "utilities/chunkedList.hpp"
+#include "utilities/debug.hpp"
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+template <typename T>
+class TestChunkedList {
+  typedef ChunkedList<T, mtOther> ChunkedListT;
+
+ public:
+  static void testEmpty() {
+    ChunkedListT buffer;
+    assert(buffer.size() == 0, "assert");
+  }
+
+  static void testFull() {
+    ChunkedListT buffer;
+    for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) {
+      buffer.push((T)i);
+    }
+    assert(buffer.size() == ChunkedListT::BufferSize, "assert");
+    assert(buffer.is_full(), "assert");
+  }
+
+  static void testSize() {
+    ChunkedListT buffer;
+    for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) {
+      assert(buffer.size() == i, "assert");
+      buffer.push((T)i);
+      assert(buffer.size() == i + 1, "assert");
+    }
+  }
+
+  static void testClear() {
+    ChunkedListT buffer;
+
+    buffer.clear();
+    assert(buffer.size() == 0, "assert");
+
+    for (uintptr_t i = 0; i < ChunkedListT::BufferSize / 2; i++) {
+      buffer.push((T)i);
+    }
+    buffer.clear();
+    assert(buffer.size() == 0, "assert");
+
+    for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) {
+      buffer.push((T)i);
+    }
+    buffer.clear();
+    assert(buffer.size() == 0, "assert");
+  }
+
+  static void testAt() {
+    ChunkedListT buffer;
+
+    for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) {
+      buffer.push((T)i);
+      assert(buffer.at(i) == (T)i, "assert");
+    }
+
+    for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) {
+      assert(buffer.at(i) == (T)i, "assert");
+    }
+  }
+
+  static void test() {
+    testEmpty();
+    testFull();
+    testSize();
+    testClear();
+    testAt();
+  }
+};
+
+class Metadata;
+
+void TestChunkedList_test() {
+  TestChunkedList<Metadata*>::test();
+  TestChunkedList<size_t>::test();
+}
+
+#endif // PRODUCT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/chunkedList.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_CHUNKED_LIST_HPP
+#define SHARE_VM_UTILITIES_CHUNKED_LIST_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
+
+template <class T, MEMFLAGS F> class ChunkedList : public CHeapObj<F> {
+  template <class U> friend class TestChunkedList;
+
+  static const size_t BufferSize = 64;
+
+  T  _values[BufferSize];
+  T* _top;
+
+  ChunkedList<T, F>* _next_used;
+  ChunkedList<T, F>* _next_free;
+
+  T const * end() const {
+    return &_values[BufferSize];
+  }
+
+ public:
+  ChunkedList<T, F>() : _top(_values), _next_used(NULL), _next_free(NULL) {}
+
+  bool is_full() const {
+    return _top == end();
+  }
+
+  void clear() {
+    _top = _values;
+    // Don't clear the next pointers since that would interfere
+    // with other threads trying to iterate through the lists.
+  }
+
+  void push(T m) {
+    assert(!is_full(), "Buffer is full");
+    *_top = m;
+    _top++;
+  }
+
+  void set_next_used(ChunkedList<T, F>* buffer) { _next_used = buffer; }
+  void set_next_free(ChunkedList<T, F>* buffer) { _next_free = buffer; }
+
+  ChunkedList<T, F>* next_used() const          { return _next_used; }
+  ChunkedList<T, F>* next_free() const          { return _next_free; }
+
+  size_t size() const {
+    return pointer_delta(_top, _values, sizeof(T));
+  }
+
+  T at(size_t i) {
+    assert(i < size(), err_msg("IOOBE i: " SIZE_FORMAT " size(): " SIZE_FORMAT, i, size()));
+    return _values[i];
+  }
+};
+
+#endif // SHARE_VM_UTILITIES_CHUNKED_LIST_HPP
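A small sketch (not part of this change) of the intended usage: fill a buffer and, when it is full, chain it onto a "used" list via set_next_used() and start a fresh one. The names are made up:

    class Metadata;   // payload type, stored by pointer only

    typedef ChunkedList<Metadata*, mtInternal> MetadataBuffer;

    static MetadataBuffer* _current   = NULL;
    static MetadataBuffer* _used_list = NULL;

    static void record(Metadata* m) {
      if (_current == NULL || _current->is_full()) {
        if (_current != NULL) {
          _current->set_next_used(_used_list);   // chain the full buffer
          _used_list = _current;
        }
        _current = new MetadataBuffer();
      }
      _current->push(m);
    }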
--- a/src/share/vm/utilities/debug.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/utilities/debug.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -266,17 +266,19 @@
     "native memory for metadata",
     "shared read only space",
     "shared read write space",
-    "shared miscellaneous data space"
+    "shared miscellaneous data space",
+    "shared miscellaneous code space"
   };
   static const char* flag[] = {
     "Metaspace",
     "SharedReadOnlySize",
     "SharedReadWriteSize",
-    "SharedMiscDataSize"
+    "SharedMiscDataSize",
+    "SharedMiscCodeSize"
   };
 
    warning("\nThe %s is not large enough\n"
-           "to preload requested classes. Use -XX:%s=\n"
+           "to preload requested classes. Use -XX:%s=<size>\n"
            "to increase the initial size of %s.\n",
            name[shared_space], flag[shared_space], name[shared_space]);
    exit(2);
--- a/src/share/vm/utilities/debug.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/utilities/debug.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -246,7 +246,8 @@
   SharedPermGen,
   SharedReadOnly,
   SharedReadWrite,
-  SharedMiscData
+  SharedMiscData,
+  SharedMiscCode
 };
 
 void report_out_of_shared_space(SharedSpaceType space_type);
--- a/src/share/vm/utilities/defaultStream.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/utilities/defaultStream.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -41,6 +41,8 @@
 
   void init();
   void init_log();
+  fileStream* open_file(const char* log_name);
+  void start_log();
   void finish_log();
   void finish_log_on_error(char *buf, int buflen);
  public:
--- a/src/share/vm/utilities/exceptions.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/utilities/exceptions.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -85,9 +85,13 @@
 #endif // ASSERT
 
   if (thread->is_VM_thread()
-      || !thread->can_call_java() ) {
+      || !thread->can_call_java()
+      || DumpSharedSpaces ) {
     // We do not care what kind of exception we get for the vm-thread or a thread which
     // is compiling.  We just install a dummy exception object
+    //
+    // We also cannot throw a proper exception when dumping, because we cannot run
+    // Java bytecodes now. A dummy exception will suffice.
     thread->set_pending_exception(Universe::vm_exception(), file, line);
     return true;
   }
@@ -108,9 +112,13 @@
   }
 
   if (thread->is_VM_thread()
-      || !thread->can_call_java() ) {
+      || !thread->can_call_java()
+      || DumpSharedSpaces ) {
     // We do not care what kind of exception we get for the vm-thread or a thread which
     // is compiling.  We just install a dummy exception object
+    //
+    // We also cannot throw a proper exception when dumping, because we cannot run
+    // Java bytecodes now. A dummy exception will suffice.
     thread->set_pending_exception(Universe::vm_exception(), file, line);
     return true;
   }
--- a/src/share/vm/utilities/globalDefinitions.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/utilities/globalDefinitions.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -558,6 +558,27 @@
   return fabs(value);
 }
 
+//----------------------------------------------------------------------------------------------------
+// Special casts
+// Cast floats into same-size integers and vice-versa w/o changing bit-pattern
+typedef union {
+  jfloat f;
+  jint i;
+} FloatIntConv;
+
+typedef union {
+  jdouble d;
+  jlong l;
+  julong ul;
+} DoubleLongConv;
+
+inline jint    jint_cast    (jfloat  x)  { return ((FloatIntConv*)&x)->i; }
+inline jfloat  jfloat_cast  (jint    x)  { return ((FloatIntConv*)&x)->f; }
+
+inline jlong   jlong_cast   (jdouble x)  { return ((DoubleLongConv*)&x)->l;  }
+inline julong  julong_cast  (jdouble x)  { return ((DoubleLongConv*)&x)->ul; }
+inline jdouble jdouble_cast (jlong   x)  { return ((DoubleLongConv*)&x)->d;  }
+
 inline jint low (jlong value)                    { return jint(value); }
 inline jint high(jlong value)                    { return jint(value >> 32); }
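The unions above reinterpret the bits rather than converting the value; for example (standard IEEE-754 facts, not part of the patch):

    jfloat one  = 1.0f;
    jint   bits = jint_cast(one);             // 0x3f800000, the bit pattern of 1.0f
    assert(jfloat_cast(bits) == 1.0f, "round-trips exactly");
    jint   value = (jint)one;                 // 1: an ordinary value conversion, by contrast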
 
--- a/src/share/vm/utilities/globalDefinitions_gcc.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/utilities/globalDefinitions_gcc.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -167,17 +167,6 @@
 typedef uint32_t juint;
 typedef uint64_t julong;
 
-//----------------------------------------------------------------------------------------------------
-// Special (possibly not-portable) casts
-// Cast floats into same-size integers and vice-versa w/o changing bit-pattern
-// %%%%%% These seem like standard C++ to me--how about factoring them out? - Ungar
-
-inline jint    jint_cast   (jfloat  x)           { return *(jint*   )&x; }
-inline jlong   jlong_cast  (jdouble x)           { return *(jlong*  )&x; }
-inline julong  julong_cast (jdouble x)           { return *(julong* )&x; }
-
-inline jfloat  jfloat_cast (jint    x)           { return *(jfloat* )&x; }
-inline jdouble jdouble_cast(jlong   x)           { return *(jdouble*)&x; }
 
 //----------------------------------------------------------------------------------------------------
 // Constant for jlong (specifying a long long constant is C++ compiler specific)
--- a/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -33,7 +33,9 @@
 
 
 # include <ctype.h>
+#define __USE_LEGACY_PROTOTYPES__
 # include <dirent.h>
+#undef __USE_LEGACY_PROTOTYPES__
 # include <string.h>
 # include <strings.h>     // for bsd'isms
 # include <stdarg.h>
@@ -183,15 +185,6 @@
 typedef unsigned int       juint;
 typedef unsigned long long julong;
 
-//----------------------------------------------------------------------------------------------------
-// Special (possibly not-portable) casts
-// Cast floats into same-size integers and vice-versa w/o changing bit-pattern
-
-inline jint    jint_cast   (jfloat  x)           { return *(jint*   )&x; }
-inline jlong   jlong_cast  (jdouble x)           { return *(jlong*  )&x; }
-
-inline jfloat  jfloat_cast (jint    x)           { return *(jfloat* )&x; }
-inline jdouble jdouble_cast(jlong   x)           { return *(jdouble*)&x; }
 
 //----------------------------------------------------------------------------------------------------
 // Constant for jlong (specifying a long long constant is C++ compiler specific)
--- a/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -116,16 +116,6 @@
 typedef unsigned int     juint;
 typedef unsigned __int64 julong;
 
-//----------------------------------------------------------------------------------------------------
-// Special (possibly not-portable) casts
-// Cast floats into same-size integers and vice-versa w/o changing bit-pattern
-
-inline jint    jint_cast   (jfloat  x)           { return *(jint*   )&x; }
-inline jlong   jlong_cast  (jdouble x)           { return *(jlong*  )&x; }
-
-inline jfloat  jfloat_cast (jint    x)           { return *(jfloat* )&x; }
-inline jdouble jdouble_cast(jlong   x)           { return *(jdouble*)&x; }
-
 
 //----------------------------------------------------------------------------------------------------
 // Non-standard stdlib-like stuff:
--- a/src/share/vm/utilities/globalDefinitions_xlc.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/utilities/globalDefinitions_xlc.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -114,16 +114,6 @@
 typedef uint32_t juint;
 typedef uint64_t julong;
 
-//----------------------------------------------------------------------------------------------------
-// Special (possibly not-portable) casts
-// Cast floats into same-size integers and vice-versa w/o changing bit-pattern
-// %%%%%% These seem like standard C++ to me--how about factoring them out? - Ungar
-
-inline jint    jint_cast   (jfloat  x)           { return *(jint*   )&x; }
-inline jlong   jlong_cast  (jdouble x)           { return *(jlong*  )&x; }
-
-inline jfloat  jfloat_cast (jint    x)           { return *(jfloat* )&x; }
-inline jdouble jdouble_cast(jlong   x)           { return *(jdouble*)&x; }
 
 //----------------------------------------------------------------------------------------------------
 // Constant for jlong (specifying a long long constant is C++ compiler specific)
--- a/src/share/vm/utilities/growableArray.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/utilities/growableArray.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -147,6 +147,9 @@
   }
 };
 
+template<class E> class GrowableArrayIterator;
+template<class E, class UnaryPredicate> class GrowableArrayFilterIterator;
+
 template<class E> class GrowableArray : public GenericGrowableArray {
   friend class VMStructs;
 
@@ -243,6 +246,14 @@
     return _data[_len-1];
   }
 
+  GrowableArrayIterator<E> begin() const {
+    return GrowableArrayIterator<E>(this, 0);
+  }
+
+  GrowableArrayIterator<E> end() const {
+    return GrowableArrayIterator<E>(this, length());
+  }
+
   void push(const E& elem) { append(elem); }
 
   E pop() {
@@ -338,6 +349,7 @@
 
   // inserts the given element before the element at index i
   void insert_before(const int idx, const E& elem) {
+    assert(0 <= idx && idx <= _len, "illegal index");
     check_nesting();
     if (_len == _max) grow(_len);
     for (int j = _len - 1; j >= idx; j--) {
@@ -349,7 +361,7 @@
 
   void appendAll(const GrowableArray<E>* l) {
     for (int i = 0; i < l->_len; i++) {
-      raw_at_put_grow(_len, l->_data[i], 0);
+      raw_at_put_grow(_len, l->_data[i], E());
     }
   }
 
@@ -462,4 +474,83 @@
     tty->print("}\n");
 }
 
+// Custom STL-style iterator to iterate over GrowableArrays
+// It is constructed by invoking GrowableArray::begin() and GrowableArray::end()
+template<class E> class GrowableArrayIterator : public StackObj {
+  friend class GrowableArray<E>;
+  template<class F, class UnaryPredicate> friend class GrowableArrayFilterIterator;
+
+ private:
+  const GrowableArray<E>* _array; // GrowableArray we iterate over
+  int _position;                  // The current position in the GrowableArray
+
+  // Private constructor used in GrowableArray::begin() and GrowableArray::end()
+  GrowableArrayIterator(const GrowableArray<E>* array, int position) : _array(array), _position(position) {
+    assert(0 <= position && position <= _array->length(), "illegal position");
+  }
+
+ public:
+  GrowableArrayIterator<E>& operator++()  { ++_position; return *this; }
+  E operator*()                           { return _array->at(_position); }
+
+  bool operator==(const GrowableArrayIterator<E>& rhs)  {
+    assert(_array == rhs._array, "iterator belongs to different array");
+    return _position == rhs._position;
+  }
+
+  bool operator!=(const GrowableArrayIterator<E>& rhs)  {
+    assert(_array == rhs._array, "iterator belongs to different array");
+    return _position != rhs._position;
+  }
+};
+
+// Custom STL-style iterator to iterate over elements of a GrowableArray that satisfy a given predicate
+template<class E, class UnaryPredicate> class GrowableArrayFilterIterator : public StackObj {
+  friend class GrowableArray<E>;
+
+ private:
+  const GrowableArray<E>* _array;   // GrowableArray we iterate over
+  int _position;                    // Current position in the GrowableArray
+  UnaryPredicate _predicate;        // Unary predicate the elements of the GrowableArray should satisfy
+
+ public:
+  GrowableArrayFilterIterator(const GrowableArrayIterator<E>& begin, UnaryPredicate filter_predicate)
+   : _array(begin._array), _position(begin._position), _predicate(filter_predicate) {
+    // Advance to first element satisfying the predicate
+    while(_position != _array->length() && !_predicate(_array->at(_position))) {
+      ++_position;
+    }
+  }
+
+  GrowableArrayFilterIterator<E, UnaryPredicate>& operator++() {
+    do {
+      // Advance to next element satisfying the predicate
+      ++_position;
+    } while(_position != _array->length() && !_predicate(_array->at(_position)));
+    return *this;
+  }
+
+  E operator*()   { return _array->at(_position); }
+
+  bool operator==(const GrowableArrayIterator<E>& rhs)  {
+    assert(_array == rhs._array, "iterator belongs to different array");
+    return _position == rhs._position;
+  }
+
+  bool operator!=(const GrowableArrayIterator<E>& rhs)  {
+    assert(_array == rhs._array, "iterator belongs to different array");
+    return _position != rhs._position;
+  }
+
+  bool operator==(const GrowableArrayFilterIterator<E, UnaryPredicate>& rhs)  {
+    assert(_array == rhs._array, "iterator belongs to different array");
+    return _position == rhs._position;
+  }
+
+  bool operator!=(const GrowableArrayFilterIterator<E, UnaryPredicate>& rhs)  {
+    assert(_array == rhs._array, "iterator belongs to different array");
+    return _position != rhs._position;
+  }
+};
+
 #endif // SHARE_VM_UTILITIES_GROWABLEARRAY_HPP
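Usage sketch for the two iterators (illustrative only; the IsEven predicate is made up, and a ResourceMark is needed because the default GrowableArray is resource-allocated):

    struct IsEven {
      bool operator()(int e) const { return (e % 2) == 0; }
    };

    ResourceMark rm;
    GrowableArray<int> a;
    for (int i = 0; i < 6; i++) a.append(i);

    // Visit every element.
    for (GrowableArrayIterator<int> it = a.begin(); it != a.end(); ++it) {
      int e = *it;                      // 0, 1, 2, 3, 4, 5
    }

    // Visit only the elements satisfying the predicate.
    GrowableArrayFilterIterator<int, IsEven> fit(a.begin(), IsEven());
    while (fit != a.end()) {
      int e = *fit;                     // 0, 2, 4
      ++fit;
    }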
--- a/src/share/vm/utilities/hashtable.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/utilities/hashtable.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -36,21 +36,22 @@
 #include "utilities/numberSeq.hpp"
 
 
-// This is a generic hashtable, designed to be used for the symbol
-// and string tables.
-//
-// It is implemented as an open hash table with a fixed number of buckets.
-//
-// %note:
-//  - HashtableEntrys are allocated in blocks to reduce the space overhead.
+// This hashtable is implemented as an open hash table with a fixed number of buckets.
 
-template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
-  BasicHashtableEntry<F>* entry;
-
-  if (_free_list) {
+template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry_free_list() {
+  BasicHashtableEntry<F>* entry = NULL;
+  if (_free_list != NULL) {
     entry = _free_list;
     _free_list = _free_list->next();
-  } else {
+  }
+  return entry;
+}
+
+// HashtableEntrys are allocated in blocks to reduce the space overhead.
+template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
+  BasicHashtableEntry<F>* entry = new_entry_free_list();
+
+  if (entry == NULL) {
     if (_first_free_entry + _entry_size >= _end_block) {
       int block_size = MIN2(512, MAX2((int)_table_size / 2, (int)_number_of_entries));
       int len = _entry_size * block_size;
@@ -83,9 +84,9 @@
 // This is somewhat an arbitrary heuristic but if one bucket gets to
 // rehash_count which is currently 100, there's probably something wrong.
 
-template <MEMFLAGS F> bool BasicHashtable<F>::check_rehash_table(int count) {
-  assert(table_size() != 0, "underflow");
-  if (count > (((double)number_of_entries()/(double)table_size())*rehash_multiple)) {
+template <class T, MEMFLAGS F> bool RehashableHashtable<T, F>::check_rehash_table(int count) {
+  assert(this->table_size() != 0, "underflow");
+  if (count > (((double)this->number_of_entries()/(double)this->table_size())*rehash_multiple)) {
     // Set a flag for the next safepoint, which should be at some guaranteed
     // safepoint interval.
     return true;
@@ -93,13 +94,13 @@
   return false;
 }
 
-template <class T, MEMFLAGS F> juint Hashtable<T, F>::_seed = 0;
+template <class T, MEMFLAGS F> juint RehashableHashtable<T, F>::_seed = 0;
 
 // Create a new table and using alternate hash code, populate the new table
 // with the existing elements.   This can be used to change the hash code
 // and could in the future change the size of the table.
 
-template <class T, MEMFLAGS F> void Hashtable<T, F>::move_to(Hashtable<T, F>* new_table) {
+template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::move_to(RehashableHashtable<T, F>* new_table) {
 
   // Initialize the global seed for hashing.
   _seed = AltHashing::compute_seed();
@@ -109,7 +110,7 @@
 
   // Iterate through the table and create a new entry for the new table
   for (int i = 0; i < new_table->table_size(); ++i) {
-    for (HashtableEntry<T, F>* p = bucket(i); p != NULL; ) {
+    for (HashtableEntry<T, F>* p = this->bucket(i); p != NULL; ) {
       HashtableEntry<T, F>* next = p->next();
       T string = p->literal();
       // Use alternate hashing algorithm on the symbol in the first table
@@ -238,11 +239,11 @@
   }
 }
 
-template <class T, MEMFLAGS F> int Hashtable<T, F>::literal_size(Symbol *symbol) {
+template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(Symbol *symbol) {
   return symbol->size() * HeapWordSize;
 }
 
-template <class T, MEMFLAGS F> int Hashtable<T, F>::literal_size(oop oop) {
+template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(oop oop) {
   // NOTE: this would over-count if (pre-JDK8) java_lang_Class::has_offset_field() is true,
   // and the String.value array is shared by several Strings. However, starting from JDK8,
   // the String.value array is not shared anymore.
@@ -255,12 +256,12 @@
 // Note: if you create a new subclass of Hashtable<MyNewType, F>, you will need to
 // add a new function Hashtable<T, F>::literal_size(MyNewType lit)
 
-template <class T, MEMFLAGS F> void Hashtable<T, F>::dump_table(outputStream* st, const char *table_name) {
+template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::dump_table(outputStream* st, const char *table_name) {
   NumberSeq summary;
   int literal_bytes = 0;
   for (int i = 0; i < this->table_size(); ++i) {
     int count = 0;
-    for (HashtableEntry<T, F>* e = bucket(i);
+    for (HashtableEntry<T, F>* e = this->bucket(i);
        e != NULL; e = e->next()) {
       count++;
       literal_bytes += literal_size(e->literal());
@@ -270,7 +271,7 @@
   double num_buckets = summary.num();
   double num_entries = summary.sum();
 
-  int bucket_bytes = (int)num_buckets * sizeof(bucket(0));
+  int bucket_bytes = (int)num_buckets * sizeof(HashtableBucket<F>);
   int entry_bytes  = (int)num_entries * sizeof(HashtableEntry<T, F>);
   int total_bytes = literal_bytes +  bucket_bytes + entry_bytes;
 
@@ -352,12 +353,20 @@
 
 #endif
 // Explicitly instantiate these types
+#if INCLUDE_ALL_GCS
+template class Hashtable<nmethod*, mtGC>;
+template class HashtableEntry<nmethod*, mtGC>;
+template class BasicHashtable<mtGC>;
+#endif
 template class Hashtable<ConstantPool*, mtClass>;
+template class RehashableHashtable<Symbol*, mtSymbol>;
+template class RehashableHashtable<oopDesc*, mtSymbol>;
 template class Hashtable<Symbol*, mtSymbol>;
 template class Hashtable<Klass*, mtClass>;
 template class Hashtable<oop, mtClass>;
 #if defined(SOLARIS) || defined(CHECK_UNHANDLED_OOPS)
 template class Hashtable<oop, mtSymbol>;
+template class RehashableHashtable<oop, mtSymbol>;
 #endif // SOLARIS || CHECK_UNHANDLED_OOPS
 template class Hashtable<oopDesc*, mtSymbol>;
 template class Hashtable<Symbol*, mtClass>;
--- a/src/share/vm/utilities/hashtable.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/utilities/hashtable.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -178,11 +178,6 @@
   void verify_lookup_length(double load);
 #endif
 
-  enum {
-    rehash_count = 100,
-    rehash_multiple = 60
-  };
-
   void initialize(int table_size, int entry_size, int number_of_entries);
 
   // Accessor
@@ -194,12 +189,12 @@
   // The following method is not MT-safe and must be done under lock.
   BasicHashtableEntry<F>** bucket_addr(int i) { return _buckets[i].entry_addr(); }
 
+  // Attempt to get an entry from the free list
+  BasicHashtableEntry<F>* new_entry_free_list();
+
   // Table entry management
   BasicHashtableEntry<F>* new_entry(unsigned int hashValue);
 
-  // Check that the table is unbalanced
-  bool check_rehash_table(int count);
-
   // Used when moving the entry to another table
   // Clean up links, but do not add to free_list
   void unlink_entry(BasicHashtableEntry<F>* entry) {
@@ -277,8 +272,30 @@
     return (HashtableEntry<T, F>**)BasicHashtable<F>::bucket_addr(i);
   }
 
+};
+
+template <class T, MEMFLAGS F> class RehashableHashtable : public Hashtable<T, F> {
+ protected:
+
+  enum {
+    rehash_count = 100,
+    rehash_multiple = 60
+  };
+
+  // Check whether the table is unbalanced
+  bool check_rehash_table(int count);
+
+ public:
+  RehashableHashtable(int table_size, int entry_size)
+    : Hashtable<T, F>(table_size, entry_size) { }
+
+  RehashableHashtable(int table_size, int entry_size,
+                   HashtableBucket<F>* buckets, int number_of_entries)
+    : Hashtable<T, F>(table_size, entry_size, buckets, number_of_entries) { }
+
+
   // Function to move these elements into the new table.
-  void move_to(Hashtable<T, F>* new_table);
+  void move_to(RehashableHashtable<T, F>* new_table);
   static bool use_alternate_hashcode()  { return _seed != 0; }
   static juint seed()                    { return _seed; }
 
@@ -292,7 +309,6 @@
   static int literal_size(ConstantPool *cp) {Unimplemented(); return 0;}
   static int literal_size(Klass *k)         {Unimplemented(); return 0;}
 
-public:
   void dump_table(outputStream* st, const char *table_name);
 
  private:
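With this split, only tables that derive from RehashableHashtable keep the seeded-rehash machinery. A hypothetical opt-in (names made up, not part of this change):

    class ExampleTable : public RehashableHashtable<Symbol*, mtSymbol> {
     public:
      ExampleTable(int size)
        : RehashableHashtable<Symbol*, mtSymbol>(size, sizeof(HashtableEntry<Symbol*, mtSymbol>)) { }

      // After probing one bucket, ask whether the chain length indicates an
      // unbalanced table; if so, a rehash can be requested for the next safepoint,
      // where move_to() repopulates a new table using the alternate hash seed.
      bool too_long_chain(int probe_count) { return check_rehash_table(probe_count); }
    };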
--- a/src/share/vm/utilities/hashtable.inline.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/utilities/hashtable.inline.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -26,6 +26,7 @@
 #define SHARE_VM_UTILITIES_HASHTABLE_INLINE_HPP
 
 #include "memory/allocation.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "utilities/hashtable.hpp"
 #include "utilities/dtrace.hpp"
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/linkedlist.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+#include "runtime/os.hpp"
+#include "utilities/linkedlist.hpp"
+#include "memory/allocation.hpp"
+#include "memory/allocation.inline.hpp"
+
+class Integer : public StackObj {
+ private:
+  int  _value;
+ public:
+  Integer(int i) : _value(i) { }
+
+  int   value() const { return _value; }
+  bool  equals(const Integer& i) const {
+   return _value == i.value();
+  }
+};
+
+int compare_Integer(const Integer& i1, const Integer& i2) {
+  return i1.value() - i2.value();
+}
+
+void check_list_values(const int* expected, const LinkedList<Integer>* list) {
+  LinkedListNode<Integer>* head = list->head();
+  int index = 0;
+  while (head != NULL) {
+    assert(head->peek()->value() == expected[index], "Unexpected value");
+    head = head->next();
+    index ++;
+  }
+}
+
+void Test_linked_list() {
+  LinkedListImpl<Integer, ResourceObj::C_HEAP, mtTest>  ll;
+
+
+  // Test regular linked list
+  assert(ll.is_empty(), "Start with empty list");
+  Integer one(1), two(2), three(3), four(4), five(5), six(6);
+
+  ll.add(six);
+  assert(!ll.is_empty(), "Should not be empty");
+
+  Integer* i = ll.find(six);
+  assert(i != NULL, "Should find it");
+
+  i = ll.find(three);
+  assert(i == NULL, "Not in the list");
+
+  LinkedListNode<Integer>* node = ll.find_node(six);
+  assert(node != NULL, "6 is in the list");
+
+  ll.insert_after(three, node);
+  ll.insert_before(one, node);
+  int expected[3] = {1, 6, 3};
+  check_list_values(expected, &ll);
+
+  ll.add(two);
+  ll.add(four);
+  ll.add(five);
+
+  // Test sorted linked list
+  SortedLinkedList<Integer, compare_Integer, ResourceObj::C_HEAP, mtTest> sl;
+  assert(sl.is_empty(), "Start with empty list");
+
+  size_t ll_size = ll.size();
+  sl.move(&ll);
+  size_t sl_size = sl.size();
+
+  assert(ll_size == sl_size, "Should be the same size");
+  assert(ll.is_empty(), "No more entries");
+
+  // sorted result
+  int sorted_result[] = {1, 2, 3, 4, 5, 6};
+  check_list_values(sorted_result, &sl);
+
+  node = sl.find_node(four);
+  assert(node != NULL, "4 is in the list");
+  sl.remove_before(node);
+  sl.remove_after(node);
+  int remains[] = {1, 2, 4, 6};
+  check_list_values(remains, &sl);
+}
+#endif // PRODUCT
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/linkedlist.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,416 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_LINKED_LIST_HPP
+#define SHARE_VM_UTILITIES_LINKED_LIST_HPP
+
+#include "memory/allocation.hpp"
+
+/*
+ * A generic linked list implementation that can use various backing
+ * storages, such as the C heap, an arena, or a resource area.
+ */
+
+
+// An entry in a linked list. It should use the same backing storage
+// as the linked list that contains this entry.
+template <class E> class LinkedListNode : public ResourceObj {
+ private:
+  E                       _data;  // embedded content
+  LinkedListNode<E>*      _next;  // next entry
+
+ protected:
+  LinkedListNode() : _next(NULL) { }
+
+ public:
+  LinkedListNode(const E& e): _data(e), _next(NULL) { }
+
+  inline void set_next(LinkedListNode<E>* node) { _next = node; }
+  inline LinkedListNode<E> * next() const       { return _next; }
+
+  E*  data() { return &_data; }
+  const E* peek() const { return &_data; }
+};
+
+// A linked list interface. It does not specify
+// any storage type it uses, so all methods involving
+// memory allocation or deallocation are pure virtual
+template <class E> class LinkedList : public ResourceObj {
+ protected:
+  LinkedListNode<E>*    _head;
+
+ public:
+  LinkedList() : _head(NULL) { }
+
+  inline void set_head(LinkedListNode<E>* h) { _head = h; }
+  inline LinkedListNode<E>* head() const     { return _head; }
+  inline bool is_empty()           const     { return head() == NULL; }
+
+  inline size_t size() const {
+    LinkedListNode<E>* p;
+    size_t count = 0;
+    for (p = head(); p != NULL; count++, p = p->next());
+    return count;
+  }
+
+  // Move all entries from specified linked list to this one
+  virtual void move(LinkedList<E>* list) = 0;
+
+  // Add an entry to this linked list
+  virtual LinkedListNode<E>* add(const E& e) = 0;
+  // Add a node to this linked list
+  virtual void add(LinkedListNode<E>* node) = 0;
+
+  // Add a linked list to this linked list
+  virtual bool  add(const LinkedList<E>* list) = 0;
+
+  // Search entry in the linked list
+  virtual LinkedListNode<E>* find_node(const E& e) = 0;
+  virtual E* find(const E& e) = 0;
+
+  // Insert entry to the linked list
+  virtual LinkedListNode<E>* insert_before(const E& e, LinkedListNode<E>* ref) = 0;
+  virtual LinkedListNode<E>* insert_after (const E& e, LinkedListNode<E>* ref) = 0;
+
+  // Remove entry from the linked list
+  virtual bool               remove(const E& e) = 0;
+  virtual bool               remove(LinkedListNode<E>* node) = 0;
+  virtual bool               remove_before(LinkedListNode<E>* ref) = 0;
+  virtual bool               remove_after(LinkedListNode<E>*  ref) = 0;
+
+  LinkedListNode<E>* unlink_head() {
+    LinkedListNode<E>* h = this->head();
+    if (h != NULL) {
+      this->set_head(h->next());
+    }
+    return h;
+  }
+
+  DEBUG_ONLY(virtual ResourceObj::allocation_type storage_type() = 0;)
+};
+
+// A linked list implementation.
+// The linked list can be allocated in various types of memory: C heap, arena, resource area, etc.
+template <class E, ResourceObj::allocation_type T = ResourceObj::C_HEAP,
+  MEMFLAGS F = mtNMT, AllocFailType alloc_failmode = AllocFailStrategy::RETURN_NULL>
+  class LinkedListImpl : public LinkedList<E> {
+ protected:
+  Arena*                 _arena;
+ public:
+  LinkedListImpl() :  _arena(NULL) { }
+  LinkedListImpl(Arena* a) : _arena(a) { }
+
+  virtual ~LinkedListImpl() {
+    clear();
+  }
+
+  virtual void clear() {
+    LinkedListNode<E>* p = this->head();
+    this->set_head(NULL);
+    while (p != NULL) {
+      LinkedListNode<E>* to_delete = p;
+      p = p->next();
+      delete_node(to_delete);
+    }
+  }
+
+  // Add an entry to the linked list
+  virtual LinkedListNode<E>* add(const E& e)  {
+    LinkedListNode<E>* node = this->new_node(e);
+    if (node != NULL) {
+      this->add(node);
+    }
+
+    return node;
+  }
+
+  virtual void add(LinkedListNode<E>* node) {
+    assert(node != NULL, "NULL pointer");
+    node->set_next(this->head());
+    this->set_head(node);
+  }
+
+  // Move a linked list to this linked list; both have to be allocated on the same
+  // storage type.
+  virtual void move(LinkedList<E>* list) {
+    assert(list->storage_type() == this->storage_type(), "Different storage type");
+    LinkedListNode<E>* node = this->head();
+    while (node != NULL && node->next() != NULL) {
+      node = node->next();
+    }
+    if (node == NULL) {
+      this->set_head(list->head());
+    } else {
+      node->set_next(list->head());
+    }
+    // All entries are moved
+    list->set_head(NULL);
+  }
+
+  virtual bool add(const LinkedList<E>* list) {
+    LinkedListNode<E>* node = list->head();
+    while (node != NULL) {
+      if (this->add(*node->peek()) == NULL) {
+        return false;
+      }
+      node = node->next();
+    }
+    return true;
+  }
+
+
+  virtual LinkedListNode<E>* find_node(const E& e) {
+    LinkedListNode<E>* p = this->head();
+    while (p != NULL && !p->peek()->equals(e)) {
+      p = p->next();
+    }
+    return p;
+  }
+
+  E* find(const E& e) {
+    LinkedListNode<E>* node = find_node(e);
+    return (node == NULL) ? NULL : node->data();
+  }
+
+
+  // Add an entry in front of the reference entry
+  LinkedListNode<E>* insert_before(const E& e, LinkedListNode<E>* ref_node) {
+    LinkedListNode<E>* node = this->new_node(e);
+    if (node == NULL) return NULL;
+    if (ref_node == this->head()) {
+      node->set_next(ref_node);
+      this->set_head(node);
+    } else {
+      LinkedListNode<E>* p = this->head();
+      while (p != NULL && p->next() != ref_node) {
+        p = p->next();
+      }
+      assert(p != NULL, "ref_node not in the list");
+      node->set_next(ref_node);
+      p->set_next(node);
+    }
+    return node;
+  }
+
+   // Add an entry behind the reference entry
+   LinkedListNode<E>* insert_after(const E& e, LinkedListNode<E>* ref_node) {
+     LinkedListNode<E>* node = this->new_node(e);
+     if (node == NULL) return NULL;
+     node->set_next(ref_node->next());
+     ref_node->set_next(node);
+     return node;
+   }
+
+   // Remove an entry from the linked list.
+   // Return true if the entry is successfully removed
+   virtual bool remove(const E& e) {
+     LinkedListNode<E>* tmp = this->head();
+     LinkedListNode<E>* prev = NULL;
+
+     while (tmp != NULL) {
+       if (tmp->peek()->equals(e)) {
+         return remove_after(prev);
+       }
+       prev = tmp;
+       tmp = tmp->next();
+     }
+     return false;
+  }
+
+  // Remove the node after the reference entry
+  virtual bool remove_after(LinkedListNode<E>* prev) {
+    LinkedListNode<E>* to_delete;
+    if (prev == NULL) {
+      to_delete = this->unlink_head();
+    } else {
+      to_delete = prev->next();
+      if (to_delete != NULL) {
+        prev->set_next(to_delete->next());
+      }
+    }
+
+    if (to_delete != NULL) {
+      delete_node(to_delete);
+      return true;
+    }
+    return false;
+  }
+
+  virtual bool remove(LinkedListNode<E>* node) {
+    LinkedListNode<E>* p = this->head();
+    while (p != NULL && p->next() != node) {
+      p = p->next();
+    }
+    if (p != NULL) {
+      p->set_next(node->next());
+      delete_node(node);
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  virtual bool remove_before(LinkedListNode<E>* ref) {
+    assert(ref != NULL, "NULL pointer");
+    LinkedListNode<E>* p = this->head();
+    LinkedListNode<E>* to_delete = NULL; // to be deleted
+    LinkedListNode<E>* prev = NULL;      // node before the node to be deleted
+    while (p != NULL && p != ref) {
+      prev = to_delete;
+      to_delete = p;
+      p = p->next();
+    }
+    if (p == NULL || to_delete == NULL) return false;
+    assert(to_delete->next() == ref, "Wrong node to delete");
+    assert(prev == NULL || prev->next() == to_delete,
+      "Sanity check");
+    if (prev == NULL) {
+      assert(to_delete == this->head(), "Must be head");
+      this->set_head(to_delete->next());
+    } else {
+      prev->set_next(to_delete->next());
+    }
+    delete_node(to_delete);
+    return true;
+  }
+
+  DEBUG_ONLY(ResourceObj::allocation_type storage_type() { return T; })
+ protected:
+  // Create new linked list node object in specified storage
+  LinkedListNode<E>* new_node(const E& e) const {
+    switch (T) {
+      case ResourceObj::ARENA: {
+        assert(_arena != NULL, "Arena not set");
+        return new(_arena) LinkedListNode<E>(e);
+      }
+      case ResourceObj::RESOURCE_AREA:
+      case ResourceObj::C_HEAP: {
+        if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
+          return new(std::nothrow, T, F) LinkedListNode<E>(e);
+        } else {
+          return new(T, F) LinkedListNode<E>(e);
+        }
+      }
+      default:
+        ShouldNotReachHere();
+    }
+    return NULL;
+  }
+
+  // Delete linked list node object
+  void delete_node(LinkedListNode<E>* node) {
+    if (T == ResourceObj::C_HEAP) {
+      delete node;
+    }
+  }
+};
+
+// Sorted linked list. The linked list maintains the sorting order specified by the
+// comparison function.
+template <class E, int (*FUNC)(const E&, const E&),
+  ResourceObj::allocation_type T = ResourceObj::C_HEAP,
+  MEMFLAGS F = mtNMT, AllocFailType alloc_failmode = AllocFailStrategy::RETURN_NULL>
+  class SortedLinkedList : public LinkedListImpl<E, T, F, alloc_failmode> {
+ public:
+  SortedLinkedList() { }
+  SortedLinkedList(Arena* a) : LinkedListImpl<E, T, F, alloc_failmode>(a) { }
+
+  virtual LinkedListNode<E>* add(const E& e) {
+    return LinkedListImpl<E, T, F, alloc_failmode>::add(e);
+  }
+
+  virtual void move(LinkedList<E>* list) {
+    assert(list->storage_type() == this->storage_type(), "Different storage type");
+    LinkedListNode<E>* node;
+    while ((node = list->unlink_head()) != NULL) {
+      this->add(node);
+    }
+    assert(list->is_empty(), "All entries are moved");
+  }
+
+  virtual void add(LinkedListNode<E>* node) {
+    assert(node != NULL, "NULL pointer");
+    LinkedListNode<E>* tmp = this->head();
+    LinkedListNode<E>* prev = NULL;
+
+    int cmp_val;
+    while (tmp != NULL) {
+      cmp_val = FUNC(*tmp->peek(), *node->peek());
+      if (cmp_val >= 0) {
+        break;
+      }
+      prev = tmp;
+      tmp = tmp->next();
+    }
+
+    if (prev != NULL) {
+      node->set_next(prev->next());
+      prev->set_next(node);
+    } else {
+      node->set_next(this->head());
+      this->set_head(node);
+    }
+  }
+
+  virtual bool add(const LinkedList<E>* list) {
+    return LinkedListImpl<E, T, F, alloc_failmode>::add(list);
+  }
+
+  virtual LinkedListNode<E>* find_node(const E& e) {
+    LinkedListNode<E>* p = this->head();
+
+    while (p != NULL) {
+      int comp_val = FUNC(*p->peek(), e);
+      if (comp_val == 0) {
+        return p;
+      } else if (comp_val > 0) {
+        return NULL;
+      }
+      p = p->next();
+    }
+    return NULL;
+  }
+};
+
+// Iterates all entries in the list
+template <class E> class LinkedListIterator : public StackObj {
+ private:
+  LinkedListNode<E>* _p;
+  bool               _is_empty;
+ public:
+  LinkedListIterator(LinkedListNode<E>* head) : _p(head) {
+    _is_empty = (head == NULL);
+  }
+
+  bool is_empty() const { return _is_empty; }
+
+  const E* next() {
+    if (_p == NULL) return NULL;
+    const E* e = _p->peek();
+    _p = _p->next();
+    return e;
+  }
+};
+
+#endif
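A minimal usage sketch for the list templates above (illustration only, not part of this changeset). It assumes the surrounding HotSpot headers; the element type IntEntry and the comparator compare_int_entry are hypothetical names. An element type only needs to be copyable and to provide the equals() used by find()/remove(); the comparator must match the FUNC template parameter and needs external linkage to be usable as a template argument.

    struct IntEntry {
      int _value;
      IntEntry(int v) : _value(v) { }
      bool equals(const IntEntry& other) const { return _value == other._value; }
    };

    int compare_int_entry(const IntEntry& a, const IntEntry& b) {
      return a._value - b._value;
    }

    static void linked_list_usage_sketch() {
      // C_HEAP-backed sorted list; nodes are heap-allocated and freed by delete_node().
      SortedLinkedList<IntEntry, compare_int_entry> list;
      list.add(IntEntry(3));
      list.add(IntEntry(1));
      list.add(IntEntry(2));                    // kept sorted: 1, 2, 3

      IntEntry* two = list.find(IntEntry(2));   // sorted find stops at the first larger entry
      assert(two != NULL && two->_value == 2, "sanity");
      bool removed = list.remove(IntEntry(1));  // unlinks and, for C_HEAP storage, frees the node
      assert(removed, "entry was present");

      // Walk the remaining entries in order.
      LinkedListIterator<IntEntry> itr(list.head());
      while (const IntEntry* e = itr.next()) {
        tty->print_cr("%d", e->_value);         // prints 2, then 3
      }
    }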
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/nativeCallStack.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/os.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/nativeCallStack.hpp"
+
+const NativeCallStack NativeCallStack::EMPTY_STACK(0, false);
+
+NativeCallStack::NativeCallStack(int toSkip, bool fillStack) :
+  _hash_value(0) {
+
+#if !PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
+  fillStack = false;
+#endif
+
+  if (fillStack) {
+    os::get_native_stack(_stack, NMT_TrackingStackDepth, toSkip);
+  } else {
+    for (int index = 0; index < NMT_TrackingStackDepth; index ++) {
+      _stack[index] = NULL;
+    }
+  }
+}
+
+NativeCallStack::NativeCallStack(address* pc, int frameCount) {
+  int frameToCopy = (frameCount < NMT_TrackingStackDepth) ?
+    frameCount : NMT_TrackingStackDepth;
+  int index;
+  for (index = 0; index < frameToCopy; index ++) {
+    _stack[index] = pc[index];
+  }
+  for (; index < NMT_TrackingStackDepth; index ++) {
+    _stack[index] = NULL;
+  }
+}
+
+// number of stack frames captured
+int NativeCallStack::frames() const {
+  int index;
+  for (index = 0; index < NMT_TrackingStackDepth; index ++) {
+    if (_stack[index] == NULL) {
+      break;
+    }
+  }
+  return index;
+}
+
+// Hash code. Any better algorithm?
+int NativeCallStack::hash() const {
+  long hash_val = _hash_value;
+  if (hash_val == 0) {
+    long pc;
+    int  index;
+    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
+      pc = (long)_stack[index];
+      if (pc == 0) break;
+      hash_val += pc;
+    }
+
+    NativeCallStack* p = const_cast<NativeCallStack*>(this);
+    p->_hash_value = (int)(hash_val & 0xFFFFFFFF);
+  }
+  return _hash_value;
+}
+
+void NativeCallStack::print_on(outputStream* out) const {
+  print_on(out, 0);
+}
+
+// Decode and print this call path
+void NativeCallStack::print_on(outputStream* out, int indent) const {
+  address pc;
+  char    buf[1024];
+  int     offset;
+  if (is_empty()) {
+    for (int index = 0; index < indent; index ++) out->print(" ");
+#if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
+    out->print("[BOOTSTRAP]");
+#else
+    out->print("[No stack]");
+#endif
+  } else {
+    for (int frame = 0; frame < NMT_TrackingStackDepth; frame ++) {
+      pc = get_frame(frame);
+      if (pc == NULL) break;
+      // Print indent
+      for (int index = 0; index < indent; index ++) out->print(" ");
+      if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
+        out->print_cr("[" PTR_FORMAT "] %s+0x%x", p2i(pc), buf, offset);
+      } else {
+        out->print_cr("[" PTR_FORMAT "]", p2i(pc));
+      }
+    }
+  }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/nativeCallStack.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_NATIVE_CALL_STACK_HPP
+#define SHARE_VM_UTILITIES_NATIVE_CALL_STACK_HPP
+
+#include "memory/allocation.hpp"
+#include "services/nmtCommon.hpp"
+#include "utilities/ostream.hpp"
+
+/*
+ * This class represents a native call path (does not include Java frames).
+ *
+ * This class was developed in the context of native memory tracking; it can
+ * also be a useful tool for debugging purposes.
+ *
+ * For example, the following code prints out the current native call path:
+ *
+ *   ....
+ *   NativeCallStack here;
+ *   here.print_on(tty);
+ *   ....
+ *
+ * However, there are a couple of restrictions on this class. If they are not
+ * strictly followed, native memory tracking may be broken badly.
+ *
+ * 1. The number of stack frames to capture is defined by native memory
+ *    tracking. This number affects how much memory native memory tracking
+ *    uses.
+ * 2. The class is a strict stack object; no heap or virtual memory can be
+ *    allocated from it.
+ */
+class NativeCallStack : public StackObj {
+ public:
+  static const NativeCallStack EMPTY_STACK;
+
+ private:
+  address   _stack[NMT_TrackingStackDepth];
+  int       _hash_value;
+
+ public:
+  NativeCallStack(int toSkip = 0, bool fillStack = false);
+  NativeCallStack(address* pc, int frameCount);
+
+
+  // Returns true if this is an empty stack
+  inline bool is_empty() const {
+    return _stack[0] == NULL;
+  }
+
+  // number of stack frames captured
+  int frames() const;
+
+  inline int compare(const NativeCallStack& other) const {
+    return memcmp(_stack, other._stack, sizeof(_stack));
+  }
+
+  inline bool equals(const NativeCallStack& other) const {
+    // compare hash values
+    if (hash() != other.hash()) return false;
+    // compare each frame
+    return compare(other) == 0;
+  }
+
+  inline address get_frame(int index) const {
+    assert(index >= 0 && index < NMT_TrackingStackDepth, "Index out of bounds");
+    return _stack[index];
+  }
+
+  // Hash code. Any better algorithm?
+  int hash() const;
+
+  void print_on(outputStream* out) const;
+  void print_on(outputStream* out, int indent) const;
+};
+
+#endif
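A short, hypothetical sketch of how NativeCallStack is typically used (illustration only, not part of this changeset); whether any frames are actually captured depends on PLATFORM_NATIVE_STACK_WALKING_SUPPORTED on the build platform.

    static void report_current_call_path(outputStream* out) {
      // Capture up to NMT_TrackingStackDepth frames, skipping this frame itself.
      NativeCallStack stack(1, true);
      if (stack.is_empty()) {
        out->print_cr("[No stack]");
        return;
      }
      out->print_cr("captured %d frame(s), hash = 0x%x", stack.frames(), stack.hash());
      stack.print_on(out, 2);  // decode and print each frame, indented by two spaces

      // equals() compares the cached hash value first and only memcmp's the frames
      // on a hash match, which is what lets NMT use the stack as a malloc-site key.
      assert(stack.equals(stack), "must be reflexive");
    }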
--- a/src/share/vm/utilities/ostream.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/utilities/ostream.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "compiler/compileLog.hpp"
+#include "gc_implementation/shared/gcId.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/arguments.hpp"
 #include "utilities/defaultStream.hpp"
@@ -240,6 +241,14 @@
   return;
 }
 
+void outputStream::gclog_stamp(const GCId& gc_id) {
+  date_stamp(PrintGCDateStamps);
+  stamp(PrintGCTimeStamps);
+  if (PrintGCID) {
+    print("#%u: ", gc_id.id());
+  }
+}
+
 outputStream& outputStream::indent() {
   while (_position < _indentation) sp();
   return *this;
@@ -356,11 +365,11 @@
 xmlStream*   xtty;
 outputStream* tty;
 outputStream* gclog_or_tty;
+CDS_ONLY(fileStream* classlist_file;) // Only dump the classes that can be stored into the CDS archive
 extern Mutex* tty_lock;
 
 #define EXTRACHARLEN   32
 #define CURRENTAPPX    ".current"
-#define FILENAMEBUFLEN  1024
 // convert YYYY-MM-DD HH:MM:SS to YYYY-MM-DD_HH-MM-SS
 char* get_datetime_string(char *buf, size_t len) {
   os::local_time_string(buf, len);
@@ -394,7 +403,6 @@
     buffer_length = strlen(log_name) + 1;
   }
 
-  // const char* star = strchr(basename, '*');
   const char* pts = strstr(basename, "%p");
   int pid_pos = (pts == NULL) ? -1 : (pts - nametail);
 
@@ -409,6 +417,11 @@
     buffer_length += strlen(tms);
   }
 
+  // File name is too long.
+  if (buffer_length > JVM_MAXPATHLEN) {
+    return NULL;
+  }
+
   // Create big enough buffer.
   char *buf = NEW_C_HEAP_ARRAY(char, buffer_length, mtInternal);
 
@@ -467,7 +480,8 @@
   return buf;
 }
 
-// log_name comes from -XX:LogFile=log_name or -Xloggc:log_name
+// log_name comes from -XX:LogFile=log_name, -Xloggc:log_name or
+// -XX:DumpLoadedClassList=<file_name>
 // in log_name, %p => pid1234 and
 //              %t => YYYY-MM-DD_HH-MM-SS
 static const char* make_log_name(const char* log_name, const char* force_directory) {
@@ -481,46 +495,88 @@
 void test_loggc_filename() {
   int pid;
   char  tms[32];
-  char  i_result[FILENAMEBUFLEN];
+  char  i_result[JVM_MAXPATHLEN];
   const char* o_result;
   get_datetime_string(tms, sizeof(tms));
   pid = os::current_process_id();
 
   // test.log
-  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "test.log", tms);
+  jio_snprintf(i_result, JVM_MAXPATHLEN, "test.log", tms);
   o_result = make_log_name_internal("test.log", NULL, pid, tms);
   assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test.log\", NULL)");
   FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
 
   // test-%t-%p.log
-  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "test-%s-pid%u.log", tms, pid);
+  jio_snprintf(i_result, JVM_MAXPATHLEN, "test-%s-pid%u.log", tms, pid);
   o_result = make_log_name_internal("test-%t-%p.log", NULL, pid, tms);
   assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test-%%t-%%p.log\", NULL)");
   FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
 
   // test-%t%p.log
-  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "test-%spid%u.log", tms, pid);
+  jio_snprintf(i_result, JVM_MAXPATHLEN, "test-%spid%u.log", tms, pid);
   o_result = make_log_name_internal("test-%t%p.log", NULL, pid, tms);
   assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test-%%t%%p.log\", NULL)");
   FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
 
   // %p%t.log
-  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "pid%u%s.log", pid, tms);
+  jio_snprintf(i_result, JVM_MAXPATHLEN, "pid%u%s.log", pid, tms);
   o_result = make_log_name_internal("%p%t.log", NULL, pid, tms);
   assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%p%%t.log\", NULL)");
   FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
 
   // %p-test.log
-  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "pid%u-test.log", pid);
+  jio_snprintf(i_result, JVM_MAXPATHLEN, "pid%u-test.log", pid);
   o_result = make_log_name_internal("%p-test.log", NULL, pid, tms);
   assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%p-test.log\", NULL)");
   FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
 
   // %t.log
-  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "%s.log", tms);
+  jio_snprintf(i_result, JVM_MAXPATHLEN, "%s.log", tms);
   o_result = make_log_name_internal("%t.log", NULL, pid, tms);
   assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%t.log\", NULL)");
   FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+
+  {
+    // longest filename
+    char longest_name[JVM_MAXPATHLEN];
+    memset(longest_name, 'a', sizeof(longest_name));
+    longest_name[JVM_MAXPATHLEN - 1] = '\0';
+    o_result = make_log_name_internal((const char*)&longest_name, NULL, pid, tms);
+    assert(strcmp(longest_name, o_result) == 0, err_msg("longest name does not match. expected '%s' but got '%s'", longest_name, o_result));
+    FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
+  }
+
+  {
+    // too long file name
+    char too_long_name[JVM_MAXPATHLEN + 100];
+    int too_long_length = sizeof(too_long_name);
+    memset(too_long_name, 'a', too_long_length);
+    too_long_name[too_long_length - 1] = '\0';
+    o_result = make_log_name_internal((const char*)&too_long_name, NULL, pid, tms);
+    assert(o_result == NULL, err_msg("Too long file name should return NULL, but got '%s'", o_result));
+  }
+
+  {
+    // too long with timestamp
+    char longest_name[JVM_MAXPATHLEN];
+    memset(longest_name, 'a', JVM_MAXPATHLEN);
+    longest_name[JVM_MAXPATHLEN - 3] = '%';
+    longest_name[JVM_MAXPATHLEN - 2] = 't';
+    longest_name[JVM_MAXPATHLEN - 1] = '\0';
+    o_result = make_log_name_internal((const char*)&longest_name, NULL, pid, tms);
+    assert(o_result == NULL, err_msg("Too long file name after timestamp expansion should return NULL, but got '%s'", o_result));
+  }
+
+  {
+    // too long with pid
+    char longest_name[JVM_MAXPATHLEN];
+    memset(longest_name, 'a', JVM_MAXPATHLEN);
+    longest_name[JVM_MAXPATHLEN - 3] = '%';
+    longest_name[JVM_MAXPATHLEN - 2] = 'p';
+    longest_name[JVM_MAXPATHLEN - 1] = '\0';
+    o_result = make_log_name_internal((const char*)&longest_name, NULL, pid, tms);
+    assert(o_result == NULL, err_msg("Too long file name after pid expansion should return NULL, but got '%s'", o_result));
+  }
 }
 #endif // PRODUCT
 
@@ -629,9 +685,16 @@
   _bytes_written = 0L;
   _file_name = make_log_name(file_name, NULL);
 
+  if (_file_name == NULL) {
+    warning("Cannot open file %s: file name is too long.\n", file_name);
+    _need_close = false;
+    UseGCLogFileRotation = false;
+    return;
+  }
+
   // gc log file rotation
   if (UseGCLogFileRotation && NumberOfGCLogFiles > 1) {
-    char tempbuf[FILENAMEBUFLEN];
+    char tempbuf[JVM_MAXPATHLEN];
     jio_snprintf(tempbuf, sizeof(tempbuf), "%s.%d" CURRENTAPPX, _file_name, _cur_file_num);
     _file = fopen(tempbuf, "w");
   } else {
@@ -663,10 +726,10 @@
 // concurrent GC threads to run parallel with VMThread at safepoint, write and rotate_log
 // must be synchronized.
 void gcLogFileStream::rotate_log(bool force, outputStream* out) {
-  char time_msg[FILENAMEBUFLEN];
+  char time_msg[O_BUFLEN];
   char time_str[EXTRACHARLEN];
-  char current_file_name[FILENAMEBUFLEN];
-  char renamed_file_name[FILENAMEBUFLEN];
+  char current_file_name[JVM_MAXPATHLEN];
+  char renamed_file_name[JVM_MAXPATHLEN];
 
   if (!should_rotate(force)) {
     return;
@@ -705,12 +768,15 @@
   // have a form of extended_filename.<i>.current where i is the current rotation
   // file number. After it reaches max file size, the file will be saved and renamed
   // with .current removed from its tail.
-  size_t filename_len = strlen(_file_name);
   if (_file != NULL) {
-    jio_snprintf(renamed_file_name, filename_len + EXTRACHARLEN, "%s.%d",
+    jio_snprintf(renamed_file_name, JVM_MAXPATHLEN, "%s.%d",
                  _file_name, _cur_file_num);
-    jio_snprintf(current_file_name, filename_len + EXTRACHARLEN, "%s.%d" CURRENTAPPX,
-                 _file_name, _cur_file_num);
+    int result = jio_snprintf(current_file_name, JVM_MAXPATHLEN,
+                              "%s.%d" CURRENTAPPX, _file_name, _cur_file_num);
+    if (result >= JVM_MAXPATHLEN) {
+      warning("Cannot create new log file name: %s: file name is too long.\n", current_file_name);
+      return;
+    }
 
     const char* msg = force ? "GC log rotation request has been received."
                             : "GC log file has reached the maximum size.";
@@ -749,19 +815,23 @@
 
   _cur_file_num++;
   if (_cur_file_num > NumberOfGCLogFiles - 1) _cur_file_num = 0;
-  jio_snprintf(current_file_name,  filename_len + EXTRACHARLEN, "%s.%d" CURRENTAPPX,
+  int result = jio_snprintf(current_file_name,  JVM_MAXPATHLEN, "%s.%d" CURRENTAPPX,
                _file_name, _cur_file_num);
+  if (result >= JVM_MAXPATHLEN) {
+    warning("Cannot create new log file name: %s: file name is too long.\n", current_file_name);
+    return;
+  }
+
   _file = fopen(current_file_name, "w");
 
   if (_file != NULL) {
     _bytes_written = 0L;
     _need_close = true;
     // reuse current_file_name for time_msg
-    jio_snprintf(current_file_name, filename_len + EXTRACHARLEN,
+    jio_snprintf(current_file_name, JVM_MAXPATHLEN,
                  "%s.%d", _file_name, _cur_file_num);
     jio_snprintf(time_msg, sizeof(time_msg), "%s GC log file created %s\n",
-                           os::local_time_string((char *)time_str, sizeof(time_str)),
-                           current_file_name);
+                 os::local_time_string((char *)time_str, sizeof(time_str)), current_file_name);
     write(time_msg, strlen(time_msg));
 
     if (out != NULL) {
@@ -809,32 +879,64 @@
   return _log_file != NULL;
 }
 
+fileStream* defaultStream::open_file(const char* log_name) {
+  const char* try_name = make_log_name(log_name, NULL);
+  if (try_name == NULL) {
+    warning("Cannot open file %s: file name is too long.\n", log_name);
+    return NULL;
+  }
+
+  fileStream* file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
+  FREE_C_HEAP_ARRAY(char, try_name, mtInternal);
+  if (file->is_open()) {
+    return file;
+  }
+
+  // Try again to open the file in the temp directory.
+  delete file;
+  char warnbuf[O_BUFLEN*2];
+  jio_snprintf(warnbuf, sizeof(warnbuf), "Warning:  Cannot open log file: %s\n", log_name);
+  // Note:  This feature is for maintainer use only.  No need for L10N.
+  jio_print(warnbuf);
+  try_name = make_log_name(log_name, os::get_temp_directory());
+  if (try_name == NULL) {
+    warning("Cannot open file %s: file name is too long for directory %s.\n", log_name, os::get_temp_directory());
+    return NULL;
+  }
+
+  jio_snprintf(warnbuf, sizeof(warnbuf),
+               "Warning:  Forcing option -XX:LogFile=%s\n", try_name);
+  jio_print(warnbuf);
+
+  file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
+  FREE_C_HEAP_ARRAY(char, try_name, mtInternal);
+  if (file->is_open()) {
+    return file;
+  }
+
+  delete file;
+  return NULL;
+}
+
 void defaultStream::init_log() {
   // %%% Need a MutexLocker?
   const char* log_name = LogFile != NULL ? LogFile : "hotspot_%p.log";
-  const char* try_name = make_log_name(log_name, NULL);
-  fileStream* file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
-  if (!file->is_open()) {
-    // Try again to open the file.
-    char warnbuf[O_BUFLEN*2];
-    jio_snprintf(warnbuf, sizeof(warnbuf),
-                 "Warning:  Cannot open log file: %s\n", try_name);
-    // Note:  This feature is for maintainer use only.  No need for L10N.
-    jio_print(warnbuf);
-    FREE_C_HEAP_ARRAY(char, try_name, mtInternal);
-    try_name = make_log_name(log_name, os::get_temp_directory());
-    jio_snprintf(warnbuf, sizeof(warnbuf),
-                 "Warning:  Forcing option -XX:LogFile=%s\n", try_name);
-    jio_print(warnbuf);
-    delete file;
-    file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
+  fileStream* file = open_file(log_name);
+
+  if (file != NULL) {
+    _log_file = file;
+    _outer_xmlStream = new(ResourceObj::C_HEAP, mtInternal) xmlStream(file);
+    start_log();
+  } else {
+    // and leave xtty as NULL
+    LogVMOutput = false;
+    DisplayVMOutput = true;
+    LogCompilation = false;
   }
-  FREE_C_HEAP_ARRAY(char, try_name, mtInternal);
+}
 
-  if (file->is_open()) {
-    _log_file = file;
-    xmlStream* xs = new(ResourceObj::C_HEAP, mtInternal) xmlStream(file);
-    _outer_xmlStream = xs;
+void defaultStream::start_log() {
+  xmlStream* xs = _outer_xmlStream;
     if (this == tty)  xtty = xs;
     // Write XML header.
     xs->print_cr("<?xml version='1.0' encoding='UTF-8'?>");
@@ -889,13 +991,6 @@
     xs->head("tty");
     // All further non-markup text gets copied to the tty:
     xs->_text = this;  // requires friend declaration!
-  } else {
-    delete(file);
-    // and leave xtty as NULL
-    LogVMOutput = false;
-    DisplayVMOutput = true;
-    LogCompilation = false;
-  }
 }
 
 // finish_log() is called during normal VM shutdown. finish_log_on_error() is
@@ -1107,6 +1202,16 @@
     gclog_or_tty = gclog;
   }
 
+#if INCLUDE_CDS
+  // For -XX:DumpLoadedClassList=<file> option
+  if (DumpLoadedClassList != NULL) {
+    const char* list_name = make_log_name(DumpLoadedClassList, NULL);
+    classlist_file = new(ResourceObj::C_HEAP, mtInternal)
+                         fileStream(list_name);
+    FREE_C_HEAP_ARRAY(char, list_name, mtInternal);
+  }
+#endif
+
   // If we haven't lazily initialized the logfile yet, do it now,
   // to avoid the possibility of lazy initialization during a VM
   // crash, which can affect the stability of the fatal error handler.
@@ -1119,6 +1224,11 @@
   static bool ostream_exit_called = false;
   if (ostream_exit_called)  return;
   ostream_exit_called = true;
+#if INCLUDE_CDS
+  if (classlist_file != NULL) {
+    delete classlist_file;
+  }
+#endif
   if (gclog_or_tty != tty) {
       delete gclog_or_tty;
   }
--- a/src/share/vm/utilities/ostream.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/utilities/ostream.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -28,6 +28,7 @@
 #include "memory/allocation.hpp"
 #include "runtime/timer.hpp"
 
+class GCId;
 DEBUG_ONLY(class ResourceMark;)
 
 // Output streams for printing
@@ -107,6 +108,7 @@
    void date_stamp(bool guard) {
      date_stamp(guard, "", ": ");
    }
+   void gclog_stamp(const GCId& gc_id);
 
    // portable printing of 64 bit integers
    void print_jlong(jlong value);
@@ -212,6 +214,8 @@
   void flush();
 };
 
+CDS_ONLY(extern fileStream*   classlist_file;)
+
 // unlike fileStream, fdStream does unbuffered I/O by calling
 // open() and write() directly. It is async-safe, but output
 // from multiple thread may be mixed together. Used by fatal
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/stringUtils.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "utilities/stringUtils.hpp"
+
+int StringUtils::replace_no_expand(char* string, const char* from, const char* to) {
+  int replace_count = 0;
+  size_t from_len = strlen(from);
+  size_t to_len = strlen(to);
+  assert(from_len >= to_len, "must not expand input");
+
+  for (char* dst = string; *dst && (dst = strstr(dst, from)) != NULL;) {
+    char* left_over = dst + from_len;
+    memmove(dst, to, to_len);                       // does not copy trailing 0 of <to>
+    dst += to_len;                                  // skip over the replacement.
+    memmove(dst, left_over, strlen(left_over) + 1); // copies the trailing 0 of <left_over>
+    ++ replace_count;
+  }
+
+  return replace_count;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/stringUtils.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_STRINGUTILS_HPP
+#define SHARE_VM_UTILITIES_STRINGUTILS_HPP
+
+#include "memory/allocation.hpp"
+
+class StringUtils : AllStatic {
+public:
+  // Replace all occurrences of the substring <from> with the string <to>. <to>
+  // must be no longer than <from>. The input string is modified in-place.
+  //
+  // Replacement is done in a single pass left-to-right. So replace_no_expand("aaa", "aa", "a")
+  // will result in "aa", not "a".
+  //
+  // Returns the count of substrings that have been replaced.
+  static int replace_no_expand(char* string, const char* from, const char* to);
+};
+
+#endif // SHARE_VM_UTILITIES_STRINGUTILS_HPP
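A hypothetical sketch of StringUtils::replace_no_expand in action (illustration only, not part of this changeset). Because <to> may not be longer than <from>, the routine can edit a fixed, writable buffer in place; string literals must not be passed as the first argument.

    static void replace_no_expand_sketch() {
      char buf[64];
      strcpy(buf, "foo__SEP__bar__SEP__");  // writable buffer; <to> is shorter than <from>
      int count = StringUtils::replace_no_expand(buf, "__SEP__", "-");
      // count == 2 and buf now holds "foo-bar-"
    }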
--- a/src/share/vm/utilities/taskqueue.hpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/utilities/taskqueue.hpp	Fri Apr 10 16:58:26 2015 -0700
@@ -28,40 +28,8 @@
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
 #include "runtime/mutex.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "utilities/stack.hpp"
-#ifdef TARGET_OS_ARCH_linux_x86
-# include "orderAccess_linux_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_sparc
-# include "orderAccess_linux_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_zero
-# include "orderAccess_linux_zero.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_x86
-# include "orderAccess_solaris_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_sparc
-# include "orderAccess_solaris_sparc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_windows_x86
-# include "orderAccess_windows_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_arm
-# include "orderAccess_linux_arm.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_linux_ppc
-# include "orderAccess_linux_ppc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_aix_ppc
-# include "orderAccess_aix_ppc.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_x86
-# include "orderAccess_bsd_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_zero
-# include "orderAccess_bsd_zero.inline.hpp"
-#endif
 
 // Simple TaskQueue stats that are collected by default in debug builds.
 
--- a/src/share/vm/utilities/vmError.cpp	Fri Apr 10 16:55:38 2015 -0700
+++ b/src/share/vm/utilities/vmError.cpp	Fri Apr 10 16:58:26 2015 -0700
@@ -22,6 +22,7 @@
  *
  */
 
+#include <fcntl.h>
 #include "precompiled.hpp"
 #include "compiler/compileBroker.hpp"
 #include "compiler/disassembler.hpp"
@@ -31,7 +32,7 @@
 #include "runtime/frame.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/os.hpp"
-#include "runtime/thread.hpp"
+#include "runtime/thread.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "runtime/vm_operations.hpp"
 #include "services/memTracker.hpp"
@@ -799,6 +800,11 @@
        st->cr();
      }
 
+  STEP(228, "(Native Memory Tracking)" )
+     if (_verbose) {
+       MemTracker::error_report(st);
+     }
+
   STEP(230, "" )
 
      if (_verbose) {
@@ -863,7 +869,8 @@
 static int expand_and_open(const char* pattern, char* buf, size_t buflen, size_t pos) {
   int fd = -1;
   if (Arguments::copy_expand_pid(pattern, strlen(pattern), &buf[pos], buflen - pos)) {
-    fd = open(buf, O_RDWR | O_CREAT | O_TRUNC, 0666);
+    // the O_EXCL flag will cause the open to fail if the file exists
+    fd = open(buf, O_RDWR | O_CREAT | O_EXCL, 0666);
   }
   return fd;
 }
@@ -922,9 +929,6 @@
   static bool log_done = false;         // done saving error log
   static bool transmit_report_done = false; // done error reporting
 
-  // disble NMT to avoid further exception
-  MemTracker::shutdown(MemTracker::NMT_error_reporting);
-
   if (SuppressFatalErrorMessage) {
       os::abort();
   }
--- a/test/Makefile	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/Makefile	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1995, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1995, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -23,14 +23,36 @@
 #
 
 #
-# Makefile to run various jdk tests
+# Makefile to run various hotspot tests
 #
 
 GETMIXEDPATH=echo
 
-# Get OS/ARCH specifics
-OSNAME = $(shell uname -s)
-ifeq ($(OSNAME), SunOS)
+# Utilities used
+AWK       = awk
+CAT       = cat
+CD        = cd
+CHMOD     = chmod
+CP        = cp
+CUT       = cut
+DIRNAME   = dirname
+ECHO      = echo
+EGREP     = egrep
+EXPAND    = expand
+FIND      = find
+MKDIR     = mkdir
+PWD       = pwd
+SED       = sed
+SORT      = sort
+TEE       = tee
+UNAME     = uname
+UNIQ      = uniq
+WC        = wc
+ZIP       = zip
+
+# Get OS name from uname (Cygwin inexplicably adds _NT-5.1)
+UNAME_S := $(shell $(UNAME) -s | $(CUT) -f1 -d_)
+ifeq ($(UNAME_S), SunOS)
   PLATFORM = solaris
   SLASH_JAVA = /java
   ARCH = $(shell uname -p)
@@ -38,7 +60,7 @@
     ARCH=i586
   endif
 endif
-ifeq ($(OSNAME), Linux)
+ifeq ($(UNAME_S), Linux)
   PLATFORM = linux
   SLASH_JAVA = /java
   ARCH = $(shell uname -m)
@@ -46,7 +68,7 @@
     ARCH = i586
   endif
 endif
-ifeq ($(OSNAME), Darwin)
+ifeq ($(UNAME_S), Darwin)
   PLATFORM = bsd
   SLASH_JAVA = /java
   ARCH = $(shell uname -m)
@@ -54,7 +76,7 @@
     ARCH = i586
   endif
 endif
-ifeq ($(findstring BSD,$(OSNAME)), BSD)
+ifeq ($(findstring BSD,$(UNAME_S)), BSD)
   PLATFORM = bsd
   SLASH_JAVA = /java
   ARCH = $(shell uname -m)
@@ -63,12 +85,12 @@
   endif
 endif
 ifeq ($(PLATFORM),)
-  # detect wether we're running in MKS or cygwin
-  ifeq ($(OSNAME), Windows_NT) # MKS
+  # detect whether we're running in MKS or cygwin
+  ifeq ($(UNAME_S), Windows_NT) # MKS
     GETMIXEDPATH=dosname -s
   endif
-  ifeq ($(findstring CYGWIN,$(OSNAME)), CYGWIN)
-    GETMIXEDPATH=cygpath -m -s
+  ifeq ($(findstring CYGWIN,$(UNAME_S)), CYGWIN)
+    GETMIXEDPATH=cygpath -m
   endif
   PLATFORM = windows
   SLASH_JAVA = J:
@@ -92,13 +114,6 @@
   SLASH_JAVA = $(ALT_SLASH_JAVA)
 endif
 
-# Utilities used
-CD    = cd
-CP    = cp
-ECHO  = echo
-MKDIR = mkdir
-ZIP   = zip
-
 # Root of this test area (important to use full paths in some places)
 TEST_ROOT := $(shell pwd)
 
@@ -136,17 +151,83 @@
 endif
 
 # How to create the test bundle (pass or fail, we want to create this)
-BUNDLE_UP = ( $(MKDIR) -p `dirname $(ARCHIVE_BUNDLE)`     \
-	      && $(CD) $(ABS_TEST_OUTPUT_DIR)             \
-	      && $(ZIP) -q -r $(ARCHIVE_BUNDLE) . )
-BUNDLE_UP_FAILED = ( exitCode=$$? && $(BUNDLE_UP) && exit $${exitCode} )
+#   Follow command with ";$(BUNDLE_UP_AND_EXIT)", so it always gets executed.
+ZIP_UP_RESULTS = ( $(MKDIR) -p `$(DIRNAME) $(ARCHIVE_BUNDLE)`     \
+	           && $(CD) $(ABS_TEST_OUTPUT_DIR)             \
+	           && $(CHMOD) -R a+r . \
+	           && $(ZIP) -q -r $(ARCHIVE_BUNDLE) . )
+
+# important results files
+SUMMARY_TXT = $(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/JTreport/text/summary.txt")
+STATS_TXT_NAME = Stats.txt
+STATS_TXT = $(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/$(STATS_TXT_NAME)")
+RUNLIST   = $(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/runlist.txt")
+PASSLIST  = $(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/passlist.txt")
+FAILLIST  = $(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/faillist.txt")
+EXITCODE  = $(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/exitcode.txt")
+
+TESTEXIT = \
+  if [ ! -s $(EXITCODE) ] ; then \
+    $(ECHO) "ERROR: EXITCODE file not filled in."; \
+    $(ECHO) "1" > $(EXITCODE); \
+  fi ; \
+  testExitCode=`$(CAT) $(EXITCODE)`; \
+  $(ECHO) "EXIT CODE: $${testExitCode}"; \
+  exit $${testExitCode}
+
+BUNDLE_UP_AND_EXIT = \
+( \
+  jtregExitCode=$$? && \
+  _summary="$(SUMMARY_TXT)"; \
+  $(RM) -f $(STATS_TXT) $(RUNLIST) $(PASSLIST) $(FAILLIST) $(EXITCODE); \
+  $(ECHO) "$${jtregExitCode}" > $(EXITCODE); \
+  if [ -r "$${_summary}" ] ; then \
+    $(ECHO) "Summary: $(UNIQUE_DIR)" > $(STATS_TXT); \
+    $(EXPAND) $${_summary} | $(EGREP) -v ' Not run\.' > $(RUNLIST); \
+    $(EGREP) ' Passed\.' $(RUNLIST) \
+      | $(EGREP) -v ' Error\.' \
+      | $(EGREP) -v ' Failed\.' > $(PASSLIST); \
+    ( $(EGREP) ' Failed\.' $(RUNLIST); \
+      $(EGREP) ' Error\.' $(RUNLIST); \
+      $(EGREP) -v ' Passed\.' $(RUNLIST) ) \
+      | $(SORT) | $(UNIQ) > $(FAILLIST); \
+    if [ $${jtregExitCode} != 0 -o -s $(FAILLIST) ] ; then \
+      $(EXPAND) $(FAILLIST) \
+        | $(CUT) -d' ' -f1 \
+        | $(SED) -e 's@^@FAILED: @' >> $(STATS_TXT); \
+      if [ $${jtregExitCode} = 0 ] ; then \
+        jtregExitCode=1; \
+      fi; \
+    fi; \
+    runc="`$(CAT) $(RUNLIST)      | $(WC) -l | $(AWK) '{print $$1;}'`"; \
+    passc="`$(CAT) $(PASSLIST)    | $(WC) -l | $(AWK) '{print $$1;}'`"; \
+    failc="`$(CAT) $(FAILLIST)    | $(WC) -l | $(AWK) '{print $$1;}'`"; \
+    exclc="FIXME CODETOOLS-7900176"; \
+    $(ECHO) "TEST STATS: name=$(UNIQUE_DIR)  run=$${runc}  pass=$${passc}  fail=$${failc}" \
+      >> $(STATS_TXT); \
+  else \
+    $(ECHO) "Missing file: $${_summary}" >> $(STATS_TXT); \
+  fi; \
+  if [ -f $(STATS_TXT) ] ; then \
+    $(CAT) $(STATS_TXT); \
+  fi; \
+  $(ZIP_UP_RESULTS) ; \
+  $(TESTEXIT) \
+)
 
 ################################################################
 
 # Default make rule (runs jtreg_tests)
-all: jtreg_tests
+all: hotspot_all
 	@$(ECHO) "Testing completed successfully"
 
+# Support "hotspot_" prefixed test make targets (too)
+# The hotspot_% targets are used by the top level Makefile
+# Unless explicitly defined below, hotspot_<x> is interpreted as a jtreg test group name
+hotspot_%:
+	$(ECHO) "Running tests: $@"
+	$(MAKE) -j 1 TEST_SELECTION=":$@" UNIQUE_DIR=$@ jtreg_tests;
+
 # Prep for output
 prep: clean
 	@$(MKDIR) -p $(ABS_TEST_OUTPUT_DIR)
@@ -163,20 +244,37 @@
 
 # Expect JT_HOME to be set for jtreg tests. (home for jtreg)
 ifndef JT_HOME
-  JT_HOME = $(SLASH_JAVA)/re/jtreg/4.0/promoted/latest/binaries/jtreg
+  JT_HOME = $(SLASH_JAVA)/re/jtreg/4.1/promoted/latest/binaries/jtreg
+  ifdef JPRT_JTREG_HOME
+    JT_HOME = $(JPRT_JTREG_HOME)
+  endif
 endif
-ifdef JPRT_JTREG_HOME
-  JT_HOME = $(JPRT_JTREG_HOME)
+
+# When called from JPRT the TESTDIRS variable is set to the jtreg tests to run
+ifdef TESTDIRS
+  TEST_SELECTION = $(TESTDIRS)
+endif
+
+ifdef CONCURRENCY
+  EXTRA_JTREG_OPTIONS += -concurrency:$(CONCURRENCY)
 endif
 
-# Expect JPRT to set TESTDIRS to the jtreg test dirs
-JTREG_TESTDIRS = demo/jvmti/gctest demo/jvmti/hprof
-ifdef TESTDIRS
-  JTREG_TESTDIRS = $(TESTDIRS)
-endif
+# Default JTREG to run
+JTREG = $(JT_HOME)/bin/jtreg
 
-# Default JTREG to run (win32 script works for everybody)
-JTREG = $(JT_HOME)/win32/bin/jtreg
+# Only run automatic tests
+JTREG_BASIC_OPTIONS += -a
+# Report details on all failed or error tests, times too
+JTREG_BASIC_OPTIONS += -v:fail,error,time
+# Retain all files for failing tests
+JTREG_BASIC_OPTIONS += -retain:fail,error
+# Tests marked with "ignore" are not run, and jtreg is completely silent about them
+JTREG_IGNORE_OPTION = -ignore:quiet
+JTREG_BASIC_OPTIONS += $(JTREG_IGNORE_OPTION)
+# Add any extra options
+JTREG_BASIC_OPTIONS += $(EXTRA_JTREG_OPTIONS)
+# Set other vm and test options
+JTREG_TEST_OPTIONS = $(JAVA_ARGS:%=-javaoptions:%) $(JAVA_OPTIONS:%=-vmoption:%) $(JAVA_VM_ARGS:%=-vmoption:%)
 
 # Option to tell jtreg to not run tests marked with "ignore"
 ifeq ($(PLATFORM), windows)
@@ -184,20 +282,26 @@
 else
   JTREG_KEY_OPTION = -k:\!ignore
 endif
-
-#EXTRA_JTREG_OPTIONS =
+JTREG_BASIC_OPTIONS += $(JTREG_KEY_OPTION)
+
+# Make sure jtreg exists
+$(JTREG): $(JT_HOME)
 
-jtreg_tests: prep $(JT_HOME) $(PRODUCT_HOME) $(JTREG)
-	$(JTREG) -a -v:fail,error               \
-          $(JTREG_KEY_OPTION)                   \
-          $(EXTRA_JTREG_OPTIONS)                \
-          -r:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)")/JTreport    \
-          -w:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)")/JTwork      \
-          -jdk:$(shell $(GETMIXEDPATH) "$(PRODUCT_HOME)")                  \
-          $(JAVA_OPTIONS:%=-vmoption:%)         \
-          $(JTREG_TESTDIRS)                     \
-	  || $(BUNDLE_UP_FAILED)
-	$(BUNDLE_UP)
+jtreg_tests: prep $(PRODUCT_HOME) $(JTREG)
+	(                                                                    \
+	  ( JT_HOME=$(shell $(GETMIXEDPATH) "$(JT_HOME)");                   \
+            export JT_HOME;                                                  \
+            $(shell $(GETMIXEDPATH) "$(JTREG)")                              \
+              $(JTREG_BASIC_OPTIONS)                                         \
+              -r:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/JTreport")  \
+              -w:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)/JTwork")    \
+              -jdk:$(shell $(GETMIXEDPATH) "$(PRODUCT_HOME)")                \
+              $(JTREG_EXCLUSIONS)                                            \
+              $(JTREG_TEST_OPTIONS)                                          \
+              $(TEST_SELECTION)                                              \
+	  ) ;                                                                \
+	  $(BUNDLE_UP_AND_EXIT)                                              \
+	) 2>&1 | $(TEE) $(ABS_TEST_OUTPUT_DIR)/output.txt ; $(TESTEXIT)
 
 PHONY_LIST += jtreg_tests
 
@@ -205,7 +309,7 @@
 
 # clienttest (make sure various basic java client options work)
 
-clienttest: prep $(PRODUCT_HOME)
+hotspot_clienttest clienttest: prep $(PRODUCT_HOME)
 	$(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -version
 	$(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -help
 	$(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -X
@@ -213,73 +317,38 @@
 	$(RM) $(PRODUCT_HOME)/jre/bin/client/classes.jsa
 	$(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -Xshare:dump
 
-PHONY_LIST += clienttest
+PHONY_LIST += hotspot_clienttest clienttest
+
+################################################################
+
+# minimaltest (make sure various basic java minimal options work)
+
+hotspot_minimaltest minimaltest: prep $(PRODUCT_HOME)
+	$(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -version
+	$(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -help
+	$(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -X
+
+PHONY_LIST += hotspot_minimaltest minimaltest
 
 ################################################################
 
 # servertest (make sure various basic java server options work)
 
-servertest: prep $(PRODUCT_HOME)
+hotspot_servertest servertest: prep $(PRODUCT_HOME)
 	$(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -version
 	$(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -help
 	$(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -X
 
-PHONY_LIST += servertest
+PHONY_LIST += hotspot_servertest servertest
 
 ################################################################
 
 # internalvmtests (run internal unit tests inside the VM)
 
-internalvmtests: prep $(PRODUCT_HOME)
+hotspot_internalvmtests internalvmtests: prep $(PRODUCT_HOME)
 	$(PRODUCT_HOME)/bin/java $(JAVA_OPTIONS) -XX:+ExecuteInternalVMTests -version
 
-PHONY_LIST += internalvmtests
-
-################################################################
-
-# wbapitest (make sure the whitebox testing api classes work
-
-wbapitest: prep $(JT_HOME) $(PRODUCT_HOME) $(JTREG)
-	$(JTREG) -a -v:fail,error               \
-          $(JTREG_KEY_OPTION)                   \
-          $(EXTRA_JTREG_OPTIONS)                \
-          -r:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)")/JTreport    \
-          -w:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)")/JTwork      \
-          -jdk:$(shell $(GETMIXEDPATH) "$(PRODUCT_HOME)")                  \
-          $(JAVA_OPTIONS:%=-vmoption:%)         \
-          $(shell $(GETMIXEDPATH) "$(TEST_ROOT)")/sanity                   \
-	  || $(BUNDLE_UP_FAILED)
-	$(BUNDLE_UP)
-
-PHONY_LIST += wbapitest
-
-################################################################
-
-# packtest
-
-# Expect JPRT to set JPRT_PACKTEST_HOME.
-PACKTEST_HOME = /net/jprt-web.sfbay.sun.com/jprt/allproducts/packtest
-ifdef JPRT_PACKTEST_HOME
-  PACKTEST_HOME = $(JPRT_PACKTEST_HOME)
-endif
-
-#EXTRA_PACKTEST_OPTIONS =
-
-packtest: prep $(PACKTEST_HOME)/ptest $(PRODUCT_HOME)
-	( $(CD) $(PACKTEST_HOME) &&            \
-	    $(PACKTEST_HOME)/ptest             \
-		 -t "$(PRODUCT_HOME)"          \
-	         $(PACKTEST_STRESS_OPTION)     \
-		 $(EXTRA_PACKTEST_OPTIONS)     \
-		 -W $(ABS_TEST_OUTPUT_DIR)     \
-                 $(JAVA_OPTIONS:%=-J %)        \
-	 ) || $(BUNDLE_UP_FAILED)
-	$(BUNDLE_UP)
-
-packtest_stress: PACKTEST_STRESS_OPTION=-s
-packtest_stress: packtest
-
-PHONY_LIST += packtest packtest_stress
+PHONY_LIST += hotspot_internalvmtests internalvmtests
 
 ################################################################
 
@@ -287,4 +356,3 @@
 .PHONY: all clean prep $(PHONY_LIST)
 
 ################################################################
-
--- a/test/TEST.ROOT	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/TEST.ROOT	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 # 
-# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 # It also contains test-suite configuration information.
 
 # The list of keywords supported in this test suite
-keys=cte_test jcmd nmt regression gc
+keys=cte_test jcmd nmt regression gc stress
 
 groups=TEST.groups [closed/TEST.groups]
+requires.properties=sun.arch.data.model
--- a/test/TEST.groups	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/TEST.groups	Fri Apr 10 16:58:26 2015 -0700
@@ -73,15 +73,24 @@
   runtime/jsig/Test8017498.sh \
   runtime/Metaspace/FragmentMetaspace.java \
   runtime/NMT/BaselineWithParameter.java \
+  runtime/NMT/JcmdBaselineDetail.java \
+  runtime/NMT/JcmdDetailDiff.java \
+  runtime/NMT/JcmdScaleDetail.java \
   runtime/NMT/JcmdScale.java \
   runtime/NMT/JcmdWithNMTDisabled.java \
+  runtime/NMT/MallocRoundingReportTest.java \
+  runtime/NMT/MallocSiteHashOverflow.java \
+  runtime/NMT/MallocStressTest.java \
   runtime/NMT/MallocTestType.java \
+  runtime/NMT/MallocTrackingVerify.java \
   runtime/NMT/ReleaseCommittedMemory.java \
+  runtime/NMT/ReleaseNoCommit.java \
   runtime/NMT/ShutdownTwice.java \
   runtime/NMT/SummaryAfterShutdown.java \
   runtime/NMT/SummarySanityCheck.java \
   runtime/NMT/ThreadedMallocTestType.java \
   runtime/NMT/ThreadedVirtualAllocTestType.java \
+  runtime/NMT/VirtualAllocCommitUncommitRecommit.java \
   runtime/NMT/VirtualAllocTestType.java \
   runtime/RedefineObject/TestRedefineObject.java \
   runtime/Thread/TestThreadDumpMonitorContention.java \
@@ -115,6 +124,27 @@
  -:needs_jdk
 
 
+# When called from top level the test suites use the hotspot_ prefix
+hotspot_wbapitest = \
+  sanity/
+
+hotspot_compiler = \
+  sanity/ExecuteInternalVMTests.java
+
+hotspot_gc = \
+  sanity/ExecuteInternalVMTests.java
+
+hotspot_runtime = \
+  sanity/ExecuteInternalVMTests.java
+
+hotspot_serviceability = \
+  sanity/ExecuteInternalVMTests.java
+
+hotspot_all = \
+  :hotspot_compiler \
+  :hotspot_gc \
+  :hotspot_runtime \
+  :hotspot_serviceability
 # Tests that require compact3 API's
 #
 needs_compact3 = \
@@ -129,10 +159,19 @@
   gc/6581734/Test6581734.java \
   gc/7072527/TestFullGCCount.java \
   gc/g1/TestHumongousAllocInitialMark.java \
+  gc/g1/TestHumongousShrinkHeap.java \
   gc/arguments/TestG1HeapRegionSize.java \
   gc/metaspace/TestMetaspaceMemoryPool.java \
   gc/arguments/TestDynMinHeapFreeRatio.java \
   gc/arguments/TestDynMaxHeapFreeRatio.java \
+  gc/g1/TestShrinkAuxiliaryData00.java \
+  gc/g1/TestShrinkAuxiliaryData05.java \
+  gc/g1/TestShrinkAuxiliaryData10.java \
+  gc/g1/TestShrinkAuxiliaryData15.java \
+  gc/g1/TestShrinkAuxiliaryData20.java \
+  gc/g1/TestShrinkAuxiliaryData25.java \
+  gc/g1/TestShrinkAuxiliaryData30.java \
+  gc/survivorAlignment \
   runtime/InternalApi/ThreadCpuTimesDeadlock.java \
   serviceability/threads/TestFalseDeadLock.java \
   serviceability/jvmti/GetObjectSizeOverflow.java \
@@ -169,6 +208,8 @@
 #
 needs_full_vm_compact1 = \
   runtime/NMT \
+  gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java \
+  gc/class_unloading/TestG1ClassUnloadingHWM.java \
   gc/g1/TestRegionAlignment.java \
   gc/g1/TestShrinkToOneRegion.java \
   gc/metaspace/G1AddMetaspaceDependency.java \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/EliminateAutoBox/UnsignedLoads.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @library /testlibrary
+ * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:+EliminateAutoBox
+ *                   -XX:CompileOnly=::valueOf,::byteValue,::shortValue,::testUnsignedByte,::testUnsignedShort
+ *                   UnsignedLoads
+ */
+import static com.oracle.java.testlibrary.Asserts.assertEQ;
+
+public class UnsignedLoads {
+    public static int testUnsignedByte() {
+        byte[] bytes = new byte[] {-1};
+        int res = 0;
+        for (int i = 0; i < 100000; i++) {
+            for (Byte b : bytes) {
+                res = b & 0xff;
+            }
+        }
+        return res;
+    }
+
+    public static int testUnsignedShort() {
+        int res = 0;
+        short[] shorts = new short[] {-1};
+        for (int i = 0; i < 100000; i++) {
+            for (Short s : shorts) {
+                res = s & 0xffff;
+            }
+        }
+        return res;
+    }
+
+    public static void main(String[] args) {
+        assertEQ(testUnsignedByte(),    255);
+        assertEQ(testUnsignedShort(), 65535);
+        System.out.println("TEST PASSED");
+    }
+}
--- a/test/compiler/ciReplay/common.sh	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/ciReplay/common.sh	Fri Apr 10 16:58:26 2015 -0700
@@ -213,7 +213,7 @@
             -XX:VMThreadStackSize=512 \
             -XX:CompilerThreadStackSize=512 \
             -XX:ParallelGCThreads=1 \
-            -XX:CICompilerCount=1 \
+            -XX:CICompilerCount=2 \
             -Xcomp \
             -XX:CICrashAt=1 \
             -XX:+CreateMinidumpOnCrash \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/classUnloading/anonymousClass/TestAnonymousClassUnloading.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sun.hotspot.WhiteBox;
+import sun.misc.Unsafe;
+import sun.misc.IOUtils;
+
+import java.lang.reflect.Method;
+import java.net.URL;
+import java.net.URLConnection;
+
+/*
+ * @test TestAnonymousClassUnloading
+ * @bug 8054402
+ * @summary "Tests unloading of anonymous classes."
+ * @library /testlibrary /testlibrary/whitebox
+ * @compile TestAnonymousClassUnloading.java
+ * @run main ClassFileInstaller TestAnonymousClassUnloading
+ *                              sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:-BackgroundCompilation TestAnonymousClassUnloading
+ */
+public class TestAnonymousClassUnloading {
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+    private static final Unsafe UNSAFE = Unsafe.getUnsafe();
+    private static int COMP_LEVEL_SIMPLE = 1;
+    private static int COMP_LEVEL_FULL_OPTIMIZATION = 4;
+
+    /**
+     * We override hashCode here to be able to access this implementation
+     * via an Object reference (we cannot cast to TestAnonymousClassUnloading).
+     */
+    @Override
+    public int hashCode() {
+        return 42;
+    }
+
+    /**
+     * Does some work by using the anonymousClass.
+     * @param anonymousClass Class performing some work (will be unloaded)
+     */
+    static private void doWork(Class<?> anonymousClass) throws InstantiationException, IllegalAccessException {
+        // Create a new instance
+        Object anon = anonymousClass.newInstance();
+        // We would like to call a method of anonymousClass here but we cannot cast because the class
+        // was loaded by a different class loader. One solution would be to use reflection but since
+        // we want C2 to implement the call as an IC we call Object::hashCode() here which actually
+        // calls anonymousClass::hashCode(). C2 will then implement this call as an IC.
+        if (anon.hashCode() != 42) {
+            throw new RuntimeException("Work not done");
+        }
+    }
+
+    /**
+     * Makes sure that method is compiled by forcing compilation if not yet compiled.
+     * @param m Method to be checked
+     */
+    static private void makeSureIsCompiled(Method m) {
+        // Make sure background compilation is disabled
+        if (WHITE_BOX.getBooleanVMFlag("BackgroundCompilation")) {
+            throw new RuntimeException("Background compilation enabled");
+        }
+
+        // Check if already compiled
+        if (!WHITE_BOX.isMethodCompiled(m)) {
+            // If not, try to compile it with C2
+            if (!WHITE_BOX.enqueueMethodForCompilation(m, COMP_LEVEL_FULL_OPTIMIZATION)) {
+                // C2 compiler not available, try to compile with C1
+                WHITE_BOX.enqueueMethodForCompilation(m, COMP_LEVEL_SIMPLE);
+            }
+            // Because background compilation is disabled, method should now be compiled
+            if (!WHITE_BOX.isMethodCompiled(m)) {
+                throw new RuntimeException(m + " not compiled");
+            }
+        }
+    }
+
+    /**
+     * This test creates stale Klass* metadata referenced by a compiled IC.
+     *
+     * The following steps are performed:
+     * (1) An anonymous version of TestAnonymousClassUnloading is loaded by a custom class loader
+     * (2) The method doWork that calls a method of the anonymous class is compiled. The call
+     *     is implemented as an IC referencing Klass* metadata of the anonymous class.
+     * (3) Unloading of the anonymous class is enforced. The IC now references dead metadata.
+     */
+    static public void main(String[] args) throws Exception {
+        // (1) Load an anonymous version of this class using the corresponding Unsafe method
+        URL classUrl = TestAnonymousClassUnloading.class.getResource("TestAnonymousClassUnloading.class");
+        URLConnection connection = classUrl.openConnection();
+        byte[] classBytes = IOUtils.readFully(connection.getInputStream(), connection.getContentLength(), true);
+        Class<?> anonymousClass = UNSAFE.defineAnonymousClass(TestAnonymousClassUnloading.class, classBytes, null);
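+        // Note: a VM anonymous class defined this way is not registered with any class loader,
+        // so it becomes unloadable as soon as it is no longer reachable.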
+
+        // (2) Make sure all paths of doWork are profiled and compiled
+        for (int i = 0; i < 100000; ++i) {
+            doWork(anonymousClass);
+        }
+
+        // Make sure doWork is compiled now
+        Method doWork = TestAnonymousClassUnloading.class.getDeclaredMethod("doWork", Class.class);
+        makeSureIsCompiled(doWork);
+
+        // (3) Throw away reference to anonymousClass to allow unloading
+        anonymousClass = null;
+
+        // Force garbage collection to trigger unloading of anonymousClass
+        // Dead metadata reference to anonymousClass triggers JDK-8054402
+        WHITE_BOX.fullGC();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/classUnloading/methodUnloading/TestMethodUnloading.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sun.hotspot.WhiteBox;
+
+import java.lang.reflect.Method;
+import java.net.URL;
+import java.net.URLClassLoader;
+
+/*
+ * @test MethodUnloadingTest
+ * @bug 8029443
+ * @summary "Tests the unloading of methods to to class unloading"
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestMethodUnloading
+ * @build WorkerClass
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:-BackgroundCompilation -XX:-UseCompressedOops -XX:CompileOnly=TestMethodUnloading::doWork TestMethodUnloading
+ */
+public class TestMethodUnloading {
+    private static final String workerClassName = "WorkerClass";
+    private static int work = -1;
+
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+    private static int COMP_LEVEL_SIMPLE = 1;
+    private static int COMP_LEVEL_FULL_OPTIMIZATION = 4;
+
+    /**
+     * Does some work by either using the workerClass or locally producing values.
+     * @param workerClass Class performing some work (will be unloaded)
+     * @param useWorker If true the workerClass is used
+     */
+    static private void doWork(Class<?> workerClass, boolean useWorker) throws InstantiationException, IllegalAccessException {
+        if (useWorker) {
+            // Create a new instance
+            Object worker = workerClass.newInstance();
+            // We would like to call a method of WorkerClass here but we cannot cast to WorkerClass
+            // because the class was loaded by a different class loader. One solution would be to use
+            // reflection but since we want C2 to implement the call as an optimized IC we call
+            // Object::hashCode() here which actually calls WorkerClass::hashCode().
+            // C2 will then implement this call as an optimized IC that points to a to-interpreter stub
+            // referencing the Method* for WorkerClass::hashCode().
+            work = worker.hashCode();
+            if (work != 42) {
+                throw new RuntimeException("Work not done");
+            }
+        } else {
+            // Do some important work here
+            work = 1;
+        }
+    }
+
+    /**
+     * Makes sure that method is compiled by forcing compilation if not yet compiled.
+     * @param m Method to be checked
+     */
+    static private void makeSureIsCompiled(Method m) {
+        // Make sure background compilation is disabled
+        if (WHITE_BOX.getBooleanVMFlag("BackgroundCompilation")) {
+            throw new RuntimeException("Background compilation enabled");
+        }
+
+        // Check if already compiled
+        if (!WHITE_BOX.isMethodCompiled(m)) {
+            // If not, try to compile it with C2
+            if (!WHITE_BOX.enqueueMethodForCompilation(m, COMP_LEVEL_FULL_OPTIMIZATION)) {
+                // C2 compiler not available, try to compile with C1
+                WHITE_BOX.enqueueMethodForCompilation(m, COMP_LEVEL_SIMPLE);
+            }
+            // Because background compilation is disabled, method should now be compiled
+            if (!WHITE_BOX.isMethodCompiled(m)) {
+                throw new RuntimeException(m + " not compiled");
+            }
+        }
+    }
+
+    /**
+     * This test creates stale Method* metadata in a to-interpreter stub of an optimized IC.
+     *
+     * The following steps are performed:
+     * (1) A workerClass is loaded by a custom class loader
+     * (2) The method doWork that calls a method of the workerClass is compiled. The call
+ *     is implemented as an optimized IC calling a to-interpreter stub. The to-interpreter
+     *     stub contains a Method* to a workerClass method.
+ * (3) Unloading of the workerClass is forced. The to-interpreter stub now contains a dead Method*.
+     * (4) Depending on the implementation of the IC, the compiled version of doWork should still be
+     *     valid. We call it again without using the workerClass.
+     */
+    static public void main(String[] args) throws Exception {
+        // (1) Create a custom class loader with no parent class loader
+        URL url = TestMethodUnloading.class.getProtectionDomain().getCodeSource().getLocation();
+        URLClassLoader loader = new URLClassLoader(new URL[] {url}, null);
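+        // The null parent prevents delegation to the application class loader, so WorkerClass
+        // is defined by this loader only and can be unloaded together with it.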
+
+        // Load worker class with custom class loader
+        Class<?> workerClass = Class.forName(workerClassName, true, loader);
+
+        // (2) Make sure all paths of doWork are profiled and compiled
+        for (int i = 0; i < 100000; ++i) {
+            doWork(workerClass, true);
+            doWork(workerClass, false);
+        }
+
+        // Make sure doWork is compiled now
+        Method doWork = TestMethodUnloading.class.getDeclaredMethod("doWork", Class.class, boolean.class);
+        makeSureIsCompiled(doWork);
+
+        // (3) Throw away class loader and reference to workerClass to allow unloading
+        loader.close();
+        loader = null;
+        workerClass = null;
+
+        // Force garbage collection to trigger unloading of workerClass
+        // Dead reference to WorkerClass::hashCode triggers JDK-8029443
+        WHITE_BOX.fullGC();
+
+        // (4) Depending on the implementation of the IC, the compiled version of doWork
+        // may still be valid here. Execute it without a workerClass.
+        doWork(null, false);
+        if (work != 1) {
+            throw new RuntimeException("Work not done");
+        }
+
+        doWork(Object.class, false);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/classUnloading/methodUnloading/WorkerClass.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * Worker class that is dynamically loaded/unloaded by TestMethodUnloading.
+ */
+public class WorkerClass {
+    /**
+     * We override hashCode here to be able to access this implementation
+     * via an Object reference (we cannot cast to WorkerClass).
+     */
+    @Override
+    public int hashCode() {
+        return 42;
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/dependencies/MonomorphicObjectCall/TestMonomorphicObjectCall.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.Collections;
+
+import com.oracle.java.testlibrary.*;
+
+/*
+ * @test
+ * @bug 8050079
+ * @summary Compiles a monomorphic call to finalizeObject() on a modified java.lang.Object to test C1 CHA.
+ * @library /testlibrary
+ * @compile -XDignore.symbol.file java/lang/Object.java TestMonomorphicObjectCall.java
+ * @run main TestMonomorphicObjectCall
+ */
+public class TestMonomorphicObjectCall {
+    final static String testClasses = System.getProperty("test.classes") + File.separator;
+
+    private static void callFinalize(Object object) throws Throwable {
+        // Call modified version of java.lang.Object::finalize() that is
+        // not overridden by any subclass. C1 CHA should mark the call site
+        // as monomorphic and inline the method.
+        object.finalizeObject();
+    }
+
+    public static void main(String[] args) throws Throwable {
+        if (args.length == 0) {
+            // Execute new instance with modified java.lang.Object
+            executeTestJvm();
+        } else {
+            // Trigger compilation of 'callFinalize'
+            callFinalize(new Object());
+        }
+    }
+
+    public static void executeTestJvm() throws Throwable {
+        // Execute test with modified version of java.lang.Object
+        // in -Xbootclasspath.
+        String[] vmOpts = new String[] {
+                "-Xbootclasspath/p:" + testClasses,
+                "-Xcomp",
+                "-XX:-VerifyDependencies",
+                "-XX:CompileOnly=TestMonomorphicObjectCall::callFinalize",
+                "-XX:CompileOnly=Object::finalizeObject",
+                "-XX:TieredStopAtLevel=1",
+                TestMonomorphicObjectCall.class.getName(),
+                "true"};
+        OutputAnalyzer output = ProcessTools.executeTestJvm(vmOpts);
+        output.shouldHaveExitValue(0);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/dependencies/MonomorphicObjectCall/java/lang/Object.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 1994, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package java.lang;
+
+/**
+ * Slightly modified version of java.lang.Object that replaces
+ * finalize() by finalizeObject() to avoid overriding in subclasses.
+ */
+public class Object {
+
+    private static native void registerNatives();
+    static {
+        registerNatives();
+    }
+
+    public final native Class<?> getClass();
+
+    public native int hashCode();
+
+    public boolean equals(Object obj) {
+        return (this == obj);
+    }
+
+    protected native Object clone() throws CloneNotSupportedException;
+
+    public String toString() {
+        return getClass().getName() + "@" + Integer.toHexString(hashCode());
+    }
+
+    public final native void notify();
+
+    public final native void notifyAll();
+
+    public final native void wait(long timeout) throws InterruptedException;
+
+    public final void wait(long timeout, int nanos) throws InterruptedException {
+        if (timeout < 0) {
+            throw new IllegalArgumentException("timeout value is negative");
+        }
+
+        if (nanos < 0 || nanos > 999999) {
+            throw new IllegalArgumentException(
+                                "nanosecond timeout value out of range");
+        }
+
+        if (nanos >= 500000 || (nanos != 0 && timeout == 0)) {
+            timeout++;
+        }
+
+        wait(timeout);
+    }
+
+    public final void wait() throws InterruptedException {
+        wait(0);
+    }
+
+    /**
+     * Replaces original finalize() method and is therefore not
+     * overridden by any subclasses of Object.
+     * @throws Throwable
+     */
+    // protected void finalize() throws Throwable { }
+    public void finalizeObject() throws Throwable { }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/exceptions/CatchInlineExceptions.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8059299
+ * @summary assert(adr_type != NULL) failed: expecting TypeKlassPtr
+ * @run main/othervm -Xbatch CatchInlineExceptions
+ */
+
+class Exception1 extends Exception {};
+class Exception2 extends Exception {};
+
+public class CatchInlineExceptions {
+    private static int counter0;
+    private static int counter1;
+    private static int counter2;
+    private static int counter;
+
+    static void foo(int i) throws Exception {
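+        // Throw only when (i & 1023) == 2, i.e. once every 1024 calls, so both the normal
+        // and the exceptional path are exercised.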
+        if ((i & 1023) == 2) {
+            counter0++;
+            throw new Exception2();
+        }
+    }
+
+    static void test(int i) throws Exception {
+        try {
+           foo(i);
+        }
+        catch (Exception e) {
+            if (e instanceof Exception1) {
+                counter1++;
+            } else if (e instanceof Exception2) {
+                counter2++;
+            }
+            counter++;
+            throw e;
+        }
+    }
+
+    public static void main(String[] args) throws Throwable {
+        for (int i = 0; i < 15000; i++) {
+            try {
+                test(i);
+            } catch (Exception e) {
+                // expected
+            }
+        }
+        if (counter1 != 0) {
+            throw new RuntimeException("Failed: counter1(" + counter1  + ") != 0");
+        }
+        if (counter2 != counter0) {
+            throw new RuntimeException("Failed: counter2(" + counter2  + ") != counter0(" + counter0  + ")");
+        }
+        if (counter2 != counter) {
+            throw new RuntimeException("Failed: counter2(" + counter2  + ") != counter(" + counter  + ")");
+        }
+        System.out.println("TEST PASSED");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/exceptions/SumTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8066900
+ * @summary FP registers are not properly restored by C1 when handling exceptions
+ * @run main/othervm -Xbatch SumTest
+ *
+ */
+public class SumTest {
+    private static class Sum {
+
+        double[] sums;
+
+        /**
+         * Construct empty Sum
+         */
+        public Sum() {
+            sums = new double[0];
+        }
+
+        /**
+         * Return the sum of all numbers added to this Sum
+         *
+         * @return the sum
+         */
+        final public double getSum() {
+            double sum = 0;
+            for (final double s : sums) {
+                sum += s;
+            }
+
+            return sum;
+        }
+
+        /**
+         * Add a new number to this Sum
+         *
+         * @param a number to be added.
+         */
+        final public void add(double a) {
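+            // Intentionally index past the end of sums; the handler below grows the array and
+            // appends the value. This exception path is what the test (bug 8066900) exercises.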
+            try {
+                sums[sums.length] = -1; // Cause IndexOutOfBoundsException
+            } catch (final IndexOutOfBoundsException e) {
+                final double[] oldSums = sums;
+                sums = new double[oldSums.length + 1]; // Extend sums
+                System.arraycopy(oldSums, 0, sums, 0, oldSums.length);
+                sums[oldSums.length] = a; // Append a
+            }
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        final Sum sum = new Sum();
+        for (int i = 1; i <= 10000; ++i) {
+            sum.add(1);
+            double ii = sum.getSum();
+            if (i != ii) {
+                throw new Exception("Failure: computed = " + ii + ", expected = " + i);
+            }
+        }
+    }
+
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/exceptions/TestRecursiveReplacedException.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8054224
+ * @summary Recursive method compiled by C1 is unable to catch StackOverflowError
+ * @run main/othervm -Xcomp -XX:CompileOnly=TestRecursiveReplacedException.run -XX:+TieredCompilation -XX:TieredStopAtLevel=2 -Xss256K TestRecursiveReplacedException
+ *
+ */
+
+public class TestRecursiveReplacedException {
+
+    public static void main(String args[]) {
+        new TestRecursiveReplacedException().run();
+    }
+
+    public void run() {
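+        // Recurse until a StackOverflowError is raised; the catch below must handle it even
+        // when run() has been compiled (bug 8054224).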
+        try {
+            run();
+        } catch (Throwable t) {
+        }
+    }
+}
--- a/test/compiler/intrinsics/mathexact/sanity/AddExactIntTest.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/intrinsics/mathexact/sanity/AddExactIntTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 /*
  * @test
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ *          /compiler/testlibrary
  * @build AddExactIntTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
@@ -34,7 +35,7 @@
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
  *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics AddExactIntTest
- * @run main Verifier hs_neg.log hs.log
+ * @run main intrinsics.Verifier hs_neg.log hs.log
  */
 
 public class AddExactIntTest {
--- a/test/compiler/intrinsics/mathexact/sanity/AddExactLongTest.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/intrinsics/mathexact/sanity/AddExactLongTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 /*
  * @test
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ *          /compiler/testlibrary
  * @build AddExactLongTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
@@ -34,7 +35,7 @@
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
  *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics AddExactLongTest
- * @run main Verifier hs_neg.log hs.log
+ * @run main intrinsics.Verifier hs_neg.log hs.log
  */
 
 public class AddExactLongTest {
--- a/test/compiler/intrinsics/mathexact/sanity/DecrementExactIntTest.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/intrinsics/mathexact/sanity/DecrementExactIntTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 /*
  * @test
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ *          /compiler/testlibrary
  * @build DecrementExactIntTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
@@ -34,7 +35,7 @@
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
  *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics DecrementExactIntTest
- * @run main Verifier hs_neg.log hs.log
+ * @run main intrinsics.Verifier hs_neg.log hs.log
  */
 
 public class DecrementExactIntTest {
--- a/test/compiler/intrinsics/mathexact/sanity/DecrementExactLongTest.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/intrinsics/mathexact/sanity/DecrementExactLongTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 /*
  * @test
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ *          /compiler/testlibrary
  * @build DecrementExactLongTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
@@ -34,7 +35,7 @@
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
  *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics DecrementExactLongTest
- * @run main Verifier hs_neg.log hs.log
+ * @run main intrinsics.Verifier hs_neg.log hs.log
  */
 
 public class DecrementExactLongTest {
--- a/test/compiler/intrinsics/mathexact/sanity/IncrementExactIntTest.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/intrinsics/mathexact/sanity/IncrementExactIntTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 /*
  * @test
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ *          /compiler/testlibrary
  * @build IncrementExactIntTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
@@ -34,7 +35,7 @@
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
  *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics IncrementExactIntTest
- * @run main Verifier hs_neg.log hs.log
+ * @run main intrinsics.Verifier hs_neg.log hs.log
  */
 
 public class IncrementExactIntTest {
--- a/test/compiler/intrinsics/mathexact/sanity/IncrementExactLongTest.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/intrinsics/mathexact/sanity/IncrementExactLongTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 /*
  * @test
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ *          /compiler/testlibrary
  * @build IncrementExactLongTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
@@ -34,7 +35,7 @@
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
  *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics IncrementExactLongTest
- * @run main Verifier hs_neg.log hs.log
+ * @run main intrinsics.Verifier hs_neg.log hs.log
  */
 
 public class IncrementExactLongTest {
--- a/test/compiler/intrinsics/mathexact/sanity/IntrinsicBase.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/intrinsics/mathexact/sanity/IntrinsicBase.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,6 +22,7 @@
  */
 
 import com.oracle.java.testlibrary.Platform;
+import intrinsics.Verifier;
 
 import java.io.FileOutputStream;
 import java.lang.reflect.Executable;
@@ -79,10 +80,10 @@
 
         System.out.println("Expected intrinsic count is " + expectedIntrinsicCount + " name " + getIntrinsicId());
 
-        final FileOutputStream out = new FileOutputStream(getVMOption("LogFile") + ".verify.properties");
+        final FileOutputStream out = new FileOutputStream(getVMOption("LogFile") + Verifier.PROPERTY_FILE_SUFFIX);
         Properties expectedProps = new Properties();
-        expectedProps.setProperty("intrinsic.name", getIntrinsicId());
-        expectedProps.setProperty("intrinsic.expectedCount", String.valueOf(expectedIntrinsicCount));
+        expectedProps.setProperty(Verifier.INTRINSIC_NAME_PROPERTY, getIntrinsicId());
+        expectedProps.setProperty(Verifier.INTRINSIC_EXPECTED_COUNT_PROPERTY, String.valueOf(expectedIntrinsicCount));
         expectedProps.store(out, null);
 
         out.close();
--- a/test/compiler/intrinsics/mathexact/sanity/MultiplyExactIntTest.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/intrinsics/mathexact/sanity/MultiplyExactIntTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 /*
  * @test
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ *          /compiler/testlibrary
  * @build MultiplyExactIntTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
@@ -34,7 +35,7 @@
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
  *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics MultiplyExactIntTest
- * @run main Verifier hs_neg.log hs.log
+ * @run main intrinsics.Verifier hs_neg.log hs.log
  */
 
 public class MultiplyExactIntTest {
--- a/test/compiler/intrinsics/mathexact/sanity/MultiplyExactLongTest.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/intrinsics/mathexact/sanity/MultiplyExactLongTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 /*
  * @test
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ *          /compiler/testlibrary
  * @build MultiplyExactLongTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
@@ -34,7 +35,7 @@
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
  *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics MultiplyExactLongTest
- * @run main Verifier hs_neg.log hs.log
+ * @run main intrinsics.Verifier hs_neg.log hs.log
  */
 
 public class MultiplyExactLongTest {
--- a/test/compiler/intrinsics/mathexact/sanity/NegateExactIntTest.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/intrinsics/mathexact/sanity/NegateExactIntTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 /*
  * @test
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ *          /compiler/testlibrary
  * @build NegateExactIntTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
@@ -34,7 +35,7 @@
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
  *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics NegateExactIntTest
- * @run main Verifier hs_neg.log hs.log
+ * @run main intrinsics.Verifier hs_neg.log hs.log
  */
 
 public class NegateExactIntTest {
--- a/test/compiler/intrinsics/mathexact/sanity/NegateExactLongTest.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/intrinsics/mathexact/sanity/NegateExactLongTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 /*
  * @test
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ *          /compiler/testlibrary
  * @build NegateExactLongTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
@@ -34,7 +35,7 @@
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
  *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics NegateExactLongTest
- * @run main Verifier hs_neg.log hs.log
+ * @run main intrinsics.Verifier hs_neg.log hs.log
  */
 
 public class NegateExactLongTest {
--- a/test/compiler/intrinsics/mathexact/sanity/SubtractExactIntTest.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/intrinsics/mathexact/sanity/SubtractExactIntTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 /*
  * @test
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ *          /compiler/testlibrary
  * @build SubtractExactIntTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
@@ -34,7 +35,7 @@
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
  *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics SubtractExactIntTest
- * @run main Verifier hs_neg.log hs.log
+ * @run main intrinsics.Verifier hs_neg.log hs.log
 
  */
 
--- a/test/compiler/intrinsics/mathexact/sanity/SubtractExactLongTest.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/intrinsics/mathexact/sanity/SubtractExactLongTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 /*
  * @test
  * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ *          /compiler/testlibrary
  * @build SubtractExactLongTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
@@ -34,7 +35,7 @@
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
  *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
  *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics SubtractExactLongTest
- * @run main Verifier hs_neg.log hs.log
+ * @run main intrinsics.Verifier hs_neg.log hs.log
  */
 
 public class SubtractExactLongTest {
--- a/test/compiler/intrinsics/mathexact/sanity/Verifier.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-import java.io.BufferedReader;
-import java.io.FileReader;
-import java.util.Properties;
-
-public class Verifier {
-
-    public static void main(String[] args) throws Exception {
-        if (args.length == 0)
-            throw new RuntimeException("Test bug, nothing to verify");
-        for (String hsLogFile : args) {
-            verify(hsLogFile);
-        }
-    }
-
-    private static void verify(String hsLogFile) throws Exception {
-        System.out.println("Verifying " + hsLogFile);
-
-        final Properties expectedProperties = new Properties();
-        final FileReader reader = new FileReader(hsLogFile + ".verify.properties");
-        expectedProperties.load(reader);
-        reader.close();
-
-        int fullMatchCnt = 0;
-        int suspectCnt = 0;
-        final String intrinsicId = expectedProperties.getProperty("intrinsic.name");
-        final String prefix = "<intrinsic id='";
-        final String prefixWithId = prefix + intrinsicId + "'";
-        final int expectedCount = Integer.parseInt(expectedProperties.getProperty("intrinsic.expectedCount"));
-
-        BufferedReader r = new BufferedReader(new FileReader(hsLogFile));
-        String s;
-        while ((s = r.readLine()) != null) {
-            if (s.startsWith(prefix)) {
-                if (s.startsWith(prefixWithId)) {
-                    fullMatchCnt++;
-                } else {
-                    suspectCnt++;
-                    System.out.println("WARNING: Other intrinsic detected " + s);
-                }
-            }
-        }
-        r.close();
-
-        System.out.println("Intrinsic " + intrinsicId + " verification, expected: " + expectedCount + ", matched: " + fullMatchCnt + ", suspected: " + suspectCnt);
-        if (expectedCount != fullMatchCnt)
-            throw new RuntimeException("Unexpected count of intrinsic  " + prefixWithId + " expected:" + expectedCount + ", matched: " + fullMatchCnt + ", suspected: " + suspectCnt);
-    }
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/multiplytolen/TestMultiplyToLen.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8055494
+ * @summary Add C2 x86 intrinsic for BigInteger::multiplyToLen() method
+ *
+ * @run main/othervm/timeout=600 -XX:-TieredCompilation -Xbatch
+ *      -XX:CompileCommand=exclude,TestMultiplyToLen::main
+ *      -XX:CompileCommand=option,TestMultiplyToLen::base_multiply,ccstr,DisableIntrinsic,_multiplyToLen
+ *      -XX:CompileCommand=option,java.math.BigInteger::multiply,ccstr,DisableIntrinsic,_multiplyToLen
+ *      -XX:CompileCommand=inline,java.math.BigInteger::multiply TestMultiplyToLen
+ */
+
+import java.util.Random;
+import java.math.*;
+
+public class TestMultiplyToLen {
+
+    // Avoid intrinsic by preventing inlining multiply() and multiplyToLen().
+    public static BigInteger base_multiply(BigInteger op1, BigInteger op2) {
+      return op1.multiply(op2);
+    }
+
+    // Generate multiplyToLen() intrinsic by inlining multiply().
+    public static BigInteger new_multiply(BigInteger op1, BigInteger op2) {
+      return op1.multiply(op2);
+    }
+
+    public static boolean bytecompare(BigInteger b1, BigInteger b2) {
+      byte[] data1 = b1.toByteArray();
+      byte[] data2 = b2.toByteArray();
+      if (data1.length != data2.length)
+        return false;
+      for (int i = 0; i < data1.length; i++) {
+        if (data1[i] != data2[i])
+          return false;
+      }
+      return true;
+    }
+
+    public static String stringify(BigInteger b) {
+      String strout= "";
+      byte [] data = b.toByteArray();
+      for (int i = 0; i < data.length; i++) {
+        strout += (String.format("%02x",data[i]) + " ");
+      }
+      return strout;
+    }
+
+    public static void main(String args[]) throws Exception {
+
+      BigInteger oldsum = new BigInteger("0");
+      BigInteger newsum = new BigInteger("0");
+
+      BigInteger b1, b2, oldres, newres;
+
+      Random rand = new Random();
+      long seed = System.nanoTime();
+      Random rand1 = new Random();
+      long seed1 = System.nanoTime();
+      rand.setSeed(seed);
+      rand1.setSeed(seed1);
+
+      for (int j = 0; j < 1000000; j++) {
+        int rand_int = rand1.nextInt(3136)+32;
+        int rand_int1 = rand1.nextInt(3136)+32;
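+        // Operand bit lengths fall in the range 32..3167, covering both small and large multiplications.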
+        b1 = new BigInteger(rand_int, rand);
+        b2 = new BigInteger(rand_int1, rand);
+
+        oldres = base_multiply(b1,b2);
+        newres = new_multiply(b1,b2);
+
+        oldsum = oldsum.add(oldres);
+        newsum = newsum.add(newres);
+
+        if (!bytecompare(oldres,newres)) {
+          System.out.print("mismatch for:b1:" + stringify(b1) + " :b2:" + stringify(b2) + " :oldres:" + stringify(oldres) + " :newres:" + stringify(newres));
+          System.out.println(b1);
+          System.out.println(b2);
+          throw new Exception("Failed");
+        }
+      }
+      if (!bytecompare(oldsum,newsum))  {
+        System.out.println("Failure: oldsum:" + stringify(oldsum) + " newsum:" + stringify(newsum));
+        throw new Exception("Failed");
+      } else {
+        System.out.println("Success");
+      }
+   }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/TestSHA.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary C2 support for SHA on SPARC
+ *
+ * @run main/othervm/timeout=600 -Xbatch -Dalgorithm=SHA-1   TestSHA
+ * @run main/othervm/timeout=600 -Xbatch -Dalgorithm=SHA-224 TestSHA
+ * @run main/othervm/timeout=600 -Xbatch -Dalgorithm=SHA-256 TestSHA
+ * @run main/othervm/timeout=600 -Xbatch -Dalgorithm=SHA-384 TestSHA
+ * @run main/othervm/timeout=600 -Xbatch -Dalgorithm=SHA-512 TestSHA
+ *
+ * @run main/othervm/timeout=600 -Xbatch -Dalgorithm=SHA-1   -Doffset=1 TestSHA
+ * @run main/othervm/timeout=600 -Xbatch -Dalgorithm=SHA-224 -Doffset=1 TestSHA
+ * @run main/othervm/timeout=600 -Xbatch -Dalgorithm=SHA-256 -Doffset=1 TestSHA
+ * @run main/othervm/timeout=600 -Xbatch -Dalgorithm=SHA-384 -Doffset=1 TestSHA
+ * @run main/othervm/timeout=600 -Xbatch -Dalgorithm=SHA-512 -Doffset=1 TestSHA
+ *
+ * @run main/othervm/timeout=600 -Xbatch -Dalgorithm=SHA-1   -Dalgorithm2=SHA-256 TestSHA
+ * @run main/othervm/timeout=600 -Xbatch -Dalgorithm=SHA-1   -Dalgorithm2=SHA-512 TestSHA
+ * @run main/othervm/timeout=600 -Xbatch -Dalgorithm=SHA-256 -Dalgorithm2=SHA-512 TestSHA
+ *
+ * @run main/othervm/timeout=600 -Xbatch -Dalgorithm=SHA-1   -Dalgorithm2=MD5     TestSHA
+ * @run main/othervm/timeout=600 -Xbatch -Dalgorithm=MD5     -Dalgorithm2=SHA-1   TestSHA
+ */
+
+import java.security.MessageDigest;
+import java.util.Arrays;
+
+public class TestSHA {
+    private static final int HASH_LEN = 64; /* up to 512-bit */
+    private static final int ALIGN = 8;     /* for different data alignments */
+
+    public static void main(String[] args) throws Exception {
+        String provider = System.getProperty("provider", "SUN");
+        String algorithm = System.getProperty("algorithm", "SHA-1");
+        String algorithm2 = System.getProperty("algorithm2", "");
+        int msgSize = Integer.getInteger("msgSize", 1024);
+        int offset = Integer.getInteger("offset", 0)  % ALIGN;
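+        // The offset is reduced modulo ALIGN so input buffers with alignments 0..7 bytes are exercised.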
+        int iters = (args.length > 0 ? Integer.valueOf(args[0]) : 100000);
+        int warmupIters = (args.length > 1 ? Integer.valueOf(args[1]) : 20000);
+
+        testSHA(provider, algorithm, msgSize, offset, iters, warmupIters);
+
+        if (algorithm2.equals("") == false) {
+            testSHA(provider, algorithm2, msgSize, offset, iters, warmupIters);
+        }
+    }
+
+    static void testSHA(String provider, String algorithm, int msgSize,
+                        int offset, int iters, int warmupIters) throws Exception {
+        System.out.println("provider = " + provider);
+        System.out.println("algorithm = " + algorithm);
+        System.out.println("msgSize = " + msgSize + " bytes");
+        System.out.println("offset = " + offset);
+        System.out.println("iters = " + iters);
+
+        byte[] expectedHash = new byte[HASH_LEN];
+        byte[] hash = new byte[HASH_LEN];
+        byte[] data = new byte[msgSize + offset];
+        for (int i = 0; i < (msgSize + offset); i++) {
+            data[i] = (byte)(i & 0xff);
+        }
+
+        try {
+            MessageDigest sha = MessageDigest.getInstance(algorithm, provider);
+
+            /* do once, which doesn't use intrinsics */
+            sha.reset();
+            sha.update(data, offset, msgSize);
+            expectedHash = sha.digest();
+
+            /* warm up */
+            for (int i = 0; i < warmupIters; i++) {
+                sha.reset();
+                sha.update(data, offset, msgSize);
+                hash = sha.digest();
+            }
+
+            /* check result */
+            if (Arrays.equals(hash, expectedHash) == false) {
+                System.out.println("TestSHA Error: ");
+                showArray(expectedHash, "expectedHash");
+                showArray(hash,         "computedHash");
+                //System.exit(1);
+                throw new Exception("TestSHA Error");
+            } else {
+                showArray(hash, "hash");
+            }
+
+            /* measure performance */
+            long start = System.nanoTime();
+            for (int i = 0; i < iters; i++) {
+                sha.reset();
+                sha.update(data, offset, msgSize);
+                hash = sha.digest();
+            }
+            long end = System.nanoTime();
+            double total = (double)(end - start)/1e9;         /* in seconds */
+            double thruput = (double)msgSize*iters/1e6/total; /* in MB/s */
+            System.out.println("TestSHA runtime = " + total + " seconds");
+            System.out.println("TestSHA throughput = " + thruput + " MB/s");
+            System.out.println();
+        } catch (Exception e) {
+            System.out.println("Exception: " + e);
+            //System.exit(1);
+            throw new Exception(e);
+        }
+    }
+
+    static void showArray(byte b[], String name) {
+        System.out.format("%s [%d]: ", name, b.length);
+        for (int i = 0; i < Math.min(b.length, HASH_LEN); i++) {
+            System.out.format("%02x ", b[i] & 0xff);
+        }
+        System.out.println();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/cli/SHAOptionsBase.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.Platform;
+import com.oracle.java.testlibrary.cli.CommandLineOptionTest;
+import sha.predicate.IntrinsicPredicates;
+
+import java.util.function.BooleanSupplier;
+
+/**
+ * Base class for all CLI tests on SHA-related options.
+ *
+ * Instead of using huge complex tests for each option, each test is constructed
+ * from several test cases shared among different tests.
+ */
+public class SHAOptionsBase extends CommandLineOptionTest {
+    protected static final String USE_SHA_OPTION = "UseSHA";
+    protected static final String USE_SHA1_INTRINSICS_OPTION
+            = "UseSHA1Intrinsics";
+    protected static final String USE_SHA256_INTRINSICS_OPTION
+            = "UseSHA256Intrinsics";
+    protected static final String USE_SHA512_INTRINSICS_OPTION
+            = "UseSHA512Intrinsics";
+
+    // Note that strings below will be passed to
+    // CommandLineOptionTest.verifySameJVMStartup and thus are regular
+    // expressions, not just plain strings.
+    protected static final String SHA_INSTRUCTIONS_ARE_NOT_AVAILABLE
+            = "SHA instructions are not available on this CPU";
+    protected static final String SHA1_INSTRUCTION_IS_NOT_AVAILABLE
+            = "SHA1 instruction is not available on this CPU\\.";
+    protected static final String SHA256_INSTRUCTION_IS_NOT_AVAILABLE
+            = "SHA256 instruction \\(for SHA-224 and SHA-256\\) "
+            + "is not available on this CPU\\.";
+    protected static final String SHA512_INSTRUCTION_IS_NOT_AVAILABLE
+            = "SHA512 instruction \\(for SHA-384 and SHA-512\\) "
+            + "is not available on this CPU\\.";
+    protected static final String SHA_INTRINSICS_ARE_NOT_AVAILABLE
+            = "SHA intrinsics are not available on this CPU";
+
+    private final TestCase[] testCases;
+
+    /**
+     * Returns warning message that should occur in VM output if an option with
+     * the name {@code optionName} was turned on and CPU does not support
+     * required instructions.
+     *
+     * @param optionName The name of the option for which warning message should
+     *                   be returned.
+     * @return A warning message that will be printed out to VM output if CPU
+     *         instructions required by the option are not supported.
+     */
+    protected static String getWarningForUnsupportedCPU(String optionName) {
+        if (Platform.isSparc()) {
+            switch (optionName) {
+                case SHAOptionsBase.USE_SHA_OPTION:
+                    return SHAOptionsBase.SHA_INSTRUCTIONS_ARE_NOT_AVAILABLE;
+                case SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION:
+                    return SHAOptionsBase.SHA1_INSTRUCTION_IS_NOT_AVAILABLE;
+                case SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION:
+                    return SHAOptionsBase.SHA256_INSTRUCTION_IS_NOT_AVAILABLE;
+                case SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION:
+                    return SHAOptionsBase.SHA512_INSTRUCTION_IS_NOT_AVAILABLE;
+                default:
+                    throw new Error("Unexpected option " + optionName);
+            }
+        } else if (Platform.isX64() || Platform.isX86()) {
+            switch (optionName) {
+                case SHAOptionsBase.USE_SHA_OPTION:
+                    return SHAOptionsBase.SHA_INSTRUCTIONS_ARE_NOT_AVAILABLE;
+                case SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION:
+                case SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION:
+                case SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION:
+                    return SHAOptionsBase.SHA_INTRINSICS_ARE_NOT_AVAILABLE;
+                default:
+                    throw new Error("Unexpected option " + optionName);
+            }
+        } else {
+            throw new Error("Support for CPUs other then X86 or SPARC is not "
+                    + "implemented.");
+        }
+    }
+
+    /**
+     * Returns the predicate indicating whether or not CPU instructions required
+     * by the option with name {@code optionName} are available.
+     *
+     * @param optionName The name of the option for which a predicate should be
+     *                   returned.
+     * @return The predicate on availability of CPU instructions required by the
+     *         option.
+     */
+    protected static BooleanSupplier getPredicateForOption(String optionName) {
+        switch (optionName) {
+            case SHAOptionsBase.USE_SHA_OPTION:
+                return IntrinsicPredicates.ANY_SHA_INSTRUCTION_AVAILABLE;
+            case SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION:
+                return IntrinsicPredicates.SHA1_INSTRUCTION_AVAILABLE;
+            case SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION:
+                return IntrinsicPredicates.SHA256_INSTRUCTION_AVAILABLE;
+            case SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION:
+                return IntrinsicPredicates.SHA512_INSTRUCTION_AVAILABLE;
+            default:
+                throw new Error("Unexpected option " + optionName);
+        }
+    }
+
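+    /**
+     * Constructs a new test that runs the given test cases. The base class
+     * predicate is always {@code true}; each test case decides on its own,
+     * via its predicate, whether it is applicable to the current VM and CPU.
+     */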
+    public SHAOptionsBase(TestCase... testCases) {
+        super(Boolean.TRUE::booleanValue);
+        this.testCases = testCases;
+    }
+
+    @Override
+    protected void runTestCases() throws Throwable {
+        for (TestCase testCase : testCases) {
+            testCase.test();
+        }
+    }
+
+    public static abstract class TestCase {
+        protected final String optionName;
+        private final BooleanSupplier predicate;
+
+        protected TestCase(String optionName, BooleanSupplier predicate) {
+            this.optionName = optionName;
+            this.predicate = predicate;
+        }
+
+        protected final void test() throws Throwable {
+            String testCaseName = this.getClass().getName();
+            if (!predicate.getAsBoolean()) {
+                System.out.println("Skipping " + testCaseName
+                        + " due to predicate failure.");
+                return;
+            } else {
+                System.out.println("Running " + testCaseName);
+            }
+
+            verifyWarnings();
+            verifyOptionValues();
+        }
+
+        protected void verifyWarnings() throws Throwable {
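+            // Hook for subclasses: verify warnings expected in the VM output.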
+        }
+
+        protected void verifyOptionValues() throws Throwable {
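+            // Hook for subclasses: verify the resulting option values.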
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/cli/TestUseSHA1IntrinsicsOptionOnSupportedCPU.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary Verify UseSHA1Intrinsics option processing on supported CPU.
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary testcases
+ * @build TestUseSHA1IntrinsicsOptionOnSupportedCPU
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI TestUseSHA1IntrinsicsOptionOnSupportedCPU
+ */
+public class TestUseSHA1IntrinsicsOptionOnSupportedCPU {
+    public static void main(String args[]) throws Throwable {
+        new SHAOptionsBase(new GenericTestCaseForSupportedSparcCPU(
+                SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION)).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/cli/TestUseSHA1IntrinsicsOptionOnUnsupportedCPU.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary Verify UseSHA1Intrinsics option processing on unsupported CPU.
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary testcases
+ * @build TestUseSHA1IntrinsicsOptionOnUnsupportedCPU
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI
+ *                   TestUseSHA1IntrinsicsOptionOnUnsupportedCPU
+ */
+public class TestUseSHA1IntrinsicsOptionOnUnsupportedCPU {
+    public static void main(String args[]) throws Throwable {
+        new SHAOptionsBase(
+                new GenericTestCaseForUnsupportedSparcCPU(
+                        SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION),
+                new UseSHAIntrinsicsSpecificTestCaseForUnsupportedSparcCPU(
+                        SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION),
+                new GenericTestCaseForUnsupportedX86CPU(
+                        SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION),
+                new GenericTestCaseForOtherCPU(
+                        SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION)).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/cli/TestUseSHA256IntrinsicsOptionOnSupportedCPU.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary Verify UseSHA256Intrinsics option processing on supported CPU.
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary testcases
+ * @build TestUseSHA256IntrinsicsOptionOnSupportedCPU
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI
+ *                   TestUseSHA256IntrinsicsOptionOnSupportedCPU
+ */
+public class TestUseSHA256IntrinsicsOptionOnSupportedCPU {
+    public static void main(String args[]) throws Throwable {
+        new SHAOptionsBase(new GenericTestCaseForSupportedSparcCPU(
+                SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION)).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/cli/TestUseSHA256IntrinsicsOptionOnUnsupportedCPU.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary Verify UseSHA256Intrinsics option processing on unsupported CPU.
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary testcases
+ * @build TestUseSHA256IntrinsicsOptionOnUnsupportedCPU
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI
+ *                   TestUseSHA256IntrinsicsOptionOnUnsupportedCPU
+ */
+public class TestUseSHA256IntrinsicsOptionOnUnsupportedCPU {
+    public static void main(String args[]) throws Throwable {
+        new SHAOptionsBase(
+                new GenericTestCaseForUnsupportedSparcCPU(
+                        SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION),
+                new UseSHAIntrinsicsSpecificTestCaseForUnsupportedSparcCPU(
+                        SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION),
+                new GenericTestCaseForUnsupportedX86CPU(
+                        SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION),
+                new GenericTestCaseForOtherCPU(
+                        SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION)).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/cli/TestUseSHA512IntrinsicsOptionOnSupportedCPU.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary Verify UseSHA512Intrinsics option processing on supported CPU.
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary testcases
+ * @build TestUseSHA512IntrinsicsOptionOnSupportedCPU
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI
+ *                   TestUseSHA512IntrinsicsOptionOnSupportedCPU
+ */
+public class TestUseSHA512IntrinsicsOptionOnSupportedCPU {
+    public static void main(String args[]) throws Throwable {
+        new SHAOptionsBase(new GenericTestCaseForSupportedSparcCPU(
+                SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION)).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/cli/TestUseSHA512IntrinsicsOptionOnUnsupportedCPU.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary Verify UseSHA512Intrinsics option processing on unsupported CPU.
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary testcases
+ * @build TestUseSHA512IntrinsicsOptionOnUnsupportedCPU
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI
+ *                   TestUseSHA512IntrinsicsOptionOnUnsupportedCPU
+ */
+public class TestUseSHA512IntrinsicsOptionOnUnsupportedCPU {
+    public static void main(String args[]) throws Throwable {
+        new SHAOptionsBase(
+                new GenericTestCaseForUnsupportedSparcCPU(
+                        SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION),
+                new UseSHAIntrinsicsSpecificTestCaseForUnsupportedSparcCPU(
+                        SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION),
+                new GenericTestCaseForUnsupportedX86CPU(
+                        SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION),
+                new GenericTestCaseForOtherCPU(
+                        SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION)).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/cli/TestUseSHAOptionOnSupportedCPU.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary Verify UseSHA option processing on supported CPU.
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary testcases
+ * @build TestUseSHAOptionOnSupportedCPU
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI TestUseSHAOptionOnSupportedCPU
+ */
+public class TestUseSHAOptionOnSupportedCPU {
+    public static void main(String args[]) throws Throwable {
+        new SHAOptionsBase(
+                new GenericTestCaseForSupportedSparcCPU(
+                        SHAOptionsBase.USE_SHA_OPTION),
+                new UseSHASpecificTestCaseForSupportedSparcCPU(
+                        SHAOptionsBase.USE_SHA_OPTION)).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/cli/TestUseSHAOptionOnUnsupportedCPU.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary Verify UseSHA option processing on unsupported CPU.
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary testcases
+ * @build TestUseSHAOptionOnUnsupportedCPU
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI TestUseSHAOptionOnUnsupportedCPU
+ */
+public class TestUseSHAOptionOnUnsupportedCPU {
+    public static void main(String args[]) throws Throwable {
+        new SHAOptionsBase(
+                new GenericTestCaseForUnsupportedSparcCPU(
+                        SHAOptionsBase.USE_SHA_OPTION),
+                new UseSHASpecificTestCaseForUnsupportedSparcCPU(
+                        SHAOptionsBase.USE_SHA_OPTION),
+                new GenericTestCaseForUnsupportedX86CPU(
+                        SHAOptionsBase.USE_SHA_OPTION),
+                new GenericTestCaseForOtherCPU(
+                        SHAOptionsBase.USE_SHA_OPTION)).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForOtherCPU.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.ExitCode;
+import com.oracle.java.testlibrary.Platform;
+import com.oracle.java.testlibrary.cli.CommandLineOptionTest;
+import com.oracle.java.testlibrary.cli.predicate.NotPredicate;
+import com.oracle.java.testlibrary.cli.predicate.OrPredicate;
+
+/**
+ * Generic test case for SHA-related options targeted to non-x86 and
+ * non-SPARC CPUs.
+ */
+public class GenericTestCaseForOtherCPU extends
+        SHAOptionsBase.TestCase {
+    public GenericTestCaseForOtherCPU(String optionName) {
+        // Execute the test case on any CPU except SPARC and X86
+        super(optionName, new NotPredicate(new OrPredicate(Platform::isSparc,
+                new OrPredicate(Platform::isX64, Platform::isX86))));
+    }
+
+    @Override
+    protected void verifyWarnings() throws Throwable {
+        // Verify that on non-x86 and non-SPARC CPU usage of SHA-related
+        // options will not cause any warnings.
+        CommandLineOptionTest.verifySameJVMStartup(null,
+                new String[] { ".*" + optionName + ".*" }, ExitCode.OK,
+                CommandLineOptionTest.prepareBooleanFlag(optionName, true));
+
+        CommandLineOptionTest.verifySameJVMStartup(null,
+                new String[] { ".*" + optionName + ".*" }, ExitCode.OK,
+                CommandLineOptionTest.prepareBooleanFlag(optionName, false));
+    }
+
+    @Override
+    protected void verifyOptionValues() throws Throwable {
+        // Verify that option is disabled by default.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false");
+
+        // Verify that option is disabled even if it was explicitly enabled
+        // using CLI options.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
+                CommandLineOptionTest.prepareBooleanFlag(optionName, true));
+
+        // Verify that the option is disabled when it is explicitly disabled
+        // using CLI options.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
+                CommandLineOptionTest.prepareBooleanFlag(optionName, false));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForSupportedSparcCPU.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.ExitCode;
+import com.oracle.java.testlibrary.Platform;
+import com.oracle.java.testlibrary.cli.CommandLineOptionTest;
+import com.oracle.java.testlibrary.cli.predicate.AndPredicate;
+
+/**
+ * Generic test case for SHA-related options targeted to SPARC CPUs which
+ * support instructions required by the tested option.
+ */
+public class GenericTestCaseForSupportedSparcCPU extends
+        SHAOptionsBase.TestCase {
+    public GenericTestCaseForSupportedSparcCPU(String optionName) {
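+        // Execute the test case only on SPARC CPUs that support the
+        // instructions required by the tested option.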
+        super(optionName, new AndPredicate(Platform::isSparc,
+                SHAOptionsBase.getPredicateForOption(optionName)));
+    }
+
+    @Override
+    protected void verifyWarnings() throws Throwable {
+        // Verify that there are no warnings when the option is explicitly
+        // enabled.
+        CommandLineOptionTest.verifySameJVMStartup(null, new String[] {
+                        SHAOptionsBase.getWarningForUnsupportedCPU(optionName)
+                }, ExitCode.OK,
+                CommandLineOptionTest.prepareBooleanFlag(optionName, true));
+
+        // Verify that option could be disabled even if +UseSHA was passed to
+        // JVM.
+        CommandLineOptionTest.verifySameJVMStartup(null, new String[] {
+                        SHAOptionsBase.getWarningForUnsupportedCPU(optionName)
+                }, ExitCode.OK,
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA_OPTION, true),
+                CommandLineOptionTest.prepareBooleanFlag(optionName, false));
+
+        // Verify that it is possible to enable the tested option and disable
+        // all SHA intrinsics via -UseSHA without any warnings.
+        CommandLineOptionTest.verifySameJVMStartup(null, new String[] {
+                        SHAOptionsBase.getWarningForUnsupportedCPU(optionName)
+                }, ExitCode.OK,
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA_OPTION, false),
+                CommandLineOptionTest.prepareBooleanFlag(optionName, true));
+    }
+
+    @Override
+    protected void verifyOptionValues() throws Throwable {
+        // Verify that on a supported CPU the option is enabled by default.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "true");
+
+        // Verify that it is possible to explicitly enable the option.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "true",
+                CommandLineOptionTest.prepareBooleanFlag(optionName, true));
+
+        // Verify that it is possible to explicitly disable the option.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
+                CommandLineOptionTest.prepareBooleanFlag(optionName, false));
+
+        // Verify that the option is disabled when -UseSHA was passed to JVM.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
+                CommandLineOptionTest.prepareBooleanFlag(optionName, true),
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA_OPTION, false));
+
+        // Verify that it is possible to explicitly disable the tested option
+        // even if +UseSHA was passed to JVM.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA_OPTION, true),
+                CommandLineOptionTest.prepareBooleanFlag(optionName, false));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForUnsupportedSparcCPU.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.ExitCode;
+import com.oracle.java.testlibrary.Platform;
+import com.oracle.java.testlibrary.cli.CommandLineOptionTest;
+import com.oracle.java.testlibrary.cli.predicate.AndPredicate;
+import com.oracle.java.testlibrary.cli.predicate.NotPredicate;
+
+/**
+ * Generic test case for SHA-related options targeted to SPARC CPUs which don't
+ * support the instruction required by the tested option.
+ */
+public class GenericTestCaseForUnsupportedSparcCPU extends
+        SHAOptionsBase.TestCase {
+    public GenericTestCaseForUnsupportedSparcCPU(String optionName) {
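+        // Execute the test case only on SPARC CPUs that lack the
+        // instructions required by the tested option.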
+        super(optionName, new AndPredicate(Platform::isSparc,
+                new NotPredicate(SHAOptionsBase.getPredicateForOption(
+                        optionName))));
+    }
+
+    @Override
+    protected void verifyWarnings() throws Throwable {
+        // Verify that the option could be disabled without any warnings.
+        CommandLineOptionTest.verifySameJVMStartup(null, new String[] {
+                        SHAOptionsBase.getWarningForUnsupportedCPU(optionName)
+                }, ExitCode.OK,
+                CommandLineOptionTest.prepareBooleanFlag(optionName, false));
+    }
+
+    @Override
+    protected void verifyOptionValues() throws Throwable {
+        // Verify that option is disabled by default.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false");
+
+        // Verify that option is disabled even if it was explicitly enabled
+        // using CLI options.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
+                CommandLineOptionTest.prepareBooleanFlag(optionName, true));
+
+        // Verify that option is disabled when +UseSHA was passed to JVM.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA_OPTION, true));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/cli/testcases/GenericTestCaseForUnsupportedX86CPU.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.ExitCode;
+import com.oracle.java.testlibrary.Platform;
+import com.oracle.java.testlibrary.cli.CommandLineOptionTest;
+import com.oracle.java.testlibrary.cli.predicate.OrPredicate;
+
+/**
+ * Generic test case for SHA-related options targeted to X86 CPUs that don't
+ * support SHA-related instructions.
+ */
+public class GenericTestCaseForUnsupportedX86CPU
+        extends SHAOptionsBase.TestCase {
+    public GenericTestCaseForUnsupportedX86CPU(String optionName) {
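+        // Execute the test case on any x86 or x64 CPU; SHA instructions are
+        // treated as unavailable on all of them for this test.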
+        super(optionName, new OrPredicate(Platform::isX64, Platform::isX86));
+    }
+
+    @Override
+    protected void verifyWarnings() throws Throwable {
+        // Verify that a warning occurs in the VM output when the tested
+        // option is explicitly enabled.
+        CommandLineOptionTest.verifySameJVMStartup(new String[] {
+                        SHAOptionsBase.getWarningForUnsupportedCPU(optionName)
+                }, null, ExitCode.OK,
+                CommandLineOptionTest.prepareBooleanFlag(optionName, true));
+
+        // Verify that the tested option could be explicitly disabled without
+        // a warning.
+        CommandLineOptionTest.verifySameJVMStartup(null, new String[] {
+                        SHAOptionsBase.getWarningForUnsupportedCPU(optionName)
+                }, ExitCode.OK,
+                CommandLineOptionTest.prepareBooleanFlag(optionName, false));
+    }
+
+    @Override
+    protected void verifyOptionValues() throws Throwable {
+        // Verify that the tested option is disabled by default.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false");
+
+        // Verify that it is not possible to explicitly enable the option.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
+                CommandLineOptionTest.prepareBooleanFlag(optionName, true));
+
+        // Verify that the tested option is disabled even if +UseSHA was passed
+        // to JVM.
+        CommandLineOptionTest.verifyOptionValueForSameVM(optionName, "false",
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA_OPTION, true));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/cli/testcases/UseSHAIntrinsicsSpecificTestCaseForUnsupportedSparcCPU.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.ExitCode;
+import com.oracle.java.testlibrary.Platform;
+import com.oracle.java.testlibrary.cli.CommandLineOptionTest;
+import com.oracle.java.testlibrary.cli.predicate.AndPredicate;
+import com.oracle.java.testlibrary.cli.predicate.NotPredicate;
+import sha.predicate.IntrinsicPredicates;
+
+/**
+ * Test case specific to UseSHA*Intrinsics options targeted to SPARC CPUs which
+ * don't support the instruction required by the tested option, but do support
+ * other SHA-related instructions.
+ *
+ * For example, a CPU may support the sha1 instruction, but not sha256 or
+ * sha512.
+ */
+public class UseSHAIntrinsicsSpecificTestCaseForUnsupportedSparcCPU
+        extends SHAOptionsBase.TestCase {
+    public UseSHAIntrinsicsSpecificTestCaseForUnsupportedSparcCPU(
+            String optionName) {
+        // Execute the test case on a SPARC CPU that supports at least one
+        // sha* instruction, but does not support the sha* instruction
+        // required by the tested option.
+        super(optionName, new AndPredicate(Platform::isSparc,
+                new AndPredicate(
+                        IntrinsicPredicates.ANY_SHA_INSTRUCTION_AVAILABLE,
+                        new NotPredicate(SHAOptionsBase.getPredicateForOption(
+                                optionName)))));
+    }
+
+    @Override
+    protected void verifyWarnings() throws Throwable {
+        // Verify that an attempt to enable the tested option causes a warning.
+        CommandLineOptionTest.verifySameJVMStartup(new String[] {
+                        SHAOptionsBase.getWarningForUnsupportedCPU(optionName)
+                }, null, ExitCode.OK,
+                CommandLineOptionTest.prepareBooleanFlag(optionName, true));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/cli/testcases/UseSHASpecificTestCaseForSupportedSparcCPU.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.Asserts;
+import com.oracle.java.testlibrary.ExitCode;
+import com.oracle.java.testlibrary.Platform;
+import com.oracle.java.testlibrary.cli.CommandLineOptionTest;
+import com.oracle.java.testlibrary.cli.predicate.AndPredicate;
+import sha.predicate.IntrinsicPredicates;
+
+/**
+ * UseSHA specific test case targeted to SPARC CPUs which support at least one
+ * sha* instruction.
+ */
+public class UseSHASpecificTestCaseForSupportedSparcCPU
+        extends SHAOptionsBase.TestCase {
+    public UseSHASpecificTestCaseForSupportedSparcCPU(String optionName) {
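+        // Execute the test case only on SPARC CPUs that support at least one
+        // sha* instruction.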
+        super(SHAOptionsBase.USE_SHA_OPTION, new AndPredicate(Platform::isSparc,
+                IntrinsicPredicates.ANY_SHA_INSTRUCTION_AVAILABLE));
+
+        Asserts.assertEQ(optionName, SHAOptionsBase.USE_SHA_OPTION,
+                "Test case should be used for " + SHAOptionsBase.USE_SHA_OPTION
+                        + " option only.");
+    }
+
+    @Override
+    protected void verifyWarnings() throws Throwable {
+        // Verify that there will be no warnings when +UseSHA was passed and
+        // all UseSHA*Intrinsics options were disabled.
+        CommandLineOptionTest.verifySameJVMStartup(
+                null, new String[] { ".*UseSHA.*" }, ExitCode.OK,
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA_OPTION, true),
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION, false),
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION, false),
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION, false));
+    }
+
+    @Override
+    protected void verifyOptionValues() throws Throwable {
+        // Verify that UseSHA is disabled when all UseSHA*Intrinsics are
+        // disabled.
+        CommandLineOptionTest.verifyOptionValueForSameVM(
+                SHAOptionsBase.USE_SHA_OPTION, "false",
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION, false),
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION, false),
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION, false));
+
+        // Verify that UseSHA is disabled when all UseSHA*Intrinsics are
+        // disabled even if it was explicitly enabled.
+        CommandLineOptionTest.verifyOptionValueForSameVM(
+                SHAOptionsBase.USE_SHA_OPTION, "false",
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA_OPTION, true),
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION, false),
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION, false),
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION, false));
+
+        // Verify that explicitly disabled UseSHA option remains disabled even
+        // if all UseSHA*Intrinsics options were enabled.
+        CommandLineOptionTest.verifyOptionValueForSameVM(
+                SHAOptionsBase.USE_SHA_OPTION, "false",
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA_OPTION, false),
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION, true),
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION, true),
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION, true));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/cli/testcases/UseSHASpecificTestCaseForUnsupportedSparcCPU.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.Asserts;
+import com.oracle.java.testlibrary.ExitCode;
+import com.oracle.java.testlibrary.Platform;
+import com.oracle.java.testlibrary.cli.CommandLineOptionTest;
+import com.oracle.java.testlibrary.cli.predicate.AndPredicate;
+import com.oracle.java.testlibrary.cli.predicate.NotPredicate;
+import sha.predicate.IntrinsicPredicates;
+
+/**
+ * UseSHA specific test case targeted to SPARC CPUs which don't support any
+ * sha* instruction.
+ */
+public class UseSHASpecificTestCaseForUnsupportedSparcCPU
+        extends SHAOptionsBase.TestCase {
+    public UseSHASpecificTestCaseForUnsupportedSparcCPU(String optionName) {
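+        // Execute the test case only on SPARC CPUs that do not support any
+        // sha* instruction.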
+        super(SHAOptionsBase.USE_SHA_OPTION, new AndPredicate(Platform::isSparc,
+                new NotPredicate(
+                        IntrinsicPredicates.ANY_SHA_INSTRUCTION_AVAILABLE)));
+
+        Asserts.assertEQ(optionName, SHAOptionsBase.USE_SHA_OPTION,
+                "Test case should be used for " + SHAOptionsBase.USE_SHA_OPTION
+                        + " option only.");
+    }
+
+    @Override
+    protected void verifyWarnings() throws Throwable {
+        // Verify that an attempt to use the UseSHA option causes a warning.
+        CommandLineOptionTest.verifySameJVMStartup(new String[] {
+                        SHAOptionsBase.getWarningForUnsupportedCPU(optionName)
+                }, null, ExitCode.OK,
+                CommandLineOptionTest.prepareBooleanFlag(optionName, true));
+    }
+
+    @Override
+    protected void verifyOptionValues() throws Throwable {
+        // Verify that UseSHA option remains disabled even if all
+        // UseSHA*Intrinsics options were enabled.
+        CommandLineOptionTest.verifyOptionValueForSameVM(
+                SHAOptionsBase.USE_SHA_OPTION, "false",
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION, true),
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION, true),
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION, true));
+
+        // Verify that UseSHA option remains disabled even if all
+        // UseSHA*Intrinsics options were enabled and UseSHA was enabled as well.
+        CommandLineOptionTest.verifyOptionValueForSameVM(
+                SHAOptionsBase.USE_SHA_OPTION, "false",
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA_OPTION, true),
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA1_INTRINSICS_OPTION, true),
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA256_INTRINSICS_OPTION, true),
+                CommandLineOptionTest.prepareBooleanFlag(
+                        SHAOptionsBase.USE_SHA512_INTRINSICS_OPTION, true));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/sanity/SHASanityTestBase.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import intrinsics.Verifier;
+import sun.hotspot.WhiteBox;
+
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Objects;
+import java.util.Properties;
+import java.util.function.BooleanSupplier;
+
+/**
+ * Base class for sanity tests on SHA intrinsics support.
+ */
+public class SHASanityTestBase {
+    protected static final String SHA1_INTRINSIC_ID
+            = "_sha_implCompress";
+    protected static final String SHA256_INTRINSIC_ID
+            = "_sha2_implCompress";
+    protected static final String SHA512_INTRINSIC_ID
+            = "_sha5_implCompress";
+    protected static final String MB_INTRINSIC_ID
+            = "_digestBase_implCompressMB";
+
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+    private static final int MSG_SIZE = 1024;
+    private static final int OFFSET = 0;
+    private static final int ITERATIONS = 10000;
+    private static final int WARMUP_ITERATIONS = 1;
+    private static final String PROVIDER = "SUN";
+
+    private final BooleanSupplier predicate;
+    private final String intrinsicID;
+
+    /**
+     * Constructs a new test for the intrinsic with ID {@code intrinsicID},
+     * which is expected to be used if {@code predicate} evaluates to
+     * {@code true}.
+     *
+     * @param predicate The predicate indicating if the intrinsic is expected to
+     *                  be used.
+     * @param intrinsicID The ID of the intrinsic to be tested.
+     */
+    protected SHASanityTestBase(BooleanSupplier predicate, String intrinsicID) {
+        this.predicate = predicate;
+        this.intrinsicID = intrinsicID;
+    }
+
+    /**
+     * Runs the test and dumps the verification properties to a file.
+     *
+     * @throws Exception if something goes wrong.
+     */
+    public final void test() throws Exception {
+        String algorithm = Objects.requireNonNull(
+                System.getProperty("algorithm"),
+                "Algorithm name should be specified.");
+
+        dumpProperties();
+
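+        // Run the SHA workload so the tested digest methods get compiled;
+        // the external intrinsics.Verifier later checks the compilation logs
+        // against the properties dumped above.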
+        TestSHA.testSHA(SHASanityTestBase.PROVIDER, algorithm,
+                SHASanityTestBase.MSG_SIZE, SHASanityTestBase.OFFSET,
+                SHASanityTestBase.ITERATIONS,
+                SHASanityTestBase.WARMUP_ITERATIONS);
+    }
+
+    /**
+     * Dumps properties containing the name of the tested intrinsic and
+     * whether or not it is expected to be used to the file
+     * &lt;LogFile value&gt;.verify.properties.
+     *
+     * @throws IOException if something goes wrong while writing the file.
+     */
+    private void dumpProperties() throws IOException {
+        Properties properties = new Properties();
+        properties.setProperty(Verifier.INTRINSIC_NAME_PROPERTY, intrinsicID);
+        properties.setProperty(Verifier.INTRINSIC_IS_EXPECTED_PROPERTY,
+                String.valueOf(predicate.getAsBoolean()));
+
+        String logFileName
+                = SHASanityTestBase.WHITE_BOX.getStringVMFlag("LogFile");
+        // try-with-resources ensures the stream is closed even on failure.
+        try (FileOutputStream fileOutputStream = new FileOutputStream(
+                logFileName + Verifier.PROPERTY_FILE_SUFFIX)) {
+            properties.store(fileOutputStream, null);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/sanity/TestSHA1Intrinsics.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary Verify that SHA-1 intrinsic is actually used.
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary ../
+ * @build TestSHA intrinsics.Verifier TestSHA1Intrinsics
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=positive.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA
+ *                   -XX:+UseSHA1Intrinsics
+ *                   -Dalgorithm=SHA-1 TestSHA1Intrinsics
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=negative.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA
+ *                   -XX:-UseSHA1Intrinsics
+ *                   -Dalgorithm=SHA-1 TestSHA1Intrinsics
+ * @run main/othervm -DverificationStrategy=VERIFY_INTRINSIC_USAGE
+ *                   intrinsics.Verifier positive.log negative.log
+ */
+import sha.predicate.IntrinsicPredicates;
+
+public class TestSHA1Intrinsics {
+    public static void main(String args[]) throws Exception {
+        new SHASanityTestBase(IntrinsicPredicates.SHA1_INTRINSICS_AVAILABLE,
+                SHASanityTestBase.SHA1_INTRINSIC_ID).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/sanity/TestSHA1MultiBlockIntrinsics.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sha.predicate.IntrinsicPredicates;
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary Verify that SHA-1 multi-block intrinsic is actually used.
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary ../
+ * @build TestSHA intrinsics.Verifier TestSHA1MultiBlockIntrinsics
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=positive.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA
+ *                   -XX:+UseSHA1Intrinsics -XX:-UseSHA256Intrinsics
+ *                   -XX:-UseSHA512Intrinsics
+ *                   -Dalgorithm=SHA-1 TestSHA1MultiBlockIntrinsics
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=positive_def.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA
+ *                   -XX:+UseSHA1Intrinsics -Dalgorithm=SHA-1
+ *                   TestSHA1MultiBlockIntrinsics
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=negative.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA -XX:-UseSHA
+ *                   -Dalgorithm=SHA-1 TestSHA1MultiBlockIntrinsics
+ * @run main/othervm -DverificationStrategy=VERIFY_INTRINSIC_USAGE
+ *                   intrinsics.Verifier positive.log positive_def.log
+ *                   negative.log
+ */
+public class TestSHA1MultiBlockIntrinsics {
+    public static void main(String args[]) throws Exception {
+        new SHASanityTestBase(IntrinsicPredicates.SHA1_INTRINSICS_AVAILABLE,
+                SHASanityTestBase.MB_INTRINSIC_ID).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/sanity/TestSHA256Intrinsics.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sha.predicate.IntrinsicPredicates;
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary Verify that SHA-256 intrinsic is actually used.
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary ../
+ * @build TestSHA intrinsics.Verifier TestSHA256Intrinsics
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=positive_224.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA
+ *                   -XX:+UseSHA256Intrinsics
+ *                   -Dalgorithm=SHA-224 TestSHA256Intrinsics
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=negative_224.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA
+ *                   -XX:-UseSHA256Intrinsics
+ *                   -Dalgorithm=SHA-224 TestSHA256Intrinsics
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=positive_256.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA
+ *                   -XX:+UseSHA256Intrinsics
+ *                   -Dalgorithm=SHA-256 TestSHA256Intrinsics
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=negative_256.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA
+ *                   -XX:-UseSHA256Intrinsics
+ *                   -Dalgorithm=SHA-256 TestSHA256Intrinsics
+ * @run main/othervm -DverificationStrategy=VERIFY_INTRINSIC_USAGE
+ *                    intrinsics.Verifier positive_224.log positive_256.log
+ *                    negative_224.log negative_256.log
+ */
+public class TestSHA256Intrinsics {
+    public static void main(String args[]) throws Exception {
+        new SHASanityTestBase(IntrinsicPredicates.SHA256_INTRINSICS_AVAILABLE,
+                SHASanityTestBase.SHA256_INTRINSIC_ID).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/sanity/TestSHA256MultiBlockIntrinsics.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sha.predicate.IntrinsicPredicates;
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary Verify that SHA-256 multi-block intrinsic is actually used.
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary ../
+ * @build TestSHA intrinsics.Verifier TestSHA256MultiBlockIntrinsics
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=positive_224.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA
+ *                   -XX:+UseSHA256Intrinsics -XX:-UseSHA1Intrinsics
+ *                   -XX:-UseSHA512Intrinsics
+ *                   -Dalgorithm=SHA-224 TestSHA256MultiBlockIntrinsics
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=positive_224_def.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA
+ *                   -XX:+UseSHA256Intrinsics -Dalgorithm=SHA-224
+ *                   TestSHA256MultiBlockIntrinsics
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=negative_224.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA -XX:-UseSHA
+ *                   -Dalgorithm=SHA-224 TestSHA256MultiBlockIntrinsics
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=positive_256.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA
+ *                   -XX:+UseSHA256Intrinsics -XX:-UseSHA1Intrinsics
+ *                   -XX:-UseSHA512Intrinsics
+ *                   -Dalgorithm=SHA-256 TestSHA256MultiBlockIntrinsics
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=positive_256_def.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA
+ *                   -XX:+UseSHA256Intrinsics -Dalgorithm=SHA-256
+ *                   TestSHA256MultiBlockIntrinsics
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=negative_256.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA -XX:-UseSHA
+ *                   -Dalgorithm=SHA-256 TestSHA256MultiBlockIntrinsics
+ * @run main/othervm -DverificationStrategy=VERIFY_INTRINSIC_USAGE
+ *                   intrinsics.Verifier positive_224.log positive_256.log
+ *                   positive_224_def.log positive_256_def.log negative_224.log
+ *                   negative_256.log
+ */
+public class TestSHA256MultiBlockIntrinsics {
+    public static void main(String args[]) throws Exception {
+        new SHASanityTestBase(IntrinsicPredicates.SHA256_INTRINSICS_AVAILABLE,
+                SHASanityTestBase.MB_INTRINSIC_ID).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/sanity/TestSHA512Intrinsics.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sha.predicate.IntrinsicPredicates;
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary Verify that SHA-512 intrinsic is actually used.
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary ../
+ * @build TestSHA intrinsics.Verifier TestSHA512Intrinsics
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=positive_384.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA
+ *                   -XX:+UseSHA512Intrinsics
+ *                   -Dalgorithm=SHA-384 TestSHA512Intrinsics
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=negative_384.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA
+ *                   -XX:-UseSHA512Intrinsics
+ *                   -Dalgorithm=SHA-384 TestSHA512Intrinsics
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=positive_512.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA
+ *                   -XX:+UseSHA512Intrinsics
+ *                   -Dalgorithm=SHA-512 TestSHA512Intrinsics
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=negative_512.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA
+ *                   -XX:-UseSHA512Intrinsics
+ *                   -Dalgorithm=SHA-512 TestSHA512Intrinsics
+ * @run main/othervm -DverificationStrategy=VERIFY_INTRINSIC_USAGE
+ *                    intrinsics.Verifier positive_384.log positive_512.log
+ *                    negative_384.log negative_512.log
+ */
+public class TestSHA512Intrinsics {
+    public static void main(String args[]) throws Exception {
+        new SHASanityTestBase(IntrinsicPredicates.SHA512_INTRINSICS_AVAILABLE,
+                SHASanityTestBase.SHA512_INTRINSIC_ID).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/sha/sanity/TestSHA512MultiBlockIntrinsics.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sha.predicate.IntrinsicPredicates;
+
+/**
+ * @test
+ * @bug 8035968
+ * @summary Verify that SHA-512 multi-block intrinsic is actually used.
+ * @library /testlibrary /testlibrary/whitebox /compiler/testlibrary ../
+ * @build TestSHA intrinsics.Verifier TestSHA512MultiBlockIntrinsics
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=positive_384.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA
+ *                   -XX:+UseSHA512Intrinsics -XX:-UseSHA1Intrinsics
+ *                   -XX:-UseSHA256Intrinsics
+ *                   -Dalgorithm=SHA-384 TestSHA512MultiBlockIntrinsics
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=positive_384_def.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA
+ *                   -XX:+UseSHA512Intrinsics -Dalgorithm=SHA-384
+ *                   TestSHA512MultiBlockIntrinsics
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=negative_384.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA -XX:-UseSHA
+ *                   -Dalgorithm=SHA-384 TestSHA512MultiBlockIntrinsics
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=positive_512.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA
+ *                   -XX:+UseSHA512Intrinsics -XX:-UseSHA1Intrinsics
+ *                   -XX:-UseSHA256Intrinsics
+ *                   -Dalgorithm=SHA-512 TestSHA512MultiBlockIntrinsics
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=positive_512_def.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA
+ *                   -XX:+UseSHA512Intrinsics -Dalgorithm=SHA-512
+ *                   TestSHA512MultiBlockIntrinsics
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -Xbatch -XX:CompileThreshold=500
+ *                   -XX:Tier4InvocationThreshold=500
+ *                   -XX:+LogCompilation -XX:LogFile=negative_512.log
+ *                   -XX:CompileOnly=sun/security/provider/DigestBase
+ *                   -XX:CompileOnly=sun/security/provider/SHA -XX:-UseSHA
+ *                   -Dalgorithm=SHA-512 TestSHA512MultiBlockIntrinsics
+ * @run main/othervm -DverificationStrategy=VERIFY_INTRINSIC_USAGE
+ *                    intrinsics.Verifier positive_384.log positive_512.log
+ *                    positive_384_def.log positive_512_def.log negative_384.log
+ *                    negative_512.log
+ */
+public class TestSHA512MultiBlockIntrinsics {
+    public static void main(String args[]) throws Exception {
+        new SHASanityTestBase(IntrinsicPredicates.SHA512_INTRINSICS_AVAILABLE,
+                SHASanityTestBase.MB_INTRINSIC_ID).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/jsr292/NullConstantReceiver.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8059556
+ * @run main/othervm -Xbatch NullConstantReceiver
+ */
+
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodType;
+
+public class NullConstantReceiver {
+    static final MethodHandle target;
+    static {
+        try {
+            target = MethodHandles.lookup().findVirtual(NullConstantReceiver.class, "test", MethodType.methodType(void.class));
+        } catch (ReflectiveOperationException e) {
+            throw new Error(e);
+        }
+    }
+
+    public void test() {}
+
+    static void run() throws Throwable {
+        target.invokeExact((NullConstantReceiver) null);
+    }
+
+    public static void main(String[] args) throws Throwable {
+        for (int i = 0; i<15000; i++) {
+            try {
+                run();
+            } catch (NullPointerException e) {
+                // expected
+                continue;
+            }
+            throw new AssertionError("NPE wasn't thrown");
+        }
+        System.out.println("TEST PASSED");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/jsr292/RedefineMethodUsedByMultipleMethodHandles.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8042235
+ * @summary redefining method used by multiple MethodHandles crashes VM
+ * @compile -XDignore.symbol.file RedefineMethodUsedByMultipleMethodHandles.java
+ * @run main RedefineMethodUsedByMultipleMethodHandles
+ */
+
+import java.io.*;
+import java.lang.instrument.*;
+import java.lang.invoke.*;
+import java.lang.invoke.MethodHandles.Lookup;
+import java.lang.management.*;
+import java.lang.reflect.*;
+import java.nio.file.*;
+import java.security.*;
+import java.util.jar.*;
+
+import javax.tools.*;
+
+import jdk.internal.org.objectweb.asm.*;
+
+public class RedefineMethodUsedByMultipleMethodHandles {
+
+    static class Foo {
+        public static Object getName() {
+            return "foo";
+        }
+    }
+
+    public static void main(String[] args) throws Throwable {
+
+        Lookup lookup = MethodHandles.lookup();
+        Method fooMethod = Foo.class.getDeclaredMethod("getName");
+
+        // fooMH2 displaces fooMH1 from the MemberNamesTable
+        MethodHandle fooMH1 = lookup.unreflect(fooMethod);
+        MethodHandle fooMH2 = lookup.unreflect(fooMethod);
+
+        System.out.println("fooMH1.invoke = " + fooMH1.invokeExact());
+        System.out.println("fooMH2.invoke = " + fooMH2.invokeExact());
+
+        // Redefining Foo.getName() causes vmtarget to be updated
+        // in fooMH2 but not fooMH1
+        redefineFoo();
+
+        // Full GC causes fooMH1.vmtarget to be deallocated
+        System.gc();
+
+        // Calling fooMH1.vmtarget crashes the VM
+        System.out.println("fooMH1.invoke = " + fooMH1.invokeExact());
+    }
+
+    /**
+     * Adds the class file bytes for {@code c} to {@code jar}.
+     */
+    static void add(JarOutputStream jar, Class<?> c) throws IOException {
+        String classAsPath = c.getName().replace('.', '/') + ".class";
+        jar.putNextEntry(new JarEntry(classAsPath));
+        InputStream stream = c.getClassLoader().getResourceAsStream(classAsPath);
+
+        int b;
+        while ((b = stream.read()) != -1) {
+            jar.write(b);
+        }
+    }
+
+    static void redefineFoo() throws Exception {
+        Manifest manifest = new Manifest();
+        manifest.getMainAttributes().put(Attributes.Name.MANIFEST_VERSION, "1.0");
+        Attributes mainAttrs = manifest.getMainAttributes();
+        mainAttrs.putValue("Agent-Class", FooAgent.class.getName());
+        mainAttrs.putValue("Can-Redefine-Classes", "true");
+        mainAttrs.putValue("Can-Retransform-Classes", "true");
+
+        Path jar = Files.createTempFile("myagent", ".jar");
+        try {
+            JarOutputStream jarStream = new JarOutputStream(new FileOutputStream(jar.toFile()), manifest);
+            add(jarStream, FooAgent.class);
+            add(jarStream, FooTransformer.class);
+            jarStream.close();
+            runAgent(jar);
+        } finally {
+            Files.deleteIfExists(jar);
+        }
+    }
+
+    public static void runAgent(Path agent) throws Exception {
+        String vmName = ManagementFactory.getRuntimeMXBean().getName();
+        int p = vmName.indexOf('@');
+        assert p != -1 : "VM name not in <pid>@<host> format: " + vmName;
+        String pid = vmName.substring(0, p);
+        ClassLoader cl = ToolProvider.getSystemToolClassLoader();
+        Class<?> c = Class.forName("com.sun.tools.attach.VirtualMachine", true, cl);
+        Method attach = c.getDeclaredMethod("attach", String.class);
+        Method loadAgent = c.getDeclaredMethod("loadAgent", String.class);
+        Method detach = c.getDeclaredMethod("detach");
+        Object vm = attach.invoke(null, pid);
+        loadAgent.invoke(vm, agent.toString());
+        detach.invoke(vm);
+    }
+
+    public static class FooAgent {
+
+        public static void agentmain(@SuppressWarnings("unused") String args, Instrumentation inst) throws Exception {
+            assert inst.isRedefineClassesSupported();
+            assert inst.isRetransformClassesSupported();
+            inst.addTransformer(new FooTransformer(), true);
+            Class<?>[] classes = inst.getAllLoadedClasses();
+            for (int i = 0; i < classes.length; i++) {
+                Class<?> c = classes[i];
+                if (c == Foo.class) {
+                    inst.retransformClasses(new Class[]{c});
+                }
+            }
+        }
+    }
+
+    static class FooTransformer implements ClassFileTransformer {
+
+        @Override
+        public byte[] transform(ClassLoader cl, String className, Class<?> classBeingRedefined, ProtectionDomain protectionDomain, byte[] classfileBuffer) throws IllegalClassFormatException {
+            if (Foo.class.equals(classBeingRedefined)) {
+                System.out.println("redefining " + classBeingRedefined);
+                ClassReader cr = new ClassReader(classfileBuffer);
+                ClassWriter cw = new ClassWriter(cr, ClassWriter.COMPUTE_FRAMES);
+                ClassVisitor adapter = new ClassVisitor(Opcodes.ASM5, cw) {
+                    @Override
+                    public MethodVisitor visitMethod(int access, String base, String desc, String signature, String[] exceptions) {
+                        MethodVisitor mv = cv.visitMethod(access, base, desc, signature, exceptions);
+                        if (mv != null) {
+                            mv = new MethodVisitor(Opcodes.ASM5, mv) {
+                                @Override
+                                public void visitLdcInsn(Object cst) {
+                                    System.out.println("replacing \"" + cst + "\" with \"bar\"");
+                                    mv.visitLdcInsn("bar");
+                                }
+                            };
+                        }
+                        return mv;
+                    }
+                };
+
+                cr.accept(adapter, ClassReader.SKIP_FRAMES);
+                cw.visitEnd();
+                return cw.toByteArray();
+            }
+            return classfileBuffer;
+        }
+    }
+}
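As an aside (not part of the change set), runAgent above goes through reflection only because com.sun.tools.attach lives in tools.jar, which is assumed not to be on the test's compile-time class path; under that assumption, the reflective calls correspond roughly to this direct usage:

    import java.lang.management.ManagementFactory;
    import java.nio.file.Path;

    import com.sun.tools.attach.VirtualMachine;

    // Sketch of the attach/loadAgent/detach sequence performed reflectively above.
    class AttachSketch {
        static void runAgent(Path agent) throws Exception {
            String vmName = ManagementFactory.getRuntimeMXBean().getName(); // "<pid>@<host>"
            String pid = vmName.substring(0, vmName.indexOf('@'));
            VirtualMachine vm = VirtualMachine.attach(pid);
            try {
                vm.loadAgent(agent.toString());
            } finally {
                vm.detach();
            }
        }
    }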
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/jsr292/VMAnonymousClasses.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8058828
+ * @run main/bootclasspath -Xbatch VMAnonymousClasses
+ */
+
+import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.MethodVisitor;
+import jdk.internal.org.objectweb.asm.Opcodes;
+import sun.misc.Unsafe;
+
+import java.lang.invoke.ConstantCallSite;
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodType;
+import java.lang.invoke.MutableCallSite;
+import java.lang.invoke.VolatileCallSite;
+
+public class VMAnonymousClasses {
+    static final String TEST_METHOD_NAME = "constant";
+
+    static final Unsafe UNSAFE = Unsafe.getUnsafe();
+
+    static int getConstantPoolSize(byte[] classFile) {
+        // The first few bytes:
+        // u4 magic;
+        // u2 minor_version;
+        // u2 major_version;
+        // u2 constant_pool_count;
+        return ((classFile[8] & 0xFF) << 8) | (classFile[9] & 0xFF);
+    }
+
+    static void test(Object value) throws ReflectiveOperationException {
+        System.out.printf("Test: %s", value != null ? value.getClass() : "null");
+
+        ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_MAXS | ClassWriter.COMPUTE_FRAMES);
+        cw.visit(Opcodes.V1_8, Opcodes.ACC_PUBLIC | Opcodes.ACC_SUPER, "Test", null, "java/lang/Object", null);
+
+        MethodVisitor mv = cw.visitMethod(Opcodes.ACC_STATIC | Opcodes.ACC_PUBLIC, TEST_METHOD_NAME, "()Ljava/lang/Object;", null, null);
+
+        String placeholder = "CONSTANT";
+        int index = cw.newConst(placeholder);
+        mv.visitLdcInsn(placeholder);
+        mv.visitInsn(Opcodes.ARETURN);
+
+        mv.visitMaxs(0, 0);
+        mv.visitEnd();
+
+        byte[] classFile = cw.toByteArray();
+
+        Object[] cpPatches = new Object[getConstantPoolSize(classFile)];
+        cpPatches[index] = value;
+
+        Class<?> test = UNSAFE.defineAnonymousClass(VMAnonymousClasses.class, classFile, cpPatches);
+
+        Object expectedResult = (value != null) ? value : placeholder;
+        for (int i = 0; i<15000; i++) {
+            Object result = test.getMethod(TEST_METHOD_NAME).invoke(null);
+            if (result != expectedResult) {
+                throw new AssertionError(String.format("Wrong value returned: %s != %s", value, result));
+            }
+        }
+        System.out.println(" PASSED");
+    }
+
+    public static void main(String[] args) throws ReflectiveOperationException  {
+        // Objects
+        test(new Object());
+        test("TEST");
+        test(new VMAnonymousClasses());
+        test(null);
+
+        // Class
+        test(String.class);
+
+        // Arrays
+        test(new boolean[0]);
+        test(new byte[0]);
+        test(new char[0]);
+        test(new short[0]);
+        test(new int[0]);
+        test(new long[0]);
+        test(new float[0]);
+        test(new double[0]);
+        test(new Object[0]);
+
+        // Multi-dimensional arrays
+        test(new byte[0][0]);
+        test(new Object[0][0]);
+
+        // MethodHandle-related
+        MethodType   mt = MethodType.methodType(void.class, String[].class);
+        MethodHandle mh = MethodHandles.lookup().findStatic(VMAnonymousClasses.class, "main", mt);
+        test(mt);
+        test(mh);
+        test(new ConstantCallSite(mh));
+        test(new MutableCallSite(MethodType.methodType(void.class)));
+        test(new VolatileCallSite(MethodType.methodType(void.class)));
+
+        System.out.println("TEST PASSED");
+    }
+}
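For reference, a small worked example (the header bytes are hypothetical) of the constant_pool_count arithmetic that getConstantPoolSize relies on: the count sits in bytes 8 and 9 of the class file, right after the 4-byte magic and the two 2-byte version fields.

    // Hypothetical class-file header: magic, minor 0, major 52 (0x34),
    // constant_pool_count 0x0012 = 18 -- same index arithmetic as above.
    class ConstantPoolCountSketch {
        public static void main(String[] args) {
            byte[] header = {
                (byte) 0xCA, (byte) 0xFE, (byte) 0xBA, (byte) 0xBE, // u4 magic
                0x00, 0x00,                                         // u2 minor_version
                0x00, 0x34,                                         // u2 major_version
                0x00, 0x12                                          // u2 constant_pool_count
            };
            int cpCount = ((header[8] & 0xFF) << 8) | (header[9] & 0xFF);
            System.out.println(cpCount); // prints 18; cpPatches would get 18 slots,
            // and the slot returned by cw.newConst(placeholder) is the one patched
            // when defineAnonymousClass installs the class.
        }
    }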
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/loopopts/TestDeadBackbranchArrayAccess.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 8054478
+ * @summary dead backbranch in main loop results in erroneous array access
+ * @run main/othervm -XX:CompileOnly=TestDeadBackbranchArrayAccess -Xcomp TestDeadBackbranchArrayAccess
+ *
+ */
+
+public class TestDeadBackbranchArrayAccess {
+    static char[] pattern0 = {0};
+    static char[] pattern1 = {1};
+
+    static void test(char[] array) {
+        if (pattern1 == null) return;
+
+        int i = 0;
+        int pos = 0;
+        char c = array[pos];
+
+        while (i >= 0 && (c == pattern0[i] || c == pattern1[i])) {
+            i--;
+            pos--;
+            if (pos != -1) {
+                c = array[pos];
+            }
+        }
+    }
+
+    public static void main(String[] args) {
+        for (int i = 0; i < 1000000; i++) {
+            test(new char[1]);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/macronodes/TestEliminateAllocationPhi.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8046698
+ * @summary PhiNode inserted between AllocateNode and Initialization node confuses allocation elimination
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestEliminateAllocationPhi
+ *
+ */
+
+public class TestEliminateAllocationPhi {
+
+    // This will return I when called from m() and, once optimized, will
+    // go away, but it will confuse escape analysis in m(): it will
+    // find I as non-escaping but not scalar replaceable. It is in its own
+    // method so that we can make the profile of the if() branch look
+    // like it is taken sometimes.
+    static Integer m2(Integer I, int i) {
+        for (; i < 10; i=(i+2)*(i+2)) {
+        }
+        if (i == 121) {
+            return II;
+        }
+        return I;
+    }
+
+    static Integer II = new Integer(42);
+
+    static int m(int[] integers, boolean flag) {
+        int j = 0;
+        while(true) {
+            try {
+                int k = integers[j++];
+                // A branch that will cause loop unswitching
+                if (flag) {
+                    k += 42;
+                }
+                if (k < 1000) {
+                    throw new Exception();
+                }
+                // Because of the try/catch the Allocate node for this
+                // new will be in the loop while the Initialization
+                // node will be outside the loop. When loop
+                // unswitching happens, the Allocate node will be
+                // cloned and the results of both will be inputs to a
+                // Phi that will be between the Allocate nodes and the
+                // Initialization nodes.
+                Integer I = new Integer(k);
+
+                I = m2(I, 0);
+
+                int i = I.intValue();
+                return i;
+            } catch(Exception e) {
+            }
+        }
+    }
+
+    public static void main(String[] args) {
+        for (int i = 0; i < 5000; i++) {
+            m2(null, 1);
+        }
+
+        int[] integers = { 2000 };
+        for (int i = 0; i < 6000; i++) {
+            m(integers, (i%2) == 0);
+        }
+        int[] integers2 = { 1, 2, 3, 4, 5, 2000 };
+        for (int i = 0; i < 10000; i++) {
+            m(integers2, (i%2) == 0);
+        }
+    }
+}
--- a/test/compiler/membars/DekkerTest.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/membars/DekkerTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -25,9 +25,9 @@
  * @test
  * @bug 8007898
  * @summary Incorrect optimization of Memory Barriers in Matcher::post_store_load_barrier().
- * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:+StressGCM -XX:+StressLCM DekkerTest
- * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:+StressGCM -XX:+StressLCM DekkerTest
- * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:+StressGCM -XX:+StressLCM DekkerTest
+ * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:-TieredCompilation -XX:+StressGCM -XX:+StressLCM DekkerTest
+ * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:-TieredCompilation -XX:+StressGCM -XX:+StressLCM DekkerTest
+ * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:-TieredCompilation -XX:+StressGCM -XX:+StressLCM DekkerTest
  * @author Martin Doerr martin DOT doerr AT sap DOT com
  *
  * Run 3 times since the failure is intermittent.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/membars/TestMemBarAcquire.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestMemBarAcquire
+ * @bug 8048879
+ * @summary "Tests optimization of MemBarAcquireNodes"
+ * @run main/othervm -XX:-TieredCompilation -XX:-BackgroundCompilation TestMemBarAcquire
+ */
+public class TestMemBarAcquire {
+  private volatile static Object defaultObj = new Object();
+  private Object obj;
+
+  public TestMemBarAcquire(Object param) {
+    // Volatile load. MemBarAcquireNode is added after the
+    // load to prevent following loads from floating up past it.
+    // StoreNode is added to store result of load in 'obj'.
+    this.obj = defaultObj;
+    // Overrides 'obj' and therefore makes previous StoreNode
+    // and the corresponding LoadNode useless. However, the
+    // LoadNode is still connected to the MemBarAcquireNode
+    // that should now release the reference.
+    this.obj = param;
+  }
+
+  public static void main(String[] args) throws Exception {
+    // Make sure TestMemBarAcquire::<init> is compiled
+    for (int i = 0; i < 100000; ++i) {
+      TestMemBarAcquire p = new TestMemBarAcquire(new Object());
+    }
+  }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/osr/TestOSRWithNonEmptyStack.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+
+import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.Label;
+import jdk.internal.org.objectweb.asm.MethodVisitor;
+import static jdk.internal.org.objectweb.asm.Opcodes.*;
+
+/**
+ * @test
+ * @bug 8051344
+ * @summary Force OSR compilation with non-empty stack at the OSR entry point.
+ * @compile -XDignore.symbol.file TestOSRWithNonEmptyStack.java
+ * @run main/othervm -XX:CompileOnly=TestCase.test TestOSRWithNonEmptyStack
+ */
+public class TestOSRWithNonEmptyStack extends ClassLoader {
+    private static final int CLASS_FILE_VERSION = 52;
+    private static final String CLASS_NAME = "TestCase";
+    private static final String METHOD_NAME = "test";
+    private static final int ITERATIONS = 1_000_000;
+
+    private static byte[] generateTestClass() {
+        ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_FRAMES);
+
+        cw.visit(TestOSRWithNonEmptyStack.CLASS_FILE_VERSION, ACC_PUBLIC,
+                TestOSRWithNonEmptyStack.CLASS_NAME, null, "java/lang/Object",
+                null);
+
+        TestOSRWithNonEmptyStack.generateConstructor(cw);
+        TestOSRWithNonEmptyStack.generateTestMethod(cw);
+
+        cw.visitEnd();
+        return cw.toByteArray();
+    }
+
+    private static void generateConstructor(ClassWriter classWriter) {
+        MethodVisitor mv = classWriter.visitMethod(ACC_PUBLIC, "<init>", "()V",
+                null, null);
+
+        mv.visitCode();
+
+        mv.visitVarInsn(ALOAD, 0);
+        mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V",
+                false);
+        mv.visitInsn(RETURN);
+
+        mv.visitMaxs(0, 0);
+        mv.visitEnd();
+    }
+
+    private static void generateTestMethod(ClassWriter classWriter) {
+        MethodVisitor mv = classWriter.visitMethod(ACC_PUBLIC,
+                TestOSRWithNonEmptyStack.METHOD_NAME, "()V", null, null);
+        Label osrEntryPoint = new Label();
+
+        mv.visitCode();
+        // Push 'this' onto the stack before the OSR entry point so the OSR
+        // compilation sees a non-empty expression stack
+        mv.visitVarInsn(ALOAD, 0);
+        // Setup loop counter
+        mv.visitInsn(ICONST_0);
+        mv.visitVarInsn(ISTORE, 1);
+        // Begin loop
+        mv.visitLabel(osrEntryPoint);
+        // Increment loop counter
+        mv.visitVarInsn(ILOAD, 1);
+        mv.visitInsn(ICONST_1);
+        mv.visitInsn(IADD);
+        // Duplicate it for loop condition check
+        mv.visitInsn(DUP);
+        mv.visitVarInsn(ISTORE, 1);
+        // Check loop condition
+        mv.visitLdcInsn(TestOSRWithNonEmptyStack.ITERATIONS);
+        mv.visitJumpInsn(IF_ICMPLT, osrEntryPoint);
+        // Pop 'this'.
+        mv.visitInsn(POP);
+        mv.visitInsn(RETURN);
+
+        mv.visitMaxs(0, 0);
+        mv.visitEnd();
+    }
+
+    private void run() {
+        byte[] bytecode = TestOSRWithNonEmptyStack.generateTestClass();
+
+        try {
+            Class klass = defineClass(TestOSRWithNonEmptyStack.CLASS_NAME,
+                    bytecode, 0, bytecode.length);
+
+            Constructor ctor = klass.getConstructor();
+            Method method = klass.getDeclaredMethod(
+                    TestOSRWithNonEmptyStack.METHOD_NAME);
+
+            Object testCase = ctor.newInstance();
+            method.invoke(testCase);
+        } catch (Exception e) {
+            throw new RuntimeException(
+                    "Test bug: generated class should be valid.", e);
+        }
+    }
+
+    public static void main(String args[]) {
+        new TestOSRWithNonEmptyStack().run();
+    }
+}
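For readability, and only as an approximation (not part of the change set), the bytecode emitted by generateTestMethod corresponds to the do/while loop below. What cannot be expressed in Java source, and what the test is actually exercising, is the extra 'this' reference the generated code keeps on the operand stack across the backward branch, which makes the OSR entry point see a non-empty expression stack.

    // Source-level shape of the generated TestCase.test(); the extra value kept
    // on the operand stack across the loop has no Java-source equivalent.
    class TestCaseSketch {
        public void test() {
            int i = 0;
            do {
                i = i + 1;
            } while (i < 1_000_000);
        }
    }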
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/osr/TestRangeCheck.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestRangeCheck
+ * @bug 8054883
+ * @summary Tests that range check is not skipped
+ */
+
+public class TestRangeCheck {
+    public static void main(String args[]) {
+        try {
+            test();
+            throw new AssertionError("Expected ArrayIndexOutOfBoundsException was not thrown");
+        } catch (ArrayIndexOutOfBoundsException e) {
+            System.out.println("Expected ArrayIndexOutOfBoundsException was thrown");
+        }
+    }
+
+    private static void test() {
+        int arr[] = new int[1];
+        int result = 1;
+
+        // provoke OSR compilation
+        for (int i = 0; i < Integer.MAX_VALUE; i++) {
+        }
+
+        if (result > 0 && arr[~result] > 0) {
+            arr[~result] = 0;
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/rangechecks/TestRangeCheckSmearing.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,436 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8066103
+ * @summary C2's range check smearing allows out of bound array accesses
+ * @library /testlibrary /testlibrary/whitebox /compiler/whitebox /testlibrary/com/oracle/java/testlibrary
+ * @build TestRangeCheckSmearing
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main ClassFileInstaller com.oracle.java.testlibrary.Platform
+ * @run main/othervm -ea -Xmixed -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
+ *                   -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestRangeCheckSmearing
+ *
+ */
+
+import java.lang.annotation.*;
+import java.lang.reflect.*;
+import java.util.*;
+import sun.hotspot.WhiteBox;
+import sun.hotspot.code.NMethod;
+import com.oracle.java.testlibrary.Platform;
+
+public class TestRangeCheckSmearing {
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+
+    @Retention(RetentionPolicy.RUNTIME)
+    @interface Args { int[] value(); }
+
+    // first range check is i + max of all constants
+    @Args({0, 8})
+    static int m1(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+9];
+        if (allaccesses) {
+            res += array[i+8];
+            res += array[i+7];
+            res += array[i+6];
+            res += array[i+5];
+            res += array[i+4];
+            res += array[i+3];
+            res += array[i+2];
+            res += array[i+1];
+        }
+        return res;
+    }
+
+    // first range check is i + min of all constants
+    @Args({0, -9})
+    static int m2(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+1];
+        if (allaccesses) {
+            res += array[i+2];
+            res += array[i+3];
+            res += array[i+4];
+            res += array[i+5];
+            res += array[i+6];
+            res += array[i+7];
+            res += array[i+8];
+            res += array[i+9];
+        }
+        return res;
+    }
+
+    // first range check is not i + min/max of all constants
+    @Args({0, 8})
+    static int m3(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+3];
+        if (allaccesses) {
+            res += array[i+2];
+            res += array[i+1];
+            res += array[i+4];
+            res += array[i+5];
+            res += array[i+6];
+            res += array[i+7];
+            res += array[i+8];
+            res += array[i+9];
+        }
+        return res;
+    }
+
+    @Args({0, -9})
+    static int m4(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+3];
+        if (allaccesses) {
+            res += array[i+4];
+            res += array[i+1];
+            res += array[i+2];
+            res += array[i+5];
+            res += array[i+6];
+            res += array[i+7];
+            res += array[i+8];
+            res += array[i+9];
+        }
+        return res;
+    }
+
+    @Args({0, -3})
+    static int m5(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+3];
+        res += array[i+2];
+        if (allaccesses) {
+            res += array[i+1];
+            res += array[i+4];
+            res += array[i+5];
+            res += array[i+6];
+            res += array[i+7];
+            res += array[i+8];
+            res += array[i+9];
+        }
+        return res;
+    }
+
+    @Args({0, 6})
+    static int m6(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+3];
+        res += array[i+4];
+        if (allaccesses) {
+            res += array[i+2];
+            res += array[i+1];
+            res += array[i+5];
+            res += array[i+6];
+            res += array[i+7];
+            res += array[i+8];
+            res += array[i+9];
+        }
+        return res;
+    }
+
+    @Args({0, 6})
+    static int m7(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+3];
+        res += array[i+2];
+        res += array[i+4];
+        if (allaccesses) {
+            res += array[i+1];
+            res += array[i+5];
+            res += array[i+6];
+            res += array[i+7];
+            res += array[i+8];
+            res += array[i+9];
+        }
+        return res;
+    }
+
+    @Args({0, -3})
+    static int m8(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+3];
+        res += array[i+4];
+        res += array[i+2];
+        if (allaccesses) {
+            res += array[i+1];
+            res += array[i+5];
+            res += array[i+6];
+            res += array[i+7];
+            res += array[i+8];
+            res += array[i+9];
+        }
+        return res;
+    }
+
+    @Args({6, 15})
+    static int m9(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+3];
+        if (allaccesses) {
+            res += array[i-2];
+            res += array[i-1];
+            res += array[i-4];
+            res += array[i-5];
+            res += array[i-6];
+        }
+        return res;
+    }
+
+    @Args({3, 12})
+    static int m10(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+3];
+        if (allaccesses) {
+            res += array[i-2];
+            res += array[i-1];
+            res += array[i-3];
+            res += array[i+4];
+            res += array[i+5];
+            res += array[i+6];
+        }
+        return res;
+    }
+
+    @Args({3, -3})
+    static int m11(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+3];
+        res += array[i-2];
+        if (allaccesses) {
+            res += array[i+5];
+            res += array[i+6];
+        }
+        return res;
+    }
+
+    @Args({3, 6})
+    static int m12(int[] array, int i, boolean allaccesses) {
+        int res = 0;
+        res += array[i+3];
+        res += array[i+6];
+        if (allaccesses) {
+            res += array[i-2];
+            res += array[i-3];
+        }
+        return res;
+    }
+
+    // check that identical range check is replaced by dominating one
+    // only when correct
+    @Args({0})
+    static int m13(int[] array, int i, boolean ignore) {
+        int res = 0;
+        res += array[i+3];
+        res += array[i+3];
+        return res;
+    }
+
+    @Args({2, 0})
+    static int m14(int[] array, int i, boolean ignore) {
+        int res = 0;
+
+        res += array[i];
+        res += array[i-2];
+        res += array[i]; // if the range check below were removed first, this could not be considered identical to the first range check
+        res += array[i-1]; // range check removed so i-1 array access depends on previous check
+
+        return res;
+    }
+
+    static int[] m15_dummy = new int[10];
+    @Args({2, 0})
+    static int m15(int[] array, int i, boolean ignore) {
+        int res = 0;
+        res += array[i];
+
+        // When the loop is optimized out, we don't want the
+        // array[i-1] access, which depends on array[i]'s range
+        // check, to become dependent on the identical range
+        // check above.
+
+        int[] array2 = m15_dummy;
+        int j = 0;
+        for (; j < 10; j++);
+        if (j == 10) {
+            array2 = array;
+        }
+
+        res += array2[i-2];
+        res += array2[i];
+        res += array2[i-1]; // range check removed so i-1 array access depends on previous check
+
+        return res;
+    }
+
+    @Args({2, 0})
+    static int m16(int[] array, int i, boolean ignore) {
+        int res = 0;
+
+        res += array[i];
+        res += array[i-1];
+        res += array[i-1];
+        res += array[i-2];
+
+        return res;
+    }
+
+    @Args({2, 0})
+    static int m17(int[] array, int i, boolean ignore) {
+        int res = 0;
+
+        res += array[i];
+        res += array[i-2];
+        res += array[i-2];
+        res += array[i+2];
+        res += array[i+2];
+        res += array[i-1];
+        res += array[i-1];
+
+        return res;
+    }
+
+    static public void main(String[] args) {
+        if (WHITE_BOX.getBooleanVMFlag("BackgroundCompilation")) {
+            throw new AssertionError("Background compilation enabled");
+        }
+        new TestRangeCheckSmearing().doTests();
+    }
+    boolean success = true;
+    boolean exception = false;
+    final int[] array = new int[10];
+    final HashMap<String,Method> tests = new HashMap<>();
+    {
+        final Class<?> TEST_PARAM_TYPES[] = { int[].class, int.class, boolean.class };
+        for (Method m : this.getClass().getDeclaredMethods()) {
+            if (m.getName().matches("m[0-9]+")) {
+                assert(Modifier.isStatic(m.getModifiers())) : m;
+                assert(m.getReturnType() == int.class) : m;
+                assert(Arrays.equals(m.getParameterTypes(), TEST_PARAM_TYPES)) : m;
+                tests.put(m.getName(), m);
+            }
+        }
+    }
+
+    void invokeTest(Method m, int[] array, int index, boolean z) {
+        try {
+            m.invoke(null, array, index, z);
+        } catch (ReflectiveOperationException roe) {
+            Throwable ex = roe.getCause();
+            if (ex instanceof ArrayIndexOutOfBoundsException)
+                throw (ArrayIndexOutOfBoundsException) ex;
+            throw new AssertionError(roe);
+        }
+    }
+
+    void doTest(String name) {
+        Method m = tests.get(name);
+        tests.remove(name);
+        int[] args = m.getAnnotation(Args.class).value();
+        int index0 = args[0], index1;
+        boolean exceptionRequired = true;
+        if (args.length == 2) {
+            index1 = args[1];
+        } else {
+            // no negative test for this one
+            assert(args.length == 1);
+            assert(name.equals("m13"));
+            exceptionRequired = false;
+            index1 = index0;
+        }
+        // Get the method compiled.
+        if (!WHITE_BOX.isMethodCompiled(m)) {
+            // If not, try to compile it with C2
+            if(!WHITE_BOX.enqueueMethodForCompilation(m, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION)) {
+                // C2 compiler not available, try to compile with C1
+                WHITE_BOX.enqueueMethodForCompilation(m, CompilerWhiteBoxTest.COMP_LEVEL_SIMPLE);
+            }
+        }
+        if (!WHITE_BOX.isMethodCompiled(m)) {
+            throw new RuntimeException(m + " not compiled");
+        }
+
+        // valid access
+        invokeTest(m, array, index0, true);
+
+        if (!WHITE_BOX.isMethodCompiled(m)) {
+            throw new RuntimeException(m + " deoptimized on valid array access");
+        }
+
+        exception = false;
+        boolean test_success = true;
+        try {
+            invokeTest(m, array, index1, false);
+        } catch(ArrayIndexOutOfBoundsException aioob) {
+            exception = true;
+            System.out.println("ArrayIndexOutOfBoundsException thrown in "+name);
+        }
+        if (!exception) {
+            System.out.println("ArrayIndexOutOfBoundsException was not thrown in "+name);
+        }
+
+        if (Platform.isServer()) {
+            if (exceptionRequired == WHITE_BOX.isMethodCompiled(m)) {
+                System.out.println((exceptionRequired?"Didn't deoptimized":"deoptimized") + " in "+name);
+                test_success = false;
+            }
+        }
+
+        if (exception != exceptionRequired) {
+            System.out.println((exceptionRequired?"exception required but not thrown":"not exception required but thrown") + " in "+name);
+            test_success = false;
+        }
+
+        if (!test_success) {
+            success = false;
+            System.out.println("TEST FAILED: "+name);
+        }
+
+    }
+    void doTests() {
+        doTest("m1");
+        doTest("m2");
+        doTest("m3");
+        doTest("m4");
+        doTest("m5");
+        doTest("m6");
+        doTest("m7");
+        doTest("m8");
+        doTest("m9");
+        doTest("m10");
+        doTest("m11");
+        doTest("m12");
+        doTest("m13");
+        doTest("m14");
+        doTest("m15");
+        doTest("m16");
+        doTest("m17");
+        if (!success) {
+            throw new RuntimeException("Some tests failed");
+        }
+        assert(tests.isEmpty()) : tests;
+    }
+}
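The @Args values above encode, for each test method, an index for which every access is in bounds and, for the negative test, an index for which the single guarded access must throw. As a reading aid only, here is a minimal stand-alone sketch of that pattern outside the WhiteBox harness; the class name and printed messages are illustrative and not part of the patch:

public class RangeCheckSmearingSketch {
    static int m1(int[] array, int i, boolean allAccesses) {
        int res = 0;
        res += array[i + 9];              // widest range check comes first
        if (allAccesses) {
            for (int k = 1; k <= 8; k++) {
                res += array[i + k];      // checks a compiler may smear into the first one
            }
        }
        return res;
    }

    public static void main(String[] args) {
        int[] array = new int[10];
        System.out.println(m1(array, 0, true));   // valid: touches indices 1..9
        try {
            m1(array, 8, false);                  // i + 9 == 17 is out of bounds
            System.out.println("ERROR: expected ArrayIndexOutOfBoundsException");
        } catch (ArrayIndexOutOfBoundsException expected) {
            System.out.println("AIOOBE thrown as expected");
        }
    }
}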
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/rangechecks/TestRangeCheckSmearingLoopOpts.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8048170
+ * @summary Following range check smearing, range check cannot be replaced by dominating identical test.
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestRangeCheckSmearingLoopOpts
+ *
+ */
+public class TestRangeCheckSmearingLoopOpts {
+
+    static int dummy;
+
+    static int m1(int[] array, int i) {
+        for (;;) {
+            for (;;) {
+                if (array[i] < 0) { // range check (i+0) dominates equivalent check below
+                    break;
+                }
+                i++;
+            }
+
+            // A control flow that stops IfNode::up_one_dom()
+            if ((i % 2)== 0) {
+                if ((array[i] % 2) == 0) {
+                    dummy = i;
+                }
+            }
+
+            // IfNode::Ideal will rewrite some range checks if Compile::allow_range_check_smearing
+            if (array[i-1] == 9) {    // range check (i-1) unchanged
+                int res = array[i-3]; // range check (i-3) unchanged
+                res += array[i];      // range check (i+0) unchanged
+                res += array[i-2];    // removed redundant range check
+                // the previous access might be hoisted by
+                // PhaseIdealLoop::split_if_with_blocks_post because
+                // it appears to have the same guard, but it also
+                // depends on the previous guards
+                return res;
+            }
+            i++;
+        }
+    }
+
+    static public void main(String[] args) {
+        int[] array = { 0, 1, 2, -3, 4, 5, -2, 7, 8, 9, -1 };
+        for (int i = 0; i < 20000; i++) {
+            m1(array, 0);
+        }
+        array[0] = -1;
+        try {
+            m1(array, 0);
+        } catch(ArrayIndexOutOfBoundsException aioobe) {}
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/relocations/TestPrintRelocations.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8044538
+ * @summary assert hit while printing relocations for jump table entries
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -Xcomp -XX:+PrintRelocations TestPrintRelocations
+ */
+
+/**
+ * The test compiles all methods (-Xcomp) and prints their relocation
+ * entries (-XX:+PrintRelocations) to make sure the printing works.
+ */
+public class TestPrintRelocations {
+
+   static public void main(String[] args) { }
+}
--- a/test/compiler/rtm/cli/TestRTMRetryCountOption.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/rtm/cli/TestRTMRetryCountOption.java	Fri Apr 10 16:58:26 2015 -0700
@@ -35,7 +35,7 @@
     private static final String DEFAULT_VALUE = "5";
 
     private TestRTMRetryCountOption() {
-        super(Boolean.TRUE::booleanValue, "RTMRetryCount", false, true,
+        super(Boolean.TRUE::booleanValue, "RTMRetryCount", false, false,
                 TestRTMRetryCountOption.DEFAULT_VALUE,
                 "0", "10", "100", "1000");
     }
--- a/test/compiler/rtm/cli/TestUseRTMDeoptOptionOnSupportedConfig.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/rtm/cli/TestUseRTMDeoptOptionOnSupportedConfig.java	Fri Apr 10 16:58:26 2015 -0700
@@ -50,25 +50,15 @@
 
     @Override
     public void runTestCases() throws Throwable {
-        String experimentalOptionError
-                = CommandLineOptionTest.getExperimentalOptionErrorMessage(
-                "UseRTMDeopt");
-        // verify that option is experimental
+        // verify that option could be turned on
         CommandLineOptionTest.verifySameJVMStartup(
-                new String[] { experimentalOptionError }, null, ExitCode.FAIL,
-                "-XX:+UseRTMDeopt");
-        // verify that option could be turned on
-        CommandLineOptionTest.verifySameJVMStartup(null, null, ExitCode.OK,
-                CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
-                "-XX:+UseRTMDeopt");
+                null, null, ExitCode.OK, "-XX:+UseRTMDeopt");
         // verify that option could be turned off
-        CommandLineOptionTest.verifySameJVMStartup(null, null, ExitCode.OK,
-                CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
-                "-XX:-UseRTMDeopt");
+        CommandLineOptionTest.verifySameJVMStartup(
+                null, null, ExitCode.OK, "-XX:-UseRTMDeopt");
         // verify default value
         CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMDeopt",
-                TestUseRTMDeoptOptionOnSupportedConfig.DEFAULT_VALUE,
-                CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS);
+                TestUseRTMDeoptOptionOnSupportedConfig.DEFAULT_VALUE);
         // verify default value
         CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMDeopt",
                 TestUseRTMDeoptOptionOnSupportedConfig.DEFAULT_VALUE,
--- a/test/compiler/rtm/cli/TestUseRTMDeoptOptionOnUnsupportedConfig.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/rtm/cli/TestUseRTMDeoptOptionOnUnsupportedConfig.java	Fri Apr 10 16:58:26 2015 -0700
@@ -48,7 +48,7 @@
     private TestUseRTMDeoptOptionOnUnsupportedConfig() {
         super(new NotPredicate(new AndPredicate(new SupportedCPU(),
                         new SupportedVM())),
-                "UseRTMDeopt", true, true,
+                "UseRTMDeopt", true, false,
                 TestUseRTMDeoptOptionOnUnsupportedConfig.DEFAULT_VALUE, "true");
     }
 
@@ -57,14 +57,11 @@
         super.verifyJVMStartup();
         // verify default value
         CommandLineOptionTest.verifyOptionValueForSameVM(optionName,
-                defaultValue,
-                CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS);
+                defaultValue);
         // verify that until RTMLocking is not used, value
         // will be set to default false.
         CommandLineOptionTest.verifyOptionValueForSameVM(optionName,
-                defaultValue,
-                CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
-                "-XX:+UseRTMDeopt");
+                defaultValue, "-XX:+UseRTMDeopt");
     }
 
     public static void main(String args[]) throws Throwable {
--- a/test/compiler/rtm/cli/TestUseRTMLockingOptionOnSupportedConfig.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/rtm/cli/TestUseRTMLockingOptionOnSupportedConfig.java	Fri Apr 10 16:58:26 2015 -0700
@@ -51,15 +51,8 @@
     @Override
     public void runTestCases() throws Throwable {
         String unrecongnizedOption
-                = CommandLineOptionTest.getUnrecognizedOptionErrorMessage(
-                "UseRTMLocking");
-        String experimentalOptionError
-                = CommandLineOptionTest.getExperimentalOptionErrorMessage(
+                = CommandLineOptionTest.getUnrecognizedOptionErrorMessage(
                 "UseRTMLocking");
-        // verify that options is experimental
-        CommandLineOptionTest.verifySameJVMStartup(
-                new String[] { experimentalOptionError }, null, ExitCode.FAIL,
-                "-XX:+UseRTMLocking");
         // verify that there are no warning or error in VM output
         CommandLineOptionTest.verifySameJVMStartup(null,
                 new String[]{
@@ -67,7 +60,8 @@
                         unrecongnizedOption
                 }, ExitCode.OK,
                 CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
-                "-XX:+UseRTMLocking");
+                "-XX:+UseRTMLocking"
+        );
 
         CommandLineOptionTest.verifySameJVMStartup(null,
                 new String[]{
@@ -75,7 +69,8 @@
                         unrecongnizedOption
                 }, ExitCode.OK,
                 CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
-                "-XX:-UseRTMLocking");
+                "-XX:-UseRTMLocking"
+        );
         // verify that UseRTMLocking is of by default
         CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMLocking",
                 TestUseRTMLockingOptionOnSupportedConfig.DEFAULT_VALUE,
--- a/test/compiler/rtm/cli/TestUseRTMLockingOptionOnUnsupportedCPU.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/rtm/cli/TestUseRTMLockingOptionOnUnsupportedCPU.java	Fri Apr 10 16:58:26 2015 -0700
@@ -63,9 +63,7 @@
             CommandLineOptionTest.verifySameJVMStartup(
                     new String[] { errorMessage },
                     new String[] { unrecongnizedOption },
-                    ExitCode.FAIL,
-                    CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
-                    "-XX:+UseRTMLocking");
+                    ExitCode.FAIL, "-XX:+UseRTMLocking");
             // verify that we can pass -UseRTMLocking without
             // getting any error messages
             CommandLineOptionTest.verifySameJVMStartup(
@@ -73,27 +71,20 @@
                     new String[]{
                             errorMessage,
                             unrecongnizedOption
-                    }, ExitCode.OK,
-                    CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
-                    "-XX:-UseRTMLocking");
+                    }, ExitCode.OK, "-XX:-UseRTMLocking");
 
             // verify that UseRTMLocking is false by default
             CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMLocking",
-                    TestUseRTMLockingOptionOnUnsupportedCPU.DEFAULT_VALUE,
-                    CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS);
+                    TestUseRTMLockingOptionOnUnsupportedCPU.DEFAULT_VALUE);
         } else {
             // verify that on non-x86 CPUs RTMLocking could not be used
             CommandLineOptionTest.verifySameJVMStartup(
                     new String[] { unrecongnizedOption },
-                    null, ExitCode.FAIL,
-                    CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
-                    "-XX:+UseRTMLocking");
+                    null, ExitCode.FAIL, "-XX:+UseRTMLocking");
 
             CommandLineOptionTest.verifySameJVMStartup(
                     new String[] { unrecongnizedOption },
-                    null, ExitCode.FAIL,
-                    CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
-                    "-XX:-UseRTMLocking");
+                    null, ExitCode.FAIL, "-XX:-UseRTMLocking");
         }
     }
 
--- a/test/compiler/rtm/cli/TestUseRTMLockingOptionOnUnsupportedVM.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/rtm/cli/TestUseRTMLockingOptionOnUnsupportedVM.java	Fri Apr 10 16:58:26 2015 -0700
@@ -53,27 +53,17 @@
     public void runTestCases() throws Throwable {
         String errorMessage
                 = RTMGenericCommandLineOptionTest.RTM_UNSUPPORTED_VM_ERROR;
-        String experimentalOptionError
-                = CommandLineOptionTest.getExperimentalOptionErrorMessage(
-                "UseRTMLocking");
-        // verify that options is experimental
-        CommandLineOptionTest.verifySameJVMStartup(
-                new String[] { experimentalOptionError }, null, ExitCode.FAIL,
-                "-XX:+UseRTMLocking");
         // verify that we can't use +UseRTMLocking
         CommandLineOptionTest.verifySameJVMStartup(
                 new String[] { errorMessage }, null, ExitCode.FAIL,
-                CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
                 "-XX:+UseRTMLocking");
         // verify that we can turn it off
         CommandLineOptionTest.verifySameJVMStartup(null,
                 new String[] { errorMessage }, ExitCode.OK,
-                CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS,
                 "-XX:-UseRTMLocking");
         // verify that it is off by default
         CommandLineOptionTest.verifyOptionValueForSameVM("UseRTMLocking",
-                TestUseRTMLockingOptionOnUnsupportedVM.DEFAULT_VALUE,
-                CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS);
+                TestUseRTMLockingOptionOnUnsupportedVM.DEFAULT_VALUE);
     }
 
     public static void main(String args[]) throws Throwable {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/startup/NumCompilerThreadsCheck.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8034775
+ * @summary Ensures correct minimal number of compiler threads (provided by -XX:CICompilerCount=)
+ * @library /testlibrary
+ */
+import com.oracle.java.testlibrary.*;
+
+public class NumCompilerThreadsCheck {
+
+  public static void main(String[] args) throws Exception {
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:CICompilerCount=-1");
+    OutputAnalyzer out = new OutputAnalyzer(pb.start());
+
+    String expectedOutput = "CICompilerCount of -1 is invalid";
+    out.shouldContain(expectedOutput);
+
+    if (isZeroVm()) {
+      String expectedLowWaterMarkText = "must be at least 0";
+      out.shouldContain(expectedLowWaterMarkText);
+    }
+  }
+
+  private static boolean isZeroVm() {
+    String vmName = System.getProperty("java.vm.name");
+    if (vmName == null) {
+      throw new RuntimeException("No VM name");
+    }
+    if (vmName.toLowerCase().contains("zero")) {
+      return true;
+    }
+    return false;
+  }
+}
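The test above relies on the testlibrary's ProcessTools and OutputAnalyzer. For readers without that library on the class path, the same check can be approximated with a plain ProcessBuilder; this is a hedged sketch, not the test's actual mechanism, and it only assumes that the launcher under java.home rejects an invalid CICompilerCount with an error message mentioning the flag name:

import java.io.BufferedReader;
import java.io.InputStreamReader;

public class CompilerCountSketch {
    public static void main(String[] args) throws Exception {
        // Launch a throwaway JVM with an invalid CICompilerCount and scan its output.
        ProcessBuilder pb = new ProcessBuilder(
                System.getProperty("java.home") + "/bin/java",
                "-XX:CICompilerCount=-1", "-version");
        pb.redirectErrorStream(true);
        Process p = pb.start();
        StringBuilder out = new StringBuilder();
        try (BufferedReader r = new BufferedReader(new InputStreamReader(p.getInputStream()))) {
            String line;
            while ((line = r.readLine()) != null) {
                out.append(line).append('\n');
            }
        }
        p.waitFor();
        if (!out.toString().contains("CICompilerCount")) {
            throw new RuntimeException("expected an error mentioning CICompilerCount, got:\n" + out);
        }
        System.out.println("VM rejected the flag as expected");
    }
}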
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/testlibrary/intrinsics/Verifier.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package intrinsics;
+
+import java.io.BufferedReader;
+import java.io.FileReader;
+import java.util.Properties;
+
+public class Verifier {
+    enum VerificationStrategy {
+        VERIFY_STRONG_EQUALITY {
+            @Override
+            void verify(Properties expectedProperties, int fullMatchCnt,
+                        int suspectCnt) {
+                int expectedCount = Integer.parseInt(
+                        expectedProperties.getProperty(
+                                Verifier.INTRINSIC_EXPECTED_COUNT_PROPERTY));
+                String intrinsicID = expectedProperties.getProperty(
+                        Verifier.INTRINSIC_NAME_PROPERTY);
+
+                System.out.println("Intrinsic " + intrinsicID
+                        + " verification, expected: " + expectedCount
+                        + ", matched: " + fullMatchCnt
+                        + ", suspected: " + suspectCnt);
+                if (expectedCount != fullMatchCnt) {
+                    throw new RuntimeException(
+                            "Unexpected count of intrinsic  "
+                                    + intrinsicID
+                                    + " expected:" + expectedCount
+                                    + ", matched: " + fullMatchCnt
+                                    + ", suspected: " + suspectCnt);
+                }
+            }
+        },
+
+        VERIFY_INTRINSIC_USAGE {
+            @Override
+            void verify(Properties expectedProperties, int fullMatchCnt,
+                        int suspectCnt) {
+                boolean isExpected = Boolean.parseBoolean(
+                        expectedProperties.getProperty(
+                                Verifier.INTRINSIC_IS_EXPECTED_PROPERTY));
+                String intrinsicID = expectedProperties.getProperty(
+                        Verifier.INTRINSIC_NAME_PROPERTY);
+
+                System.out.println("Intrinsic " + intrinsicID
+                        + " verification, is expected: " + isExpected
+                        + ", matched: " + fullMatchCnt
+                        + ", suspected: " + suspectCnt);
+                if ((fullMatchCnt == 0 && isExpected)
+                        || (fullMatchCnt > 0 && !isExpected)) {
+                    throw new RuntimeException(
+                            "Unexpected count of intrinsic  "
+                                    + intrinsicID
+                                    + " is expected:" + isExpected
+                                    + ", matched: " + fullMatchCnt
+                                    + ", suspected: " + suspectCnt);
+                }
+            }
+        };
+
+        void verify(Properties expectedProperties, int fullMatchCnt,
+                    int suspectCnt) {
+            throw new RuntimeException("Default strategy is not implemented.");
+        }
+    }
+
+    public static final String PROPERTY_FILE_SUFFIX = ".verify.properties";
+    public static final String INTRINSIC_NAME_PROPERTY = "intrinsic.name";
+    public static final String INTRINSIC_IS_EXPECTED_PROPERTY
+            = "intrinsic.expected";
+    public static final String INTRINSIC_EXPECTED_COUNT_PROPERTY
+            = "intrinsic.expectedCount";
+    private static final String DEFAULT_STRATEGY
+            = VerificationStrategy.VERIFY_STRONG_EQUALITY.name();
+
+    public static void main(String[] args) throws Exception {
+        if (args.length == 0) {
+            throw new RuntimeException("Test bug, nothing to verify");
+        }
+        for (String hsLogFile : args) {
+            verify(hsLogFile);
+        }
+    }
+
+    private static void verify(String hsLogFile) throws Exception {
+        System.out.println("Verifying " + hsLogFile);
+
+        Properties expectedProperties = new Properties();
+        FileReader reader = new FileReader(hsLogFile
+                + Verifier.PROPERTY_FILE_SUFFIX);
+        expectedProperties.load(reader);
+        reader.close();
+
+        int fullMatchCnt = 0;
+        int suspectCnt = 0;
+        String intrinsicId = expectedProperties.getProperty(
+                Verifier.INTRINSIC_NAME_PROPERTY);
+        String prefix = "<intrinsic id='";
+        String prefixWithId = prefix + intrinsicId + "'";
+
+        try (BufferedReader compLogReader
+                     = new BufferedReader(new FileReader(hsLogFile))) {
+            String logLine;
+            while ((logLine = compLogReader.readLine()) != null) {
+                if (logLine.startsWith(prefix)) {
+                    if (logLine.startsWith(prefixWithId)) {
+                        fullMatchCnt++;
+                    } else {
+                        suspectCnt++;
+                        System.err.println(
+                                "WARNING: Other intrinsic detected " + logLine);
+                    }
+                }
+            }
+        }
+
+        VerificationStrategy strategy = VerificationStrategy.valueOf(
+                System.getProperty("verificationStrategy",
+                        Verifier.DEFAULT_STRATEGY));
+        strategy.verify(expectedProperties, fullMatchCnt, suspectCnt);
+    }
+}
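The Verifier expects, next to each HotSpot compilation log it is given, a properties file with the same name plus ".verify.properties" describing the intrinsic to look for. The following is a hypothetical usage sketch, not part of the patch: the log file name and the intrinsic id are placeholders, and it assumes the hotspot.log already exists and contains <intrinsic id='...'> entries.

import java.io.FileWriter;
import java.util.Properties;

public class VerifierUsageSketch {
    public static void main(String[] args) throws Exception {
        String hsLog = "hotspot.log"; // placeholder: an -XX:+LogCompilation output file
        Properties expected = new Properties();
        expected.setProperty(intrinsics.Verifier.INTRINSIC_NAME_PROPERTY, "_sha_implCompress");
        expected.setProperty(intrinsics.Verifier.INTRINSIC_EXPECTED_COUNT_PROPERTY, "1");
        try (FileWriter w = new FileWriter(hsLog + intrinsics.Verifier.PROPERTY_FILE_SUFFIX)) {
            expected.store(w, "expected intrinsic usage for the verifier sketch");
        }
        // Uses the default VERIFY_STRONG_EQUALITY strategy; pass
        // -DverificationStrategy=VERIFY_INTRINSIC_USAGE to switch.
        intrinsics.Verifier.main(new String[] { hsLog });
    }
}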
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/testlibrary/sha/predicate/IntrinsicPredicates.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package sha.predicate;
+
+import com.oracle.java.testlibrary.Platform;
+import com.oracle.java.testlibrary.cli.predicate.AndPredicate;
+import com.oracle.java.testlibrary.cli.predicate.CPUSpecificPredicate;
+import com.oracle.java.testlibrary.cli.predicate.OrPredicate;
+import sun.hotspot.WhiteBox;
+
+import java.util.function.BooleanSupplier;
+
+/**
+ * Helper class aimed to provide predicates on availability of SHA-related
+ * CPU instructions and intrinsics.
+ */
+public class IntrinsicPredicates {
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+    private static final long TIERED_MAX_LEVEL = 4L;
+    /**
+     * Boolean supplier that checks whether a method could be compiled by C2.
+     * A method can potentially be compiled by C2 if the Server VM is used and
+     * either tiered compilation is disabled or tier TIERED_MAX_LEVEL is
+     * reachable.
+     *
+     * Please don't place this definition after the SHA*_INTRINSICS_AVAILABLE
+     * definitions; otherwise its value will still be {@code null} when the
+     * dependent fields are initialized.
+     */
+    private static final BooleanSupplier COMPILABLE_BY_C2 = () -> {
+        boolean isTiered = IntrinsicPredicates.WHITE_BOX.getBooleanVMFlag(
+                "TieredCompilation");
+        long tieredMaxLevel = IntrinsicPredicates.WHITE_BOX.getIntxVMFlag(
+                "TieredStopAtLevel");
+        boolean maxLevelIsReachable = (tieredMaxLevel
+                == IntrinsicPredicates.TIERED_MAX_LEVEL);
+        return Platform.isServer() && (!isTiered || maxLevelIsReachable);
+    };
+
+    public static final BooleanSupplier SHA1_INSTRUCTION_AVAILABLE
+            = new CPUSpecificPredicate("sparc.*", new String[] { "sha1" },
+                    null);
+
+    public static final BooleanSupplier SHA256_INSTRUCTION_AVAILABLE
+            = new CPUSpecificPredicate("sparc.*", new String[] { "sha256" },
+                    null);
+
+    public static final BooleanSupplier SHA512_INSTRUCTION_AVAILABLE
+            = new CPUSpecificPredicate("sparc.*", new String[] { "sha512" },
+                    null);
+
+    public static final BooleanSupplier ANY_SHA_INSTRUCTION_AVAILABLE
+            = new OrPredicate(IntrinsicPredicates.SHA1_INSTRUCTION_AVAILABLE,
+                    new OrPredicate(
+                            IntrinsicPredicates.SHA256_INSTRUCTION_AVAILABLE,
+                            IntrinsicPredicates.SHA512_INSTRUCTION_AVAILABLE));
+
+    public static final BooleanSupplier SHA1_INTRINSICS_AVAILABLE
+            = new AndPredicate(new AndPredicate(
+                    IntrinsicPredicates.SHA1_INSTRUCTION_AVAILABLE,
+                    IntrinsicPredicates.COMPILABLE_BY_C2),
+                IntrinsicPredicates.booleanOptionValue("UseSHA1Intrinsics"));
+
+    public static final BooleanSupplier SHA256_INTRINSICS_AVAILABLE
+            = new AndPredicate(new AndPredicate(
+                    IntrinsicPredicates.SHA256_INSTRUCTION_AVAILABLE,
+                    IntrinsicPredicates.COMPILABLE_BY_C2),
+                IntrinsicPredicates.booleanOptionValue("UseSHA256Intrinsics"));
+
+    public static final BooleanSupplier SHA512_INTRINSICS_AVAILABLE
+            = new AndPredicate(new AndPredicate(
+                    IntrinsicPredicates.SHA512_INSTRUCTION_AVAILABLE,
+                    IntrinsicPredicates.COMPILABLE_BY_C2),
+                IntrinsicPredicates.booleanOptionValue("UseSHA512Intrinsics"));
+
+    private static BooleanSupplier booleanOptionValue(String option) {
+        return () -> IntrinsicPredicates.WHITE_BOX.getBooleanVMFlag(option);
+    }
+
+    private IntrinsicPredicates() {
+    }
+}
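A test would typically consult one of these suppliers before asserting anything about SHA intrinsics. The sketch below is hypothetical (the class name is not part of the patch) and assumes it is run with the usual WhiteBox flags, since the predicates query VM flags through the WhiteBox API:

import java.util.function.BooleanSupplier;
import sha.predicate.IntrinsicPredicates;

public class ShaGateSketch {
    public static void main(String[] args) {
        // Requires: -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
        BooleanSupplier gate = IntrinsicPredicates.SHA1_INTRINSICS_AVAILABLE;
        if (!gate.getAsBoolean()) {
            System.out.println("SHA1 intrinsic not available; nothing to check");
            return;
        }
        System.out.println("SHA1 intrinsic available; intrinsic-specific checks would go here");
    }
}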
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/uncommontrap/TestDeoptOOM.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,426 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 6898462
+ * @summary failed reallocations of scalar replaced objects during deoptimization cause a crash
+ * @run main/othervm -XX:-BackgroundCompilation -XX:CompileCommand=exclude,TestDeoptOOM::main -XX:CompileCommand=exclude,TestDeoptOOM::m9_1 -Xmx128M TestDeoptOOM
+ *
+ */
+
+public class TestDeoptOOM {
+
+    long f1;
+    long f2;
+    long f3;
+    long f4;
+    long f5;
+
+    static class LinkedList {
+        LinkedList l;
+        long[] array;
+        LinkedList(LinkedList l, int size) {
+            array = new long[size];
+            this.l = l;
+        }
+    }
+
+    static LinkedList ll;
+
+    static void consume_all_memory() {
+        int size = 128 * 1024 * 1024;
+        while(size > 0) {
+            try {
+                while(true) {
+                    ll = new LinkedList(ll, size);
+                }
+            } catch(OutOfMemoryError oom) {
+            }
+            size = size / 2;
+        }
+    }
+
+    static void free_memory() {
+        ll = null;
+    }
+
+    static TestDeoptOOM m1(boolean deopt) {
+        try {
+            TestDeoptOOM tdoom = new TestDeoptOOM();
+            if (deopt) {
+                return tdoom;
+            }
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m1");
+        }
+        return null;
+    }
+
+    static TestDeoptOOM m2_1(boolean deopt) {
+        try {
+            TestDeoptOOM tdoom = new TestDeoptOOM();
+            if (deopt) {
+                return tdoom;
+            }
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m2_1");
+        }
+        return null;
+    }
+
+    static TestDeoptOOM m2(boolean deopt) {
+        try {
+            return m2_1(deopt);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m2");
+        }
+        return null;
+    }
+
+    static TestDeoptOOM m3_3(boolean deopt) {
+        try {
+            TestDeoptOOM tdoom = new TestDeoptOOM();
+            if (deopt) {
+                return tdoom;
+            }
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m3_3");
+        }
+        return null;
+    }
+
+    static boolean m3_2(boolean deopt) {
+        try {
+            return m3_3(deopt) != null;
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m3_2");
+        }
+        return false;
+    }
+
+    static TestDeoptOOM m3_1(boolean deopt) {
+        try {
+            TestDeoptOOM tdoom = new TestDeoptOOM();
+            if (m3_2(deopt)) {
+                return tdoom;
+            }
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m3_1");
+        }
+        return null;
+    }
+
+    static TestDeoptOOM m3(boolean deopt) {
+        try {
+            return m3_1(deopt);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m3");
+        }
+        return null;
+    }
+
+    static TestDeoptOOM m4(boolean deopt) {
+        try {
+            TestDeoptOOM tdoom = new TestDeoptOOM();
+            if (deopt) {
+                tdoom.f1 = 1l;
+                tdoom.f2 = 2l;
+                tdoom.f3 = 3l;
+                return tdoom;
+            }
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m4");
+        }
+        return null;
+    }
+
+    static TestDeoptOOM m5(boolean deopt) {
+        try {
+            TestDeoptOOM tdoom = new TestDeoptOOM();
+            synchronized(tdoom) {
+                if (deopt) {
+                    return tdoom;
+                }
+            }
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m5");
+        }
+        return null;
+    }
+
+    synchronized TestDeoptOOM m6_1(boolean deopt) {
+        if (deopt) {
+            return this;
+        }
+        return null;
+    }
+
+    static TestDeoptOOM m6(boolean deopt) {
+        try {
+            TestDeoptOOM tdoom = new TestDeoptOOM();
+            return tdoom.m6_1(deopt);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m6");
+        }
+        return null;
+    }
+
+    static TestDeoptOOM m7_1(boolean deopt, Object lock) {
+        try {
+            synchronized(lock) {
+                TestDeoptOOM tdoom = new TestDeoptOOM();
+                if (deopt) {
+                    return tdoom;
+                }
+            }
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m7_1");
+        }
+        return null;
+    }
+
+    static TestDeoptOOM m7(boolean deopt, Object lock) {
+        try {
+            return m7_1(deopt, lock);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m7");
+        }
+        return null;
+    }
+
+    static class A {
+        long f1;
+        long f2;
+        long f3;
+        long f4;
+        long f5;
+    }
+
+    static class B {
+        long f1;
+        long f2;
+        long f3;
+        long f4;
+        long f5;
+
+        A a;
+    }
+
+    static B m8(boolean deopt) {
+        try {
+            A a = new A();
+            B b = new B();
+            b.a = a;
+            if (deopt) {
+                return b;
+            }
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m8");
+        }
+        return null;
+    }
+
+    static void m9_1(int i) {
+        if (i > 90000) {
+            consume_all_memory();
+        }
+    }
+
+    static TestDeoptOOM m9() {
+        try {
+            for (int i = 0; i < 100000; i++) {
+                TestDeoptOOM tdoom = new TestDeoptOOM();
+                m9_1(i);
+                if (i > 90000) {
+                    return tdoom;
+                }
+            }
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in m1");
+        }
+        return null;
+    }
+
+    public static void main(String[] args) {
+        for (int i = 0; i < 20000; i++) {
+            m1(false);
+        }
+
+        consume_all_memory();
+
+        try {
+            m1(true);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in main " + oom.getMessage());
+        }
+
+        free_memory();
+
+        for (int i = 0; i < 20000; i++) {
+            m2(false);
+        }
+
+        consume_all_memory();
+
+        try {
+            m2(true);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in main");
+        }
+
+        free_memory();
+
+        for (int i = 0; i < 20000; i++) {
+            m3(false);
+        }
+
+        consume_all_memory();
+
+        try {
+            m3(true);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in main");
+        }
+
+        free_memory();
+
+        for (int i = 0; i < 20000; i++) {
+            m4(false);
+        }
+
+        consume_all_memory();
+
+        try {
+            m4(true);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in main");
+        }
+
+        free_memory();
+
+        for (int i = 0; i < 20000; i++) {
+            m5(false);
+        }
+
+        consume_all_memory();
+
+        try {
+            m5(true);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in main");
+        }
+
+        free_memory();
+
+        for (int i = 0; i < 20000; i++) {
+            m6(false);
+        }
+
+        consume_all_memory();
+
+        try {
+            m6(true);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in main");
+        }
+
+        free_memory();
+
+        final Object lock = new Object();
+
+        for (int i = 0; i < 20000; i++) {
+            m7(false, lock);
+        }
+
+        consume_all_memory();
+
+        try {
+            m7(true, lock);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in main");
+        }
+
+        free_memory();
+
+        Thread thread = new Thread() {
+                public void run() {
+                    System.out.println("Acquiring lock");
+                    synchronized(lock) {
+                        System.out.println("Lock acquired");
+                    }
+                    System.out.println("Lock released");
+                }
+            };
+        thread.start();
+        try {
+            thread.join();
+        } catch(InterruptedException ie) {
+        }
+
+        for (int i = 0; i < 20000; i++) {
+            m8(false);
+        }
+
+        consume_all_memory();
+
+        try {
+            m8(true);
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in main");
+        }
+
+        free_memory();
+
+        try {
+            m9();
+        } catch(OutOfMemoryError oom) {
+            free_memory();
+            System.out.println("OOM caught in main");
+        }
+
+        free_memory();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/uncommontrap/TraceDeoptimizationNoRealloc.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8067144
+ * @summary -XX:+TraceDeoptimization tries to print realloc'ed objects even when there are none
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:+IgnoreUnrecognizedVMOptions -XX:+TraceDeoptimization TraceDeoptimizationNoRealloc
+ *
+ */
+
+public class TraceDeoptimizationNoRealloc {
+
+    static void m(boolean some_condition) {
+        if (some_condition) {
+            return;
+        }
+    }
+
+
+    static public void main(String[] args) {
+        for (int i = 0; i < 20000; i++) {
+            m(false);
+        }
+        m(true);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/unsafe/UnsafeRaw.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8058744
+ * @summary Invalid pattern-matching of address computations in raw unsafe
+ * @library /testlibrary
+ * @run main/othervm -Xbatch UnsafeRaw
+ */
+
+import com.oracle.java.testlibrary.Utils;
+import java.util.Random;
+
+public class UnsafeRaw {
+  public static class Tests {
+    public static int int_index(sun.misc.Unsafe unsafe, long base, int index) throws Exception {
+      return unsafe.getInt(base + (index << 2));
+    }
+    public static int long_index(sun.misc.Unsafe unsafe, long base, long index) throws Exception {
+      return unsafe.getInt(base + (index << 2));
+    }
+    public static int int_index_back_ashift(sun.misc.Unsafe unsafe, long base, int index) throws Exception {
+      return unsafe.getInt(base + (index >> 2));
+    }
+    public static int int_index_back_lshift(sun.misc.Unsafe unsafe, long base, int index) throws Exception {
+      return unsafe.getInt(base + (index >>> 2));
+    }
+    public static int long_index_back_ashift(sun.misc.Unsafe unsafe, long base, long index) throws Exception {
+      return unsafe.getInt(base + (index >> 2));
+    }
+    public static int long_index_back_lshift(sun.misc.Unsafe unsafe, long base, long index) throws Exception {
+      return unsafe.getInt(base + (index >>> 2));
+    }
+    public static int int_const_12345678_index(sun.misc.Unsafe unsafe, long base) throws Exception {
+      int idx4 = 0x12345678;
+      return unsafe.getInt(base + idx4);
+    }
+    public static int long_const_1234567890abcdef_index(sun.misc.Unsafe unsafe, long base) throws Exception {
+      long idx5 = 0x1234567890abcdefL;
+      return unsafe.getInt(base + idx5);
+    }
+    public static int int_index_mul(sun.misc.Unsafe unsafe, long base, int index) throws Exception {
+      return unsafe.getInt(base + (index * 4));
+    }
+    public static int long_index_mul(sun.misc.Unsafe unsafe, long base, long index) throws Exception {
+      return unsafe.getInt(base + (index * 4));
+    }
+    public static int int_index_mul_scale_16(sun.misc.Unsafe unsafe, long base, int index) throws Exception {
+      return unsafe.getInt(base + (index * 16));
+    }
+    public static int long_index_mul_scale_16(sun.misc.Unsafe unsafe, long base, long index) throws Exception {
+      return unsafe.getInt(base + (index * 16));
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    sun.misc.Unsafe unsafe = Utils.getUnsafe();
+    final int array_size = 128;
+    final int element_size = 4;
+    final int magic = 0x12345678;
+
+    Random rnd = new Random();
+
+    long array = unsafe.allocateMemory(array_size * element_size); // 128 ints
+    long addr = array + array_size * element_size / 2; // something in the middle to work with
+    unsafe.putInt(addr, magic);
+    for (int j = 0; j < 100000; j++) {
+       if (Tests.int_index(unsafe, addr, 0) != magic) throw new Exception();
+       if (Tests.long_index(unsafe, addr, 0) != magic) throw new Exception();
+       if (Tests.int_index_mul(unsafe, addr, 0) != magic) throw new Exception();
+       if (Tests.long_index_mul(unsafe, addr, 0) != magic) throw new Exception();
+       {
+         long idx1 = rnd.nextLong();
+         long addr1 = addr - (idx1 << 2);
+         if (Tests.long_index(unsafe, addr1, idx1) != magic) throw new Exception();
+       }
+       {
+         long idx2 = rnd.nextLong();
+         long addr2 = addr - (idx2 >> 2);
+         if (Tests.long_index_back_ashift(unsafe, addr2, idx2) != magic) throw new Exception();
+       }
+       {
+         long idx3 = rnd.nextLong();
+         long addr3 = addr - (idx3 >>> 2);
+         if (Tests.long_index_back_lshift(unsafe, addr3, idx3) != magic) throw new Exception();
+       }
+       {
+         long idx4 = 0x12345678;
+         long addr4 = addr - idx4;
+         if (Tests.int_const_12345678_index(unsafe, addr4) != magic) throw new Exception();
+       }
+       {
+         long idx5 = 0x1234567890abcdefL;
+         long addr5 = addr - idx5;
+         if (Tests.long_const_1234567890abcdef_index(unsafe, addr5) != magic) throw new Exception();
+       }
+       {
+         int idx6 = rnd.nextInt();
+         long addr6 = addr - (idx6 >> 2);
+         if (Tests.int_index_back_ashift(unsafe, addr6, idx6) != magic) throw new Exception();
+       }
+       {
+         int idx7 = rnd.nextInt();
+         long addr7 = addr - (idx7 >>> 2);
+         if (Tests.int_index_back_lshift(unsafe, addr7, idx7) != magic) throw new Exception();
+       }
+       {
+         int idx8 = rnd.nextInt();
+         long addr8 = addr - (idx8 * 16);
+         if (Tests.int_index_mul_scale_16(unsafe, addr8, idx8) != magic) throw new Exception();
+       }
+       {
+         long idx9 = rnd.nextLong();
+         long addr9 = addr - (idx9 * 16);
+         if (Tests.long_index_mul_scale_16(unsafe, addr9, idx9) != magic) throw new Exception();
+       }
+    }
+  }
+}
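One reason these raw-unsafe address computations are delicate to pattern-match: with an int index, the scaled value wraps in 32-bit arithmetic before it is widened and added to the 64-bit base, so "base + (index << 2)" is not always the same address as "base + 4L * index". A small stand-alone illustration of that wrap (values are arbitrary and not taken from the test):

public class UnsafeAddressSketch {
    public static void main(String[] args) {
        long base = 1L << 40;                     // arbitrary raw base address value
        int index = Integer.MAX_VALUE;            // scaled value overflows int
        long viaIntShift = base + (index << 2);   // wraps: index << 2 == -4
        long viaLongScale = base + 4L * index;    // no wrap
        System.out.println(viaIntShift == viaLongScale); // prints false
    }
}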
--- a/test/compiler/whitebox/CompilerWhiteBoxTest.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/whitebox/CompilerWhiteBoxTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -72,9 +72,9 @@
     /** Flag for verbose output, true if {@code -Dverbose} specified */
     protected static final boolean IS_VERBOSE
             = System.getProperty("verbose") != null;
-    /** count of invocation to triger compilation */
+    /** invocation count to trigger compilation */
     protected static final int THRESHOLD;
-    /** count of invocation to triger OSR compilation */
+    /** invocation count to trigger OSR compilation */
     protected static final long BACKEDGE_THRESHOLD;
     /** Value of {@code java.vm.info} (interpreted|mixed|comp mode) */
     protected static final String MODE = System.getProperty("java.vm.info");
@@ -206,7 +206,6 @@
      *                          is compiled, or if {@linkplain #method} has zero
      *                          compilation level.
      */
-
     protected final void checkNotCompiled(int compLevel) {
         if (WHITE_BOX.isMethodQueuedForCompilation(method)) {
             throw new RuntimeException(method + " must not be in queue");
@@ -227,20 +226,30 @@
      *                          compilation level.
      */
     protected final void checkNotCompiled() {
+        checkNotCompiled(true);
+        checkNotCompiled(false);
+    }
+
+    /**
+     * Checks that {@linkplain #method} is not (OSR-)compiled.
+     *
+     * @param isOsr Check for OSR compilation if true
+     * @throws RuntimeException if {@linkplain #method} is in compiler queue or
+     *                          is compiled, or if {@linkplain #method} has zero
+     *                          compilation level.
+     */
+    protected final void checkNotCompiled(boolean isOsr) {
+        waitBackgroundCompilation();
         if (WHITE_BOX.isMethodQueuedForCompilation(method)) {
             throw new RuntimeException(method + " must not be in queue");
         }
-        if (WHITE_BOX.isMethodCompiled(method, false)) {
-            throw new RuntimeException(method + " must be not compiled");
-        }
-        if (WHITE_BOX.getMethodCompilationLevel(method, false) != 0) {
-            throw new RuntimeException(method + " comp_level must be == 0");
+        if (WHITE_BOX.isMethodCompiled(method, isOsr)) {
+            throw new RuntimeException(method + " must not be " +
+                                       (isOsr ? "osr_" : "") + "compiled");
         }
-        if (WHITE_BOX.isMethodCompiled(method, true)) {
-            throw new RuntimeException(method + " must be not osr_compiled");
-        }
-        if (WHITE_BOX.getMethodCompilationLevel(method, true) != 0) {
-            throw new RuntimeException(method + " osr_comp_level must be == 0");
+        if (WHITE_BOX.getMethodCompilationLevel(method, isOsr) != 0) {
+            throw new RuntimeException(method + (isOsr ? " osr_" : " ") +
+                                       "comp_level must be == 0");
         }
     }
 
@@ -306,12 +315,21 @@
      * Waits for completion of background compilation of {@linkplain #method}.
      */
     protected final void waitBackgroundCompilation() {
+        waitBackgroundCompilation(method);
+    }
+
+    /**
+     * Waits for completion of background compilation of the given executable.
+     *
+     * @param executable Executable
+     */
+    protected static final void waitBackgroundCompilation(Executable executable) {
         if (!BACKGROUND_COMPILATION) {
             return;
         }
         final Object obj = new Object();
         for (int i = 0; i < 10
-                && WHITE_BOX.isMethodQueuedForCompilation(method); ++i) {
+                && WHITE_BOX.isMethodQueuedForCompilation(executable); ++i) {
             synchronized (obj) {
                 try {
                     obj.wait(1000);
@@ -425,14 +443,14 @@
     /** constructor test case */
     CONSTRUCTOR_TEST(Helper.CONSTRUCTOR, Helper.CONSTRUCTOR_CALLABLE, false),
     /** method test case */
-    METOD_TEST(Helper.METHOD, Helper.METHOD_CALLABLE, false),
+    METHOD_TEST(Helper.METHOD, Helper.METHOD_CALLABLE, false),
     /** static method test case */
     STATIC_TEST(Helper.STATIC, Helper.STATIC_CALLABLE, false),
     /** OSR constructor test case */
     OSR_CONSTRUCTOR_TEST(Helper.OSR_CONSTRUCTOR,
             Helper.OSR_CONSTRUCTOR_CALLABLE, true),
     /** OSR method test case */
-    OSR_METOD_TEST(Helper.OSR_METHOD, Helper.OSR_METHOD_CALLABLE, true),
+    OSR_METHOD_TEST(Helper.OSR_METHOD, Helper.OSR_METHOD_CALLABLE, true),
     /** OSR static method test case */
     OSR_STATIC_TEST(Helper.OSR_STATIC, Helper.OSR_STATIC_CALLABLE, true);
 
@@ -494,7 +512,7 @@
                 = new Callable<Integer>() {
             @Override
             public Integer call() throws Exception {
-                return new Helper(null).hashCode();
+                return new Helper(null, CompilerWhiteBoxTest.BACKEDGE_THRESHOLD).hashCode();
             }
         };
 
@@ -504,7 +522,7 @@
 
             @Override
             public Integer call() throws Exception {
-                return helper.osrMethod();
+                return helper.osrMethod(CompilerWhiteBoxTest.BACKEDGE_THRESHOLD);
             }
         };
 
@@ -512,7 +530,7 @@
                 = new Callable<Integer>() {
             @Override
             public Integer call() throws Exception {
-                return osrStaticMethod();
+                return osrStaticMethod(CompilerWhiteBoxTest.BACKEDGE_THRESHOLD);
             }
         };
 
@@ -532,25 +550,24 @@
             }
             try {
                 OSR_CONSTRUCTOR = Helper.class.getDeclaredConstructor(
-                        Object.class);
+                        Object.class, long.class);
             } catch (NoSuchMethodException | SecurityException e) {
                 throw new RuntimeException(
-                        "exception on getting method Helper.<init>(Object)", e);
+                        "exception on getting method Helper.<init>(Object, long)", e);
             }
             METHOD = getMethod("method");
             STATIC = getMethod("staticMethod");
-            OSR_METHOD = getMethod("osrMethod");
-            OSR_STATIC = getMethod("osrStaticMethod");
+            OSR_METHOD = getMethod("osrMethod", long.class);
+            OSR_STATIC = getMethod("osrStaticMethod", long.class);
         }
 
-        private static Method getMethod(String name) {
+        private static Method getMethod(String name, Class<?>... parameterTypes) {
             try {
-                return Helper.class.getDeclaredMethod(name);
+                return Helper.class.getDeclaredMethod(name, parameterTypes);
             } catch (NoSuchMethodException | SecurityException e) {
                 throw new RuntimeException(
                         "exception on getting method Helper." + name, e);
             }
-
         }
 
         private static int staticMethod() {
@@ -561,17 +578,84 @@
             return 42;
         }
 
-        private static int osrStaticMethod() {
+        /**
+         * Deoptimizes all non-osr versions of the given executable after
+         * compilation has finished.
+         *
+         * @param e Executable
+         * @throws Exception
+         */
+        private static void waitAndDeoptimize(Executable e) {
+            CompilerWhiteBoxTest.waitBackgroundCompilation(e);
+            if (WhiteBox.getWhiteBox().isMethodQueuedForCompilation(e)) {
+                throw new RuntimeException(e + " must not be in queue");
+            }
+            // Deoptimize non-osr versions of executable
+            WhiteBox.getWhiteBox().deoptimizeMethod(e, false);
+        }
+
+        /**
+         * Executes the method multiple times to make sure we have
+         * enough profiling information before triggering an OSR
+         * compilation. Otherwise the C2 compiler may add uncommon traps.
+         *
+         * @param m Method to be executed
+         * @return Accumulated result of the method invocations
+         * @throws Exception
+         */
+        private static int warmup(Method m) throws Exception {
+            waitAndDeoptimize(m);
+            Helper helper = new Helper();
             int result = 0;
-            for (long i = 0; i < CompilerWhiteBoxTest.BACKEDGE_THRESHOLD; ++i) {
+            for (long i = 0; i < CompilerWhiteBoxTest.THRESHOLD; ++i) {
+                result += (int)m.invoke(helper, 1);
+            }
+            // Wait to make sure OSR compilation is not blocked by
+            // non-OSR compilation in the compile queue
+            CompilerWhiteBoxTest.waitBackgroundCompilation(m);
+            return result;
+        }
+
+        /**
+         * Executes the constructor multiple times to make sure we
+         * have enough profiling information before triggering an OSR
+         * compilation. Otherwise the C2 compiler may add uncommon traps.
+         *
+         * @param c Constructor to be executed
+         * @return Accumulated result of the constructor invocations
+         * @throws Exception
+         */
+        private static int warmup(Constructor c) throws Exception {
+            waitAndDeoptimize(c);
+            int result = 0;
+            for (long i = 0; i < CompilerWhiteBoxTest.THRESHOLD; ++i) {
+                result += c.newInstance(null, 1).hashCode();
+            }
+            // Wait to make sure OSR compilation is not blocked by
+            // non-OSR compilation in the compile queue
+            CompilerWhiteBoxTest.waitBackgroundCompilation(c);
+            return result;
+        }
+
+        private static int osrStaticMethod(long limit) throws Exception {
+            int result = 0;
+            if (limit != 1) {
+                result = warmup(OSR_STATIC);
+            }
+            // Trigger osr compilation
+            for (long i = 0; i < limit; ++i) {
                 result += staticMethod();
             }
             return result;
         }
 
-        private int osrMethod() {
+        private int osrMethod(long limit) throws Exception {
             int result = 0;
-            for (long i = 0; i < CompilerWhiteBoxTest.BACKEDGE_THRESHOLD; ++i) {
+            if (limit != 1) {
+                result = warmup(OSR_METHOD);
+            }
+            // Trigger osr compilation
+            for (long i = 0; i < limit; ++i) {
                 result += method();
             }
             return result;
@@ -585,9 +669,13 @@
         }
 
         // for OSR constructor test case
-        private Helper(Object o) {
+        private Helper(Object o, long limit) throws Exception {
             int result = 0;
-            for (long i = 0; i < CompilerWhiteBoxTest.BACKEDGE_THRESHOLD; ++i) {
+            if (limit != 1) {
+                result = warmup(OSR_CONSTRUCTOR);
+            }
+            // Trigger osr compilation
+            for (long i = 0; i < limit; ++i) {
                 result += method();
             }
             x = result;
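
For context on the warmup/limit scheme above: OSR (on-stack replacement) compilation is driven by backedge counters in long-running loops, so a method is compiled while one of its loops is still executing. A minimal sketch of a loop that would trigger OSR, assuming the 200_000 iteration count exceeds the backedge threshold in effect (the real threshold depends on the flags captured in BACKEDGE_THRESHOLD above):

    class OsrLoopSketch {
        static volatile long sink;

        public static void main(String[] args) {
            long sum = 0;
            // A counted loop long enough to exceed the interpreter's backedge
            // threshold, so the running frame is replaced by compiled code (OSR).
            for (long i = 0; i < 200_000; i++) {
                sum += i;
            }
            sink = sum; // keep the result alive so the loop is not removed
        }
    }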
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/whitebox/DeoptimizeMultipleOSRTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sun.hotspot.WhiteBox;
+import java.lang.reflect.Executable;
+import java.lang.reflect.Method;
+
+/*
+ * @test DeoptimizeMultipleOSRTest
+ * @bug 8061817
+ * @library /testlibrary /testlibrary/whitebox
+ * @build DeoptimizeMultipleOSRTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,DeoptimizeMultipleOSRTest::triggerOSR DeoptimizeMultipleOSRTest
+ * @summary testing of WB::deoptimizeMethod()
+ */
+public class DeoptimizeMultipleOSRTest {
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+    private static final long BACKEDGE_THRESHOLD = 150000;
+    private Method method;
+    private int counter = 0;
+
+    public static void main(String[] args) throws Exception {
+        DeoptimizeMultipleOSRTest test = new DeoptimizeMultipleOSRTest();
+        test.test();
+    }
+
+    /**
+     * Triggers two different OSR compilations for the same method and
+     * checks if WhiteBox.deoptimizeMethod() deoptimizes both.
+     *
+     * @throws Exception
+     */
+    public void test() throws Exception {
+        method = DeoptimizeMultipleOSRTest.class.getDeclaredMethod("triggerOSR", boolean.class, long.class);
+        // Trigger two OSR compiled versions
+        triggerOSR(true, BACKEDGE_THRESHOLD);
+        triggerOSR(false, BACKEDGE_THRESHOLD);
+        // Wait for compilation
+        CompilerWhiteBoxTest.waitBackgroundCompilation(method);
+        // Deoptimize
+        WHITE_BOX.deoptimizeMethod(method, true);
+        if (WHITE_BOX.isMethodCompiled(method, true)) {
+            throw new AssertionError("Not all OSR compiled versions were deoptimized");
+        }
+    }
+
+    /**
+     * Triggers OSR compilations by executing loops.
+     *
+     * @param first Determines which loop to execute
+     * @param limit The number of loop iterations
+     */
+    public void triggerOSR(boolean first, long limit) {
+        if (limit != 1) {
+            // Warmup method to avoid uncommon traps
+            for (int i = 0; i < limit; ++i) {
+                triggerOSR(first, 1);
+            }
+            CompilerWhiteBoxTest.waitBackgroundCompilation(method);
+        }
+        if (first) {
+            // Trigger OSR compilation 1
+            for (int i = 0; i < limit; ++i) {
+                counter++;
+            }
+        } else {
+            // Trigger OSR compilation 2
+            for (int i = 0; i < limit; ++i) {
+                counter++;
+            }
+        }
+    }
+}
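
The waitBackgroundCompilation(Executable) helper introduced above matters because, with background compilation enabled, a compile request is asynchronous; the test has to poll the compile queue before asserting anything about compiled state. A hedged standalone sketch of the same polling pattern, using only the WhiteBox call already seen in this patch:

    import java.lang.reflect.Executable;
    import sun.hotspot.WhiteBox;

    class CompileQueueWaitSketch {
        private static final WhiteBox WB = WhiteBox.getWhiteBox();

        // Poll the compile queue for up to ~10 seconds, mirroring the
        // waitBackgroundCompilation(Executable) helper added above.
        static void waitForQueue(Executable e) throws InterruptedException {
            for (int i = 0; i < 10 && WB.isMethodQueuedForCompilation(e); i++) {
                Thread.sleep(1000);
            }
        }
    }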
--- a/test/compiler/whitebox/IsMethodCompilableTest.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/whitebox/IsMethodCompilableTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -28,7 +28,7 @@
  * @build IsMethodCompilableTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run main ClassFileInstaller com.oracle.java.testlibrary.Platform
- * @run main/othervm/timeout=2400 -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:PerMethodRecompilationCutoff=3 -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* IsMethodCompilableTest
+ * @run main/othervm/timeout=2400 -Xbootclasspath/a:. -Xmixed -XX:-TieredCompilation -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:PerMethodRecompilationCutoff=3 -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* IsMethodCompilableTest
  * @summary testing of WB::isMethodCompilable()
  * @author igor.ignatyev@oracle.com
  */
--- a/test/compiler/whitebox/MakeMethodNotCompilableTest.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/compiler/whitebox/MakeMethodNotCompilableTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -131,14 +131,15 @@
             throw new RuntimeException(method
                     + " is not compilable after clearMethodState()");
         }
-
+        // Make method not (OSR-)compilable (depending on testCase.isOsr())
         makeNotCompilable();
         if (isCompilable()) {
             throw new RuntimeException(method + " must be not compilable");
         }
-
+        // Try to (OSR-)compile method
         compile();
-        checkNotCompiled();
+        // Method should not be (OSR-)compiled
+        checkNotCompiled(testCase.isOsr());
         if (isCompilable()) {
             throw new RuntimeException(method + " must be not compilable");
         }
--- a/test/gc/arguments/TestDynMaxHeapFreeRatio.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/gc/arguments/TestDynMaxHeapFreeRatio.java	Fri Apr 10 16:58:26 2015 -0700
@@ -21,6 +21,11 @@
  * questions.
  */
 
+import static com.oracle.java.testlibrary.Asserts.assertEQ;
+import static com.oracle.java.testlibrary.Asserts.assertFalse;
+import static com.oracle.java.testlibrary.Asserts.assertTrue;
+import com.oracle.java.testlibrary.DynamicVMOption;
+
 /**
  * @test TestDynMaxHeapFreeRatio
  * @bug 8028391
@@ -33,32 +38,45 @@
  * @run main/othervm -XX:MinHeapFreeRatio=51 -XX:MaxHeapFreeRatio=52 TestDynMaxHeapFreeRatio
  * @run main/othervm -XX:MinHeapFreeRatio=75 -XX:MaxHeapFreeRatio=100 TestDynMaxHeapFreeRatio
  */
-import com.oracle.java.testlibrary.TestDynamicVMOption;
-import com.oracle.java.testlibrary.DynamicVMOptionChecker;
-
-public class TestDynMaxHeapFreeRatio extends TestDynamicVMOption {
-
-    public static final String MinFreeRatioFlagName = "MinHeapFreeRatio";
-    public static final String MaxFreeRatioFlagName = "MaxHeapFreeRatio";
-
-    public TestDynMaxHeapFreeRatio() {
-        super(MaxFreeRatioFlagName);
-    }
-
-    public void test() {
-
-        int minHeapFreeValue = DynamicVMOptionChecker.getIntValue(MinFreeRatioFlagName);
-        System.out.println(MinFreeRatioFlagName + " = " + minHeapFreeValue);
-
-        testPercentageValues();
-
-        checkInvalidValue(Integer.toString(minHeapFreeValue - 1));
-        checkValidValue(Integer.toString(minHeapFreeValue));
-        checkValidValue("100");
-    }
+public class TestDynMaxHeapFreeRatio {
 
     public static void main(String args[]) throws Exception {
-        new TestDynMaxHeapFreeRatio().test();
+
+        // low boundary value
+        int minValue = DynamicVMOption.getInt("MinHeapFreeRatio");
+        System.out.println("MinHeapFreeRatio= " + minValue);
+
+        String badValues[] = {
+            null,
+            "",
+            "not a number",
+            "8.5", "-0.01",
+            Integer.toString(Integer.MIN_VALUE),
+            Integer.toString(Integer.MAX_VALUE),
+            Integer.toString(minValue - 1),
+            "-1024", "-1", "101", "1997"
+        };
+
+        String goodValues[] = {
+            Integer.toString(minValue),
+            Integer.toString(minValue + 1),
+            Integer.toString((minValue + 100) / 2),
+            "99", "100"
+        };
+
+        DynamicVMOption option = new DynamicVMOption("MaxHeapFreeRatio");
+
+        assertTrue(option.isWriteable(), "Option " + option.name
+                + " is expected to be writable");
+
+        for (String v : badValues) {
+            assertFalse(option.isValidValue(v),
+                    "'" + v + "' is expected to be illegal for flag " + option.name);
+        }
+        for (String v : goodValues) {
+            option.setValue(v);
+            String newValue = option.getValue();
+            assertEQ(v, newValue);
+        }
     }
-
 }
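
The DynamicVMOption testlibrary class used in the rewritten test is not shown in this patch; manageable flags such as MaxHeapFreeRatio can also be read and written directly through the standard HotSpotDiagnosticMXBean, which is roughly what such a helper would do. A minimal sketch (the value 95 is for illustration only and must stay >= MinHeapFreeRatio):

    import java.lang.management.ManagementFactory;
    import com.sun.management.HotSpotDiagnosticMXBean;
    import com.sun.management.VMOption;

    class ManageableFlagSketch {
        public static void main(String[] args) {
            HotSpotDiagnosticMXBean diag =
                    ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class);
            VMOption opt = diag.getVMOption("MaxHeapFreeRatio");
            System.out.println(opt.getName() + " = " + opt.getValue()
                    + " (writeable: " + opt.isWriteable() + ")");
            // Manageable flags can be changed at runtime.
            diag.setVMOption("MaxHeapFreeRatio", "95");
            System.out.println("now " + diag.getVMOption("MaxHeapFreeRatio").getValue());
        }
    }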
--- a/test/gc/arguments/TestDynMinHeapFreeRatio.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/gc/arguments/TestDynMinHeapFreeRatio.java	Fri Apr 10 16:58:26 2015 -0700
@@ -33,30 +33,52 @@
  * @run main/othervm -XX:MinHeapFreeRatio=51 -XX:MaxHeapFreeRatio=52 TestDynMinHeapFreeRatio
  * @run main/othervm -XX:MinHeapFreeRatio=75 -XX:MaxHeapFreeRatio=100 TestDynMinHeapFreeRatio
  */
-import com.oracle.java.testlibrary.TestDynamicVMOption;
-import com.oracle.java.testlibrary.DynamicVMOptionChecker;
-
-public class TestDynMinHeapFreeRatio extends TestDynamicVMOption {
-
-    public static final String MinFreeRatioFlagName = "MinHeapFreeRatio";
-    public static final String MaxFreeRatioFlagName = "MaxHeapFreeRatio";
+import static com.oracle.java.testlibrary.Asserts.assertEQ;
+import static com.oracle.java.testlibrary.Asserts.assertFalse;
+import static com.oracle.java.testlibrary.Asserts.assertTrue;
+import com.oracle.java.testlibrary.DynamicVMOption;
 
-    public TestDynMinHeapFreeRatio() {
-        super(MinFreeRatioFlagName);
-    }
-
-    public void test() {
-        int maxHeapFreeValue = DynamicVMOptionChecker.getIntValue(MaxFreeRatioFlagName);
-        System.out.println(MaxFreeRatioFlagName + " = " + maxHeapFreeValue);
-
-        testPercentageValues();
-
-        checkInvalidValue(Integer.toString(maxHeapFreeValue + 1));
-        checkValidValue(Integer.toString(maxHeapFreeValue));
-        checkValidValue("0");
-    }
+public class TestDynMinHeapFreeRatio {
 
     public static void main(String args[]) throws Exception {
-        new TestDynMinHeapFreeRatio().test();
+
+        // high boundary value
+        int maxValue = DynamicVMOption.getInt("MaxHeapFreeRatio");
+        System.out.println("MaxHeapFreeRatio= " + maxValue);
+
+        String badValues[] = {
+            null,
+            "",
+            "not a number",
+            "8.5", "-0.01",
+            Integer.toString(Integer.MIN_VALUE),
+            Integer.toString(Integer.MAX_VALUE),
+            Integer.toString(maxValue + 1),
+            "-1024", "-1", "101", "1997"
+        };
+
+        String goodValues[] = {
+            Integer.toString(maxValue),
+            Integer.toString(maxValue - 1),
+            Integer.toString(maxValue / 2),
+            "0", "1"
+        };
+
+        // option under test
+        DynamicVMOption option = new DynamicVMOption("MinHeapFreeRatio");
+
+        assertTrue(option.isWriteable(), "Option " + option.name
+                + " is expected to be writable");
+
+        for (String v : badValues) {
+            assertFalse(option.isValidValue(v),
+                    "'" + v + "' is expected to be illegal for flag " + option.name);
+        }
+
+        for (String v : goodValues) {
+            option.setValue(v);
+            String newValue = option.getValue();
+            assertEQ(v, newValue);
+        }
     }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/arguments/TestG1ConcRefinementThreads.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,97 @@
+/*
+* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+/*
+ * @test TestG1ConcRefinementThreads
+ * @key gc
+ * @bug 8047976
+ * @summary Tests argument processing for G1ConcRefinementThreads
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.*;
+import java.util.*;
+import java.util.regex.*;
+
+public class TestG1ConcRefinementThreads {
+
+  static final int AUTO_SELECT_THREADS_COUNT = 0;
+  static final int PASSED_THREADS_COUNT = 11;
+
+  public static void main(String args[]) throws Exception {
+    // default case
+    runG1ConcRefinementThreadsTest(
+        new String[]{}, // automatically selected
+        AUTO_SELECT_THREADS_COUNT /* use default setting */);
+
+    // zero setting case
+    runG1ConcRefinementThreadsTest(
+        new String[]{"-XX:G1ConcRefinementThreads=0"}, // automatically selected
+        AUTO_SELECT_THREADS_COUNT /* set to zero */);
+
+    // non-zero setting case
+    runG1ConcRefinementThreadsTest(
+        new String[]{"-XX:G1ConcRefinementThreads="+Integer.toString(PASSED_THREADS_COUNT)},
+        PASSED_THREADS_COUNT);
+  }
+
+  private static void runG1ConcRefinementThreadsTest(String[] passedOpts,
+          int expectedValue) throws Exception {
+    List<String> vmOpts = new ArrayList<>();
+    if (passedOpts.length > 0) {
+      Collections.addAll(vmOpts, passedOpts);
+    }
+    Collections.addAll(vmOpts, "-XX:+UseG1GC", "-XX:+PrintFlagsFinal", "-version");
+
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(vmOpts.toArray(new String[vmOpts.size()]));
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+    output.shouldHaveExitValue(0);
+    String stdout = output.getStdout();
+    checkG1ConcRefinementThreadsConsistency(stdout, expectedValue);
+  }
+
+  private static void checkG1ConcRefinementThreadsConsistency(String output, int expectedValue) {
+    int actualValue = getIntValue("G1ConcRefinementThreads", output);
+
+    if (expectedValue == 0) {
+      // If expectedValue is automatically selected, set it to the value of ParallelGCThreads.
+      expectedValue = getIntValue("ParallelGCThreads", output);
+    }
+
+    if (expectedValue != actualValue) {
+      throw new RuntimeException(
+            "Actual G1ConcRefinementThreads(" + Integer.toString(actualValue)
+            + ") is not equal to expected value(" + Integer.toString(expectedValue) + ")");
+    }
+  }
+
+  public static int getIntValue(String flag, String where) {
+    Matcher m = Pattern.compile(flag + "\\s+:?=\\s+\\d+").matcher(where);
+    if (!m.find()) {
+      throw new RuntimeException("Could not find value for flag " + flag + " in output string");
+    }
+    String match = m.group();
+    return Integer.parseInt(match.substring(match.lastIndexOf(" ") + 1, match.length()));
+  }
+}
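
The getIntValue helper above scrapes the flag value out of -XX:+PrintFlagsFinal output with a regular expression. A small sketch of that parsing against an assumed sample line (the exact column spacing of the real output differs, which is why the pattern uses \s+; ":=" marks a value that was set explicitly rather than left at its default):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    class FlagsFinalParseSketch {
        public static void main(String[] args) {
            // Line shape assumed from -XX:+PrintFlagsFinal output.
            String line = "    uintx G1ConcRefinementThreads        := 11        {product}";
            Matcher m = Pattern.compile("G1ConcRefinementThreads\\s+:?=\\s+\\d+").matcher(line);
            if (m.find()) {
                String match = m.group();
                // Prints 11: the digits after the last space of the match.
                System.out.println(Integer.parseInt(match.substring(match.lastIndexOf(" ") + 1)));
            }
        }
    }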
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/arguments/TestSurvivorAlignmentInBytesOption.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.ExitCode;
+import com.oracle.java.testlibrary.cli.CommandLineOptionTest;
+
+/**
+ * @test
+ * @bug 8031323
+ * @summary Verify SurvivorAlignmentInBytes option processing.
+ * @library /testlibrary
+ * @requires vm.opt.SurvivorAlignmentInBytes == null
+ *           & vm.opt.ObjectAlignmentInBytes == null
+ *           & vm.opt.UnlockExperimentalVMOptions == null
+ *           & (vm.opt.IgnoreUnrecognizedVMOptions == null
+ *              | vm.opt.IgnoreUnrecognizedVMOptions == "false")
+ * @run main TestSurvivorAlignmentInBytesOption
+ */
+public class TestSurvivorAlignmentInBytesOption {
+    public static void main(String args[]) throws Throwable {
+        String optionName = "SurvivorAlignmentInBytes";
+        String unlockExperimentalVMOpts = "UnlockExperimentalVMOptions";
+        String optionIsExperimental
+                = CommandLineOptionTest.getExperimentalOptionErrorMessage(
+                optionName);
+        String valueIsTooSmall = ".*SurvivorAlignmentInBytes=.*must be greater"
+                + " than ObjectAlignmentInBytes.*";
+        String mustBePowerOf2 = ".*SurvivorAlignmentInBytes=.*must be "
+                + "power of 2.*";
+
+        // Verify that without -XX:+UnlockExperimentalVMOptions, use of the
+        // SurvivorAlignmentInBytes option will cause JVM startup failure
+        // with a warning message saying that the option is experimental.
+        CommandLineOptionTest.verifyJVMStartup(
+                new String[]{optionIsExperimental}, null, ExitCode.FAIL, false,
+                "-XX:-UnlockExperimentalVMOptions",
+                CommandLineOptionTest.prepareBooleanFlag(
+                        unlockExperimentalVMOpts, false),
+                CommandLineOptionTest.prepareNumericFlag(optionName, 64));
+
+        // Verify that with -XX:+UnlockExperimentalVMOptions passed to the JVM,
+        // use of the SurvivorAlignmentInBytes option won't cause JVM startup
+        // failure.
+        CommandLineOptionTest.verifyJVMStartup(
+                null, new String[]{optionIsExperimental}, ExitCode.OK, false,
+                CommandLineOptionTest.prepareBooleanFlag(
+                        unlockExperimentalVMOpts, true),
+                CommandLineOptionTest.prepareNumericFlag(optionName, 64));
+
+        // Verify that if the specified SurvivorAlignmentInBytes is lower than
+        // ObjectAlignmentInBytes, then JVM startup will fail with an
+        // appropriate error message.
+        CommandLineOptionTest.verifyJVMStartup(
+                new String[]{valueIsTooSmall}, null, ExitCode.FAIL, false,
+                CommandLineOptionTest.prepareBooleanFlag(
+                        unlockExperimentalVMOpts, true),
+                CommandLineOptionTest.prepareNumericFlag(optionName, 2));
+
+        // Verify that if the specified SurvivorAlignmentInBytes value is not
+        // a power of 2, then JVM startup will fail with an appropriate
+        // error message.
+        CommandLineOptionTest.verifyJVMStartup(
+                new String[]{mustBePowerOf2}, null, ExitCode.FAIL, false,
+                CommandLineOptionTest.prepareBooleanFlag(
+                        unlockExperimentalVMOpts, true),
+                CommandLineOptionTest.prepareNumericFlag(optionName, 127));
+
+        // Verify that if SurvivorAlignmentInBytes has a correct value, then
+        // the JVM will be started without errors.
+        CommandLineOptionTest.verifyJVMStartup(
+                null, new String[]{".*SurvivorAlignmentInBytes.*"},
+                ExitCode.OK, false,
+                CommandLineOptionTest.prepareBooleanFlag(
+                        unlockExperimentalVMOpts, true),
+                CommandLineOptionTest.prepareNumericFlag(optionName, 128));
+
+        // Verify that we can set up different SurvivorAlignmentInBytes values.
+        for (int alignment = 32; alignment <= 128; alignment *= 2) {
+            CommandLineOptionTest.verifyOptionValue(optionName,
+                    Integer.toString(alignment),
+                    CommandLineOptionTest.prepareBooleanFlag(
+                            unlockExperimentalVMOpts, true),
+                    CommandLineOptionTest.prepareNumericFlag(
+                            optionName, alignment));
+        }
+    }
+}
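
The CommandLineOptionTest.verifyJVMStartup calls above come from the testlibrary and are not shown in this patch. The underlying idea is simply to launch a fresh JVM with the flag combination and inspect the exit status and output; a minimal sketch using plain ProcessBuilder (assuming "java" resolves to the JDK under test, which the testlibrary normally guarantees):

    class ExperimentalFlagLaunchSketch {
        public static void main(String[] args) throws Exception {
            ProcessBuilder pb = new ProcessBuilder(
                    "java", "-XX:SurvivorAlignmentInBytes=64", "-version");
            pb.redirectErrorStream(true);
            Process p = pb.start();
            int exit = p.waitFor();
            // Without -XX:+UnlockExperimentalVMOptions the VM is expected to
            // refuse to start, so a non-zero exit code is the expected outcome here.
            System.out.println("exit code: " + exit);
        }
    }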
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @key gc
+ * @bug 8049831
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestCMSClassUnloadingEnabledHWM
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run driver TestCMSClassUnloadingEnabledHWM
+ * @summary Test that -XX:-CMSClassUnloadingEnabled will trigger a Full GC when more than MetaspaceSize metadata is allocated.
+ */
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+import java.lang.management.GarbageCollectorMXBean;
+import java.lang.management.ManagementFactory;
+import java.util.ArrayList;
+import java.util.Arrays;
+import sun.hotspot.WhiteBox;
+
+public class TestCMSClassUnloadingEnabledHWM {
+  private static long MetaspaceSize = 32 * 1024 * 1024;
+  private static long YoungGenSize  = 32 * 1024 * 1024;
+
+  private static OutputAnalyzer run(boolean enableUnloading) throws Exception {
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+      "-Xbootclasspath/a:.",
+      "-XX:+UnlockDiagnosticVMOptions",
+      "-XX:+WhiteBoxAPI",
+      "-Xmx128m",
+      "-XX:CMSMaxAbortablePrecleanTime=1",
+      "-XX:CMSWaitDuration=50",
+      "-XX:MetaspaceSize=" + MetaspaceSize,
+      "-Xmn" + YoungGenSize,
+      "-XX:+UseConcMarkSweepGC",
+      "-XX:" + (enableUnloading ? "+" : "-") + "CMSClassUnloadingEnabled",
+      "-XX:+PrintHeapAtGC",
+      "-XX:+PrintGCDetails",
+      "-XX:+PrintGCTimeStamps",
+      TestCMSClassUnloadingEnabledHWM.AllocateBeyondMetaspaceSize.class.getName(),
+      "" + MetaspaceSize);
+    return new OutputAnalyzer(pb.start());
+  }
+
+  public static OutputAnalyzer runWithCMSClassUnloading() throws Exception {
+    return run(true);
+  }
+
+  public static OutputAnalyzer runWithoutCMSClassUnloading() throws Exception {
+    return run(false);
+  }
+
+  public static void testWithoutCMSClassUnloading() throws Exception {
+    // -XX:-CMSClassUnloadingEnabled is used, so we expect a full GC instead of a concurrent cycle.
+    OutputAnalyzer out = runWithoutCMSClassUnloading();
+
+    out.shouldMatch(".*Full GC.*");
+    out.shouldNotMatch(".*CMS Initial Mark.*");
+  }
+
+  public static void testWithCMSClassUnloading() throws Exception {
+    // -XX:+CMSClassUnloadingEnabled is used, so we expect a concurrent cycle instead of a full GC.
+    OutputAnalyzer out = runWithCMSClassUnloading();
+
+    out.shouldMatch(".*CMS Initial Mark.*");
+    out.shouldNotMatch(".*Full GC.*");
+  }
+
+  public static void main(String args[]) throws Exception {
+    testWithCMSClassUnloading();
+    testWithoutCMSClassUnloading();
+  }
+
+  public static class AllocateBeyondMetaspaceSize {
+    public static void main(String [] args) throws Exception {
+      if (args.length != 1) {
+        throw new IllegalArgumentException("Usage: <MetaspaceSize>");
+      }
+
+      WhiteBox wb = WhiteBox.getWhiteBox();
+
+      // Allocate past the MetaspaceSize limit.
+      long metaspaceSize = Long.parseLong(args[0]);
+      long allocationBeyondMetaspaceSize  = metaspaceSize * 2;
+      long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
+
+      // Wait for at least one GC to occur. The caller will parse the log files produced.
+      GarbageCollectorMXBean cmsGCBean = getCMSGCBean();
+      while (cmsGCBean.getCollectionCount() == 0) {
+        Thread.sleep(100);
+      }
+
+      wb.freeMetaspace(null, metaspace, metaspace);
+    }
+
+    private static GarbageCollectorMXBean getCMSGCBean() {
+      for (GarbageCollectorMXBean gcBean : ManagementFactory.getGarbageCollectorMXBeans()) {
+        if (gcBean.getObjectName().toString().equals("java.lang:type=GarbageCollector,name=ConcurrentMarkSweep")) {
+          return gcBean;
+        }
+      }
+      return null;
+    }
+  }
+}
+
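
The AllocateBeyondMetaspaceSize helper above waits for a CMS cycle by polling the collector's MXBean. The same java.lang.management API can be used to enumerate all collectors and their collection counts; a minimal sketch:

    import java.lang.management.GarbageCollectorMXBean;
    import java.lang.management.ManagementFactory;

    class GcBeanPollSketch {
        public static void main(String[] args) {
            // Lists every registered collector, e.g. "ConcurrentMarkSweep" when
            // running with -XX:+UseConcMarkSweepGC.
            for (GarbageCollectorMXBean bean : ManagementFactory.getGarbageCollectorMXBeans()) {
                System.out.println(bean.getName() + ": "
                        + bean.getCollectionCount() + " collections");
            }
        }
    }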
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/class_unloading/TestG1ClassUnloadingHWM.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @key gc
+ * @bug 8049831
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestG1ClassUnloadingHWM
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run driver TestG1ClassUnloadingHWM
+ * @summary Test that -XX:-ClassUnloadingWithConcurrentMark will trigger a Full GC when more than MetaspaceSize metadata is allocated.
+ */
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+import java.util.ArrayList;
+import java.util.Arrays;
+import sun.hotspot.WhiteBox;
+
+public class TestG1ClassUnloadingHWM {
+  private static long MetaspaceSize = 32 * 1024 * 1024;
+  private static long YoungGenSize  = 32 * 1024 * 1024;
+
+  private static OutputAnalyzer run(boolean enableUnloading) throws Exception {
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+      "-Xbootclasspath/a:.",
+      "-XX:+UnlockDiagnosticVMOptions",
+      "-XX:+WhiteBoxAPI",
+      "-XX:MetaspaceSize=" + MetaspaceSize,
+      "-Xmn" + YoungGenSize,
+      "-XX:+UseG1GC",
+      "-XX:" + (enableUnloading ? "+" : "-") + "ClassUnloadingWithConcurrentMark",
+      "-XX:+PrintHeapAtGC",
+      "-XX:+PrintGCDetails",
+      TestG1ClassUnloadingHWM.AllocateBeyondMetaspaceSize.class.getName(),
+      "" + MetaspaceSize,
+      "" + YoungGenSize);
+    return new OutputAnalyzer(pb.start());
+  }
+
+  public static OutputAnalyzer runWithG1ClassUnloading() throws Exception {
+    return run(true);
+  }
+
+  public static OutputAnalyzer runWithoutG1ClassUnloading() throws Exception {
+    return run(false);
+  }
+
+  public static void testWithoutG1ClassUnloading() throws Exception {
+    // -XX:-ClassUnloadingWithConcurrentMark is used, so we expect a full GC instead of a concurrent cycle.
+    OutputAnalyzer out = runWithoutG1ClassUnloading();
+
+    out.shouldMatch(".*Full GC.*");
+    out.shouldNotMatch(".*initial-mark.*");
+  }
+
+  public static void testWithG1ClassUnloading() throws Exception {
+    // -XX:+ClassUnloadingWithConcurrentMark is used, so we expect a concurrent cycle instead of a full GC.
+    OutputAnalyzer out = runWithG1ClassUnloading();
+
+    out.shouldMatch(".*initial-mark.*");
+    out.shouldNotMatch(".*Full GC.*");
+  }
+
+  public static void main(String args[]) throws Exception {
+    testWithG1ClassUnloading();
+    testWithoutG1ClassUnloading();
+  }
+
+  public static class AllocateBeyondMetaspaceSize {
+    public static Object dummy;
+
+    public static void main(String [] args) throws Exception {
+      if (args.length != 2) {
+        throw new IllegalArgumentException("Usage: <MetaspaceSize> <YoungGenSize>");
+      }
+
+      WhiteBox wb = WhiteBox.getWhiteBox();
+
+      // Allocate past the MetaspaceSize limit
+      long metaspaceSize = Long.parseLong(args[0]);
+      long allocationBeyondMetaspaceSize  = metaspaceSize * 2;
+      long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
+
+      long youngGenSize = Long.parseLong(args[1]);
+      triggerYoungGCs(youngGenSize);
+
+      wb.freeMetaspace(null, metaspace, metaspace);
+    }
+
+    public static void triggerYoungGCs(long youngGenSize) {
+      long approxAllocSize = 32 * 1024;
+      long numAllocations  = 2 * youngGenSize / approxAllocSize;
+
+      for (long i = 0; i < numAllocations; i++) {
+        dummy = new byte[(int)approxAllocSize];
+      }
+    }
+  }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/concurrentMarkSweep/DisableResizePLAB.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,44 @@
+/*
+* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+/*
+ * @test DisableResizePLAB
+ * @key gc
+ * @bug 8060467
+ * @author filipp.zhinkin@oracle.com, john.coomes@oracle.com
+ * @summary Run CMS with PLAB resizing disabled and a small OldPLABSize
+ * @run main/othervm -XX:+UseConcMarkSweepGC -XX:-ResizePLAB -XX:OldPLABSize=1k -Xmx256m -XX:+PrintGCDetails DisableResizePLAB
+ */
+
+public class DisableResizePLAB {
+    public static void main(String args[]) throws Exception {
+        Object garbage[] = new Object[1_000];
+        for (int i = 0; i < garbage.length; i++) {
+            garbage[i] = new byte[0];
+        }
+        long startTime = System.currentTimeMillis();
+        while (System.currentTimeMillis() - startTime < 10_000) {
+            Object o = new byte[1024];
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/TestEagerReclaimHumongousRegions.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestEagerReclaimHumongousRegions
+ * @bug 8027959
+ * @summary Test to make sure that eager reclaim of humongous objects work. We simply try to fill
+ * up the heap with humongous objects that should be eagerly reclaimable to avoid Full GC.
+ * @key gc
+ * @library /testlibrary
+ */
+
+import java.util.regex.Pattern;
+import java.util.regex.Matcher;
+import java.util.LinkedList;
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+import com.oracle.java.testlibrary.Asserts;
+
+class ReclaimRegionFast {
+    public static final int M = 1024*1024;
+
+    public static LinkedList<Object> garbageList = new LinkedList<Object>();
+
+    public static void genGarbage() {
+        for (int i = 0; i < 32*1024; i++) {
+            garbageList.add(new int[100]);
+        }
+        garbageList.clear();
+    }
+
+    // A large object referenced by a static.
+    static int[] filler = new int[10 * M];
+
+    public static void main(String[] args) {
+
+        int[] large = new int[M];
+
+        Object ref_from_stack = large;
+
+        for (int i = 0; i < 100; i++) {
+            // A large object that will be reclaimed eagerly.
+            large = new int[6*M];
+            genGarbage();
+            // Make sure that the compiler cannot optimize away
+            // the allocation of the large object before this point.
+            System.out.println(large);
+        }
+
+        // Keep the reference to the first object alive.
+        System.out.println(ref_from_stack);
+    }
+}
+
+public class TestEagerReclaimHumongousRegions {
+    public static void main(String[] args) throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UseG1GC",
+            "-Xms128M",
+            "-Xmx128M",
+            "-Xmn16M",
+            "-XX:+PrintGC",
+            ReclaimRegionFast.class.getName());
+
+        Pattern p = Pattern.compile("Full GC");
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+        int found = 0;
+        Matcher m = p.matcher(output.getStdout());
+        while (m.find()) { found++; }
+        System.out.println("Issued " + found + " Full GCs");
+        Asserts.assertLT(found, 10, "Found that " + found + " Full GCs were issued. This is larger than the bound. Eager reclaim seems to not work at all");
+
+        output.shouldHaveExitValue(0);
+    }
+}
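
For context on why a 6M int array exercises the eager-reclaim path: in G1, objects of half a heap region size or larger are allocated as humongous objects spanning whole regions of their own. A minimal sketch of such an allocation, assuming a 1 MB region size (this test relies on the region size chosen for its 128M heap):

    class HumongousAllocSketch {
        static Object sink;

        public static void main(String[] args) {
            int regionSize = 1024 * 1024;          // assumes -XX:G1HeapRegionSize=1M
            // Anything at least half a region in size is humongous in G1 and
            // occupies one or more dedicated regions.
            sink = new byte[regionSize / 2 + 1];
            System.out.println("allocated a humongous byte[]");
        }
    }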
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/TestEagerReclaimHumongousRegions2.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestEagerReclaimHumongousRegions2
+ * @bug 8051973
+ * @summary Test to make sure that eager reclaim of humongous objects correctly clears
+ * mark bitmaps at reclaim.
+ * @key gc
+ * @library /testlibrary
+ */
+
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.Random;
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+
+// An object that has a few references to other instances to slow down marking.
+class ObjectWithSomeRefs {
+    public ObjectWithSomeRefs other1;
+    public ObjectWithSomeRefs other2;
+    public ObjectWithSomeRefs other3;
+    public ObjectWithSomeRefs other4;
+}
+
+class ReclaimRegionFast {
+    public static final long MAX_MILLIS_FOR_RUN = 50 * 1000; // The maximum runtime for the actual test.
+
+    public static final int M = 1024*1024;
+
+    public static LinkedList<Object> garbageList = new LinkedList<Object>();
+
+    public static void genGarbage(Object large) {
+        for (int i = 0; i < 64*1024; i++) {
+            Object[] garbage = new Object[50];
+            garbage[0] = large;
+            garbageList.add(garbage);
+        }
+        garbageList.clear();
+    }
+
+    public static ArrayList<ObjectWithSomeRefs> longList = new ArrayList<ObjectWithSomeRefs>();
+
+    public static void main(String[] args) {
+
+        for (int i = 0; i < 16*1024; i++) {
+             longList.add(new ObjectWithSomeRefs());
+        }
+
+        Random rnd = new Random();
+        for (int i = 0; i < longList.size(); i++) {
+             int len = longList.size();
+             longList.get(i).other1 = longList.get(rnd.nextInt(len));
+             longList.get(i).other2 = longList.get(rnd.nextInt(len));
+             longList.get(i).other3 = longList.get(rnd.nextInt(len));
+             longList.get(i).other4 = longList.get(rnd.nextInt(len));
+        }
+
+        int[] large1 = new int[M];
+        int[] large2 = null;
+        int[] large3 = null;
+        int[] large4 = null;
+
+        Object ref_from_stack = large1;
+
+        long start_millis = System.currentTimeMillis();
+
+        for (int i = 0; i < 20; i++) {
+            long current_millis = System.currentTimeMillis();
+            if ((current_millis - start_millis) > MAX_MILLIS_FOR_RUN) {
+              System.out.println("Finishing test because maximum runtime exceeded");
+              break;
+            }
+            // A set of large objects that will be reclaimed eagerly - and hopefully marked.
+            large1 = new int[M - 20];
+            large2 = new int[M - 20];
+            large3 = new int[M - 20];
+            large4 = new int[M - 20];
+            genGarbage(large1);
+            // Make sure that the compiler cannot optimize away
+            // the allocation of the large object before this point.
+            System.out.println(large1 + " " + large2 + " " + large3 + " " + large4);
+        }
+
+        // Keep the reference to the first object alive.
+        System.out.println(ref_from_stack);
+    }
+}
+
+public class TestEagerReclaimHumongousRegions2 {
+    public static void main(String[] args) throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:+UseG1GC",
+            "-Xms128M",
+            "-Xmx128M",
+            "-Xmn2M",
+            "-XX:G1HeapRegionSize=1M",
+            "-XX:InitiatingHeapOccupancyPercent=0", // Want to have as much as possible initial marks.
+            "-XX:+PrintGC",
+            "-XX:+VerifyAfterGC",
+            "-XX:ConcGCThreads=1", // Want to make marking as slow as possible.
+            "-XX:+IgnoreUnrecognizedVMOptions", // G1VerifyBitmaps is develop only.
+            "-XX:+G1VerifyBitmaps",
+            ReclaimRegionFast.class.getName());
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+    }
+}
+
--- a/test/gc/g1/TestGCLogMessages.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/gc/g1/TestGCLogMessages.java	Fri Apr 10 16:58:26 2015 -0700
@@ -22,8 +22,8 @@
  */
 
 /*
- * @test TestPrintGCDetails
- * @bug 8035406 8027295 8035398
+ * @test TestGCLogMessages
+ * @bug 8035406 8027295 8035398 8019342 8027959
  * @summary Ensure that the PrintGCDetails output for a minor GC with G1
  * includes the expected necessary messages.
  * @key gc
@@ -48,10 +48,13 @@
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
 
     output.shouldNotContain("[Redirty Cards");
+    output.shouldNotContain("[Parallel Redirty");
+    output.shouldNotContain("[Redirtied Cards");
     output.shouldNotContain("[Code Root Purge");
     output.shouldNotContain("[String Dedup Fixup");
     output.shouldNotContain("[Young Free CSet");
     output.shouldNotContain("[Non-Young Free CSet");
+    output.shouldNotContain("[Humongous Reclaim");
     output.shouldHaveExitValue(0);
 
     pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
@@ -63,10 +66,16 @@
     output = new OutputAnalyzer(pb.start());
 
     output.shouldContain("[Redirty Cards");
+    output.shouldNotContain("[Parallel Redirty");
+    output.shouldNotContain("[Redirtied Cards");
     output.shouldContain("[Code Root Purge");
     output.shouldContain("[String Dedup Fixup");
     output.shouldNotContain("[Young Free CSet");
     output.shouldNotContain("[Non-Young Free CSet");
+    output.shouldContain("[Humongous Reclaim");
+    output.shouldNotContain("[Humongous Total");
+    output.shouldNotContain("[Humongous Candidate");
+    output.shouldNotContain("[Humongous Reclaimed");
     output.shouldHaveExitValue(0);
 
     pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
@@ -80,16 +89,16 @@
     output = new OutputAnalyzer(pb.start());
 
     output.shouldContain("[Redirty Cards");
+    output.shouldContain("[Parallel Redirty");
+    output.shouldContain("[Redirtied Cards");
     output.shouldContain("[Code Root Purge");
     output.shouldContain("[String Dedup Fixup");
     output.shouldContain("[Young Free CSet");
     output.shouldContain("[Non-Young Free CSet");
-
-    // also check evacuation failure messages once
-    output.shouldNotContain("[Evacuation Failure");
-    output.shouldNotContain("[Recalculate Used");
-    output.shouldNotContain("[Remove Self Forwards");
-    output.shouldNotContain("[Restore RemSet");
+    output.shouldContain("[Humongous Reclaim");
+    output.shouldContain("[Humongous Total");
+    output.shouldContain("[Humongous Candidate");
+    output.shouldContain("[Humongous Reclaimed");
     output.shouldHaveExitValue(0);
   }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/TestHumongousShrinkHeap.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestHumongousShrinkHeap
+ * @bug 8036025 8056043
+ * @summary Verify that heap shrinks after GC in the presence of fragmentation
+ * due to humongous objects
+ * @library /testlibrary
+ * @run main/othervm -XX:-ExplicitGCInvokesConcurrent -XX:MinHeapFreeRatio=10
+ * -XX:MaxHeapFreeRatio=12 -XX:+UseG1GC -XX:G1HeapRegionSize=1M -verbose:gc
+ * TestHumongousShrinkHeap
+ */
+
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryUsage;
+import java.util.ArrayList;
+import java.util.List;
+import sun.management.ManagementFactoryHelper;
+import static com.oracle.java.testlibrary.Asserts.*;
+
+public class TestHumongousShrinkHeap {
+
+    public static final String MIN_FREE_RATIO_FLAG_NAME = "MinHeapFreeRatio";
+    public static final String MAX_FREE_RATIO_FLAG_NAME = "MaxHeapFreeRatio";
+
+    private static final List<List<byte[]>> garbage = new ArrayList<>();
+    private static final int REGION_SIZE = 1024 * 1024; // 1M
+    private static final int LISTS_COUNT = 10;
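+    // 0.9 of a 1M region is more than half a G1 region, so each allocated
+    // byte[] is treated as a humongous object.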
+    private static final int HUMON_SIZE = Math.round(.9f * REGION_SIZE);
+    private static final long AVAILABLE_MEMORY
+                              = Runtime.getRuntime().freeMemory();
+    private static final int HUMON_COUNT
+                             = (int) ((AVAILABLE_MEMORY / HUMON_SIZE)
+            / LISTS_COUNT);
+
+
+    public static void main(String[] args) {
+        System.out.format("Running with %s max heap size. "
+                + "Will allocate humongous object of %s size %d times.%n",
+                MemoryUsagePrinter.humanReadableByteCount(AVAILABLE_MEMORY, false),
+                MemoryUsagePrinter.humanReadableByteCount(HUMON_SIZE, false),
+                HUMON_COUNT
+        );
+        new TestHumongousShrinkHeap().test();
+    }
+
+    private final void test() {
+        System.gc();
+        MemoryUsagePrinter.printMemoryUsage("init");
+
+        allocate();
+        MemoryUsagePrinter.printMemoryUsage("allocated");
+        MemoryUsage muFull = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
+
+        free();
+        MemoryUsagePrinter.printMemoryUsage("free");
+        MemoryUsage muFree = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
+
+        assertLessThan(muFree.getCommitted(), muFull.getCommitted(), String.format(
+                "committed free heap size is not less than committed full heap size, heap hasn't been shrunk?%n"
+                + "%s = %s%n%s = %s",
+                MIN_FREE_RATIO_FLAG_NAME,
+                ManagementFactoryHelper.getDiagnosticMXBean().getVMOption(MIN_FREE_RATIO_FLAG_NAME).getValue(),
+                MAX_FREE_RATIO_FLAG_NAME,
+                ManagementFactoryHelper.getDiagnosticMXBean().getVMOption(MAX_FREE_RATIO_FLAG_NAME).getValue()
+        ));
+    }
+
+    private void allocate() {
+
+        for (int i = 0; i < LISTS_COUNT; i++) {
+            List<byte[]> stuff = new ArrayList<>();
+            allocateList(stuff, HUMON_COUNT, HUMON_SIZE);
+            MemoryUsagePrinter.printMemoryUsage("allocate #" + (i+1));
+            garbage.add(stuff);
+        }
+    }
+
+    private void free() {
+        // do not free the last list
+        garbage.subList(0, garbage.size() - 1).clear();
+
+        // do not free the last element of the last list
+        List<byte[]> stuff = garbage.get(garbage.size() - 1);
+        stuff.subList(0, stuff.size() - 1).clear();
+        System.gc();
+    }
+
+    private static void allocateList(List<byte[]> garbage, int count, int size) {
+        for (int i = 0; i < count; i++) {
+            garbage.add(new byte[size]);
+        }
+    }
+}
+
+/**
+ * Prints memory usage to standard output
+ */
+class MemoryUsagePrinter {
+
+    public static String humanReadableByteCount(long bytes, boolean si) {
+        int unit = si ? 1000 : 1024;
+        if (bytes < unit) {
+            return bytes + " B";
+        }
+        int exp = (int) (Math.log(bytes) / Math.log(unit));
+        String pre = (si ? "kMGTPE" : "KMGTPE").charAt(exp - 1) + (si ? "" : "i");
+        return String.format("%.1f %sB", bytes / Math.pow(unit, exp), pre);
+    }
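+    // Worked example (illustrative, not used by the test): with si == false and
+    // the default English locale, humanReadableByteCount(1536, false) returns
+    // "1.5 KiB" and humanReadableByteCount(1048576, false) returns "1.0 MiB".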
+
+    public static void printMemoryUsage(String label) {
+        MemoryUsage memusage = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
+        float freeratio = 1f - (float) memusage.getUsed() / memusage.getCommitted();
+        System.out.format("[%-24s] init: %-7s, used: %-7s, comm: %-7s, freeRatio ~= %.1f%%%n",
+                label,
+                humanReadableByteCount(memusage.getInit(), false),
+                humanReadableByteCount(memusage.getUsed(), false),
+                humanReadableByteCount(memusage.getCommitted(), false),
+                freeratio * 100
+        );
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/TestShrinkAuxiliaryData.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import static com.oracle.java.testlibrary.Asserts.assertLessThanOrEqual;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.Platform;
+import com.oracle.java.testlibrary.ProcessTools;
+import com.oracle.java.testlibrary.Utils;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryUsage;
+import java.text.DecimalFormat;
+import java.text.DecimalFormatSymbols;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import sun.misc.Unsafe;
+
+public class TestShrinkAuxiliaryData {
+
+    private final static String[] initialOpts = new String[]{
+        "-XX:MinHeapFreeRatio=10",
+        "-XX:MaxHeapFreeRatio=11",
+        "-XX:+UseG1GC",
+        "-XX:G1HeapRegionSize=1m",
+        "-XX:-ExplicitGCInvokesConcurrent",
+        "-XX:+PrintGCDetails"
+    };
+
+    private final int RSetCacheSize;
+
+    protected TestShrinkAuxiliaryData(int RSetCacheSize) {
+        this.RSetCacheSize = RSetCacheSize;
+    }
+
+    protected void test() throws Exception {
+        ArrayList<String> vmOpts = new ArrayList<>();
+        Collections.addAll(vmOpts, initialOpts);
+
+        int maxCacheSize = Math.max(0, Math.min(31, getMaxCacheSize()));
+        if (maxCacheSize < RSetCacheSize) {
+            System.out.format("Skiping test for %d cache size due max cache size %d",
+                    RSetCacheSize, maxCacheSize
+            );
+            return;
+        }
+
+        printTestInfo(maxCacheSize);
+
+        vmOpts.add("-XX:G1ConcRSLogCacheSize=" + RSetCacheSize);
+
+        vmOpts.addAll(Arrays.asList(Utils.getFilteredTestJavaOpts(
+                ShrinkAuxiliaryDataTest.prohibitedVmOptions)));
+
+        // on 32-bit VMs ObjectAlignmentInBytes is not an option
+        if (Platform.is32bit()) {
+            ArrayList<String> vmOptsWithoutAlign = new ArrayList<>(vmOpts);
+            vmOptsWithoutAlign.add(ShrinkAuxiliaryDataTest.class.getName());
+            performTest(vmOptsWithoutAlign);
+            return;
+        }
+
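+        // Try ObjectAlignmentInBytes values of 2^3 = 8 up to 2^8 = 256 bytes.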
+        for (int alignment = 3; alignment <= 8; alignment++) {
+            ArrayList<String> vmOptsWithAlign = new ArrayList<>(vmOpts);
+            vmOptsWithAlign.add("-XX:ObjectAlignmentInBytes="
+                    + (int) Math.pow(2, alignment));
+            vmOptsWithAlign.add(ShrinkAuxiliaryDataTest.class.getName());
+
+            performTest(vmOptsWithAlign);
+        }
+    }
+
+    private void performTest(List<String> opts) throws Exception {
+        ProcessBuilder pb
+                       = ProcessTools.createJavaProcessBuilder(
+                opts.toArray(new String[opts.size()])
+        );
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+    }
+
+    private void printTestInfo(int maxCacheSize) {
+
+        DecimalFormat grouped = new DecimalFormat("000,000");
+        DecimalFormatSymbols formatSymbols = grouped.getDecimalFormatSymbols();
+        formatSymbols.setGroupingSeparator(' ');
+        grouped.setDecimalFormatSymbols(formatSymbols);
+
+        System.out.format("Test will use %s bytes of memory of %s available%n"
+                + "Available memory is %s with %d bytes pointer size - can save %s pointers%n"
+                + "Max cache size: 2^%d = %s elements%n",
+                grouped.format(ShrinkAuxiliaryDataTest.getMemoryUsedByTest()),
+                grouped.format(Runtime.getRuntime().freeMemory()),
+                grouped.format(Runtime.getRuntime().freeMemory()
+                        - ShrinkAuxiliaryDataTest.getMemoryUsedByTest()),
+                Unsafe.ADDRESS_SIZE,
+                grouped.format((Runtime.getRuntime().freeMemory()
+                        - ShrinkAuxiliaryDataTest.getMemoryUsedByTest())
+                        / Unsafe.ADDRESS_SIZE),
+                maxCacheSize,
+                grouped.format((int) Math.pow(2, maxCacheSize))
+        );
+    }
+
+    /**
+     * Detects the maximum possible value of G1ConcRSLogCacheSize for the
+     * current process, based on the maximum amount of process memory available.
+     *
+     * @return power of two
+     */
+    private static int getMaxCacheSize() {
+        long availableMemory = Runtime.getRuntime().freeMemory()
+                - ShrinkAuxiliaryDataTest.getMemoryUsedByTest() - 1L;
+        if (availableMemory <= 0) {
+            return 0;
+        }
+        long availablePointersCount = availableMemory / Unsafe.ADDRESS_SIZE;
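+        // 63 - numberOfLeadingZeros(n) is floor(log2(n)); e.g. roughly 2^26
+        // free pointer slots would yield 26 (illustrative figure only).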
+        return (63 - (int) Long.numberOfLeadingZeros(availablePointersCount));
+    }
+
+    static class ShrinkAuxiliaryDataTest {
+
+        public static void main(String[] args) throws IOException {
+            int iterateCount = DEFAULT_ITERATION_COUNT;
+
+            if (args.length > 0) {
+                try {
+                    iterateCount = Integer.parseInt(args[0]);
+                } catch (NumberFormatException e) {
+                    // iterateCount keeps its default value
+                }
+            }
+
+            new ShrinkAuxiliaryDataTest().test(iterateCount);
+        }
+
+        class GarbageObject {
+
+            private final List<byte[]> payload = new ArrayList<>();
+            private final List<GarbageObject> ref = new LinkedList<>();
+
+            public GarbageObject(int size) {
+                payload.add(new byte[size]);
+            }
+
+            public void addRef(GarbageObject g) {
+                ref.add(g);
+            }
+
+            public void mutate() {
+                if (!payload.isEmpty() && payload.get(0).length > 0) {
+                    payload.get(0)[0] = (byte) (Math.random() * Byte.MAX_VALUE);
+                }
+            }
+        }
+
+        private final List<GarbageObject> garbage = new ArrayList<>();
+
+        public void test(int num_iterate) throws IOException {
+
+            allocate();
+            link();
+            mutate();
+            deallocate();
+
+            MemoryUsage muBeforeHeap
+                        = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
+            MemoryUsage muBeforeNonHeap
+                        = ManagementFactory.getMemoryMXBean().getNonHeapMemoryUsage();
+
+            for (int i = 0; i < num_iterate; i++) {
+                allocate();
+                link();
+                mutate();
+                deallocate();
+            }
+
+            System.gc();
+            MemoryUsage muAfterHeap
+                        = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
+            MemoryUsage muAfterNonHeap
+                        = ManagementFactory.getMemoryMXBean().getNonHeapMemoryUsage();
+
+            assertLessThanOrEqual(muAfterHeap.getCommitted(), muBeforeHeap.getCommitted(),
+                    String.format("heap decommit failed - after > before: %d > %d",
+                            muAfterHeap.getCommitted(), muBeforeHeap.getCommitted()
+                    )
+            );
+
+            if (muAfterHeap.getCommitted() < muBeforeHeap.getCommitted()) {
+                assertLessThanOrEqual(muAfterNonHeap.getCommitted(), muBeforeNonHeap.getCommitted(),
+                        String.format("non-heap decommit failed - after > before: %d > %d",
+                                muAfterNonHeap.getCommitted(), muBeforeNonHeap.getCommitted()
+                        )
+                );
+            }
+        }
+
+        private void allocate() {
+            for (int r = 0; r < REGIONS_TO_ALLOCATE; r++) {
+                for (int i = 0; i < NUM_OBJECTS_PER_REGION; i++) {
+                    GarbageObject g = new GarbageObject(REGION_SIZE
+                            / NUM_OBJECTS_PER_REGION);
+                    garbage.add(g);
+                }
+            }
+        }
+
+        /**
+         * Iterates through all allocated objects and links each one to objects
+         * in other regions.
+         */
+        private void link() {
+            for (int ig = 0; ig < garbage.size(); ig++) {
+                int regionNumber = ig / NUM_OBJECTS_PER_REGION;
+
+                for (int i = 0; i < NUM_LINKS; i++) {
+                    int regionToLink;
+                    do {
+                        regionToLink = (int) (Math.random()
+                                * REGIONS_TO_ALLOCATE);
+                    } while (regionToLink == regionNumber);
+
+                    // get random garbage object from random region
+                    garbage.get(ig).addRef(garbage.get(regionToLink
+                            * NUM_OBJECTS_PER_REGION + (int) (Math.random()
+                            * NUM_OBJECTS_PER_REGION)));
+                }
+            }
+        }
+
+        private void mutate() {
+            for (int ig = 0; ig < garbage.size(); ig++) {
+                garbage.get(ig).mutate();
+            }
+        }
+
+        private void deallocate() {
+            garbage.clear();
+            System.gc();
+        }
+
+        static long getMemoryUsedByTest() {
+            return REGIONS_TO_ALLOCATE * REGION_SIZE;
+        }
+
+        private static final int REGION_SIZE = 1024 * 1024;
+        private static final int DEFAULT_ITERATION_COUNT = 1;   // iterate main scenario
+        private static final int REGIONS_TO_ALLOCATE = 5;
+        private static final int NUM_OBJECTS_PER_REGION = 10;
+        private static final int NUM_LINKS = 20; // how many links to create for each object
+
+        private static final String[] prohibitedVmOptions = {
+            // remove this once the @requires option is in use
+            "-XX:\\+UseParallelGC",
+            "-XX:\\+UseSerialGC",
+            "-XX:\\+UseConcMarkSweepGC",
+            "-XX:\\+UseParallelOldGC",
+            "-XX:\\+UseParNewGC",
+            "-Xconcgc",
+            "-Xincgc"
+        };
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/TestShrinkAuxiliaryData00.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestShrinkAuxiliaryData00
+ * @bug 8038423
+ * @summary Checks that decommitment occurs for JVM with different
+ * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData00
+ * @run driver/timeout=720 TestShrinkAuxiliaryData00
+ */
+public class TestShrinkAuxiliaryData00 {
+
+    public static void main(String[] args) throws Exception {
+        new TestShrinkAuxiliaryData(0).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/TestShrinkAuxiliaryData05.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestShrinkAuxiliaryData05
+ * @bug 8038423
+ * @summary Checks that decommitment occurs for JVM with different
+ * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData05
+ * @run driver/timeout=720 TestShrinkAuxiliaryData05
+ */
+public class TestShrinkAuxiliaryData05 {
+
+    public static void main(String[] args) throws Exception {
+        new TestShrinkAuxiliaryData(5).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/TestShrinkAuxiliaryData10.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestShrinkAuxiliaryData10
+ * @bug 8038423
+ * @summary Checks that decommitment occurs for JVM with different
+ * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData10
+ * @run driver/timeout=720 TestShrinkAuxiliaryData10
+ */
+public class TestShrinkAuxiliaryData10 {
+
+    public static void main(String[] args) throws Exception {
+        new TestShrinkAuxiliaryData(10).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/TestShrinkAuxiliaryData15.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestShrinkAuxiliaryData15
+ * @bug 8038423
+ * @summary Checks that decommitment occurs for JVM with different
+ * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData15
+ * @run driver/timeout=720 TestShrinkAuxiliaryData15
+ */
+public class TestShrinkAuxiliaryData15 {
+
+    public static void main(String[] args) throws Exception {
+        new TestShrinkAuxiliaryData(15).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/TestShrinkAuxiliaryData20.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestShrinkAuxiliaryData20
+ * @bug 8038423
+ * @summary Checks that decommitment occurs for JVM with different
+ * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData20
+ * @run driver/timeout=720 TestShrinkAuxiliaryData20
+ */
+public class TestShrinkAuxiliaryData20 {
+
+    public static void main(String[] args) throws Exception {
+        new TestShrinkAuxiliaryData(20).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/TestShrinkAuxiliaryData25.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestShrinkAuxiliaryData25
+ * @bug 8038423
+ * @summary Checks that decommitment occurs for JVM with different
+ * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData25
+ * @run driver/timeout=720 TestShrinkAuxiliaryData25
+ */
+public class TestShrinkAuxiliaryData25 {
+
+    public static void main(String[] args) throws Exception {
+        new TestShrinkAuxiliaryData(25).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/TestShrinkAuxiliaryData30.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestShrinkAuxiliaryData30
+ * @bug 8038423
+ * @summary Checks that decommitment occurs for JVM with different
+ * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData30
+ * @run driver/timeout=720 TestShrinkAuxiliaryData30
+ */
+public class TestShrinkAuxiliaryData30 {
+
+    public static void main(String[] args) throws Exception {
+        new TestShrinkAuxiliaryData(30).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/TestShrinkDefragmentedHeap.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestShrinkDefragmentedHeap
+ * @bug 8038423
+ * @summary Verify that heap shrinks after GC in the presence of fragmentation due to humongous objects
+ *     1. allocate small objects mixed with humongous ones
+ *        "ssssHssssHssssHssssHssssHssssHssssH"
+ *     2. release all allocated object except the last humongous one
+ *        "..................................H"
+ *     3. invoke GC and check that memory is returned to the system (the amount of committed memory goes down)
+ *
+ * @library /testlibrary
+ */
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryUsage;
+import java.util.ArrayList;
+import java.util.List;
+import sun.management.ManagementFactoryHelper;
+import static com.oracle.java.testlibrary.Asserts.*;
+import com.oracle.java.testlibrary.ProcessTools;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+
+public class TestShrinkDefragmentedHeap {
+    // Since we store all the small objects, they become old and old regions are also allocated at the bottom of the heap
+    // together with humongous regions. So if there are a lot of old regions in the lower part of the heap,
+    // the humongous regions will be allocated in the upper part of the heap anyway.
+    // To avoid this, the eden space needs to be big enough to fit all the small objects.
+    private static final int INITIAL_HEAP_SIZE  = 200 * 1024 * 1024;
+    private static final int MINIMAL_YOUNG_SIZE = 190 * 1024 * 1024;
+    private static final int REGION_SIZE        = 1 * 1024 * 1024;
+
+    public static void main(String[] args) throws Throwable {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                "-XX:InitialHeapSize=" + INITIAL_HEAP_SIZE,
+                "-Xmn" + MINIMAL_YOUNG_SIZE,
+                "-XX:MinHeapFreeRatio=10",
+                "-XX:MaxHeapFreeRatio=11",
+                "-XX:+UseG1GC",
+                "-XX:G1HeapRegionSize=" + REGION_SIZE,
+                "-XX:-ExplicitGCInvokesConcurrent",
+                "-verbose:gc",
+                GCTest.class.getName()
+        );
+
+        OutputAnalyzer output = ProcessTools.executeProcess(pb);
+        output.shouldHaveExitValue(0);
+    }
+
+    static class GCTest {
+
+        private static final String MIN_FREE_RATIO_FLAG_NAME = "MinHeapFreeRatio";
+        private static final String MAX_FREE_RATIO_FLAG_NAME = "MaxHeapFreeRatio";
+        private static final String NEW_SIZE_FLAG_NAME = "NewSize";
+
+        private static final ArrayList<ArrayList<byte[]>> garbage = new ArrayList<>();
+
+        private static final int SMALL_OBJS_SIZE  = 10 * 1024; // 10kB
+        private static final int SMALL_OBJS_COUNT = MINIMAL_YOUNG_SIZE / (SMALL_OBJS_SIZE-1);
+        private static final int ALLOCATE_COUNT = 3;
+        // try to fit all humongous objects into the gap between the minimal young size and the initial heap size
+        // to avoid implicit GCs
+        private static final int HUMONG_OBJS_SIZE = (int) Math.max(
+                (INITIAL_HEAP_SIZE - MINIMAL_YOUNG_SIZE) / ALLOCATE_COUNT / 4,
+                REGION_SIZE * 1.1
+        );
+
+        private static final long initialHeapSize = getHeapMemoryUsage().getUsed();
+
+        public static void main(String[] args) throws InterruptedException {
+            new GCTest().test();
+        }
+
+        private void test() throws InterruptedException {
+            MemoryUsagePrinter.printMemoryUsage("init");
+
+            allocate();
+            System.gc();
+            MemoryUsage muFull = getHeapMemoryUsage();
+            MemoryUsagePrinter.printMemoryUsage("allocated");
+
+            free();
+            //Thread.sleep(1000); // sleep before measuring due to lags in JMX
+            MemoryUsage muFree = getHeapMemoryUsage();
+            MemoryUsagePrinter.printMemoryUsage("free");
+
+            assertLessThan(muFree.getCommitted(), muFull.getCommitted(), prepareMessageCommittedIsNotLess() );
+        }
+
+        private void allocate() {
+            System.out.format("Will allocate objects of small size = %s and humongous size = %s",
+                    MemoryUsagePrinter.humanReadableByteCount(SMALL_OBJS_SIZE, false),
+                    MemoryUsagePrinter.humanReadableByteCount(HUMONG_OBJS_SIZE, false)
+            );
+
+            for (int i = 0; i < ALLOCATE_COUNT; i++) {
+                ArrayList<byte[]> stuff = new ArrayList<>();
+                allocateList(stuff, SMALL_OBJS_COUNT / ALLOCATE_COUNT, SMALL_OBJS_SIZE);
+                garbage.add(stuff);
+
+                ArrayList<byte[]> humongousStuff = new ArrayList<>();
+                allocateList(humongousStuff, 4, HUMONG_OBJS_SIZE);
+                garbage.add(humongousStuff);
+            }
+        }
+
+        private void free() {
+            // do not free the last list
+            garbage.subList(0, garbage.size() - 1).clear();
+
+            // do not free the last element of the last list
+            ArrayList<byte[]> stuff = garbage.get(garbage.size() - 1);
+            if (stuff.size() > 1) {
+                stuff.subList(0, stuff.size() - 1).clear();
+            }
+            System.gc();
+        }
+
+        private String prepareMessageCommittedIsNotLess() {
+            return String.format(
+                    "committed free heap size is not less than committed full heap size, heap hasn't been shrunk?%n"
+                    + "%s = %s%n%s = %s",
+                    MIN_FREE_RATIO_FLAG_NAME,
+                    ManagementFactoryHelper.getDiagnosticMXBean().getVMOption(MIN_FREE_RATIO_FLAG_NAME).getValue(),
+                    MAX_FREE_RATIO_FLAG_NAME,
+                    ManagementFactoryHelper.getDiagnosticMXBean().getVMOption(MAX_FREE_RATIO_FLAG_NAME).getValue()
+            );
+        }
+
+        private static void allocateList(List<byte[]> garbage, int count, int size) {
+            for (int i = 0; i < count; i++) {
+                garbage.add(new byte[size]);
+            }
+        }
+    }
+
+    static MemoryUsage getHeapMemoryUsage() {
+        return ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
+    }
+
+    /**
+     * Prints memory usage to standard output
+     */
+    static class MemoryUsagePrinter {
+
+        public static String humanReadableByteCount(long bytes, boolean si) {
+            int unit = si ? 1000 : 1024;
+            if (bytes < unit) {
+                return bytes + " B";
+            }
+            int exp = (int) (Math.log(bytes) / Math.log(unit));
+            String pre = (si ? "kMGTPE" : "KMGTPE").charAt(exp - 1) + (si ? "" : "i");
+            return String.format("%.1f %sB", bytes / Math.pow(unit, exp), pre);
+        }
+
+        public static void printMemoryUsage(String label) {
+            MemoryUsage memusage = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
+            float freeratio = 1f - (float) memusage.getUsed() / memusage.getCommitted();
+            System.out.format("[%-24s] init: %-7s, used: %-7s, comm: %-7s, freeRatio ~= %.1f%%%n",
+                    label,
+                    humanReadableByteCount(memusage.getInit(), false),
+                    humanReadableByteCount(memusage.getUsed(), false),
+                    humanReadableByteCount(memusage.getCommitted(), false),
+                    freeratio * 100
+            );
+        }
+    }
+}
--- a/test/gc/g1/TestSummarizeRSetStatsThreads.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/gc/g1/TestSummarizeRSetStatsThreads.java	Fri Apr 10 16:58:26 2015 -0700
@@ -53,8 +53,8 @@
 
     // a zero in refinement thread numbers indicates that the value in ParallelGCThreads should be used.
     // Additionally use at least one thread.
-    int expectedNumRefinementThreads = refinementThreads == 0 ? workerThreads : refinementThreads;
-    expectedNumRefinementThreads = Math.max(1, expectedNumRefinementThreads);
+    int expectedNumRefinementThreads = refinementThreads;
+
     // create the pattern made up of n copies of a floating point number pattern
     String numberPattern = String.format("%0" + expectedNumRefinementThreads + "d", 0)
       .replace("0", "\\s+\\d+\\.\\d+");
@@ -73,9 +73,9 @@
       return;
     }
     // different valid combinations of number of refinement and gc worker threads
-    runTest(0, 0);
-    runTest(0, 5);
-    runTest(5, 0);
+    runTest(1, 1);
+    runTest(1, 5);
+    runTest(5, 1);
     runTest(10, 10);
     runTest(1, 2);
     runTest(4, 3);
--- a/test/gc/g1/TestSummarizeRSetStatsTools.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/gc/g1/TestSummarizeRSetStatsTools.java	Fri Apr 10 16:58:26 2015 -0700
@@ -88,7 +88,6 @@
         ArrayList<String> finalargs = new ArrayList<String>();
         String[] defaultArgs = new String[] {
             "-XX:+UseG1GC",
-            "-XX:+UseCompressedOops",
             "-Xmn4m",
             "-Xmx20m",
             "-XX:InitiatingHeapOccupancyPercent=100", // we don't want the additional GCs due to initial marking
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/logging/TestGCId.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestGCId
+ * @bug 8043607
+ * @summary Ensure that the GCId is logged
+ * @key gc
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.ProcessTools;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+
+public class TestGCId {
+  public static void main(String[] args) throws Exception {
+    testGCId("UseParallelGC", "PrintGC");
+    testGCId("UseParallelGC", "PrintGCDetails");
+
+    testGCId("UseG1GC", "PrintGC");
+    testGCId("UseG1GC", "PrintGCDetails");
+
+    testGCId("UseConcMarkSweepGC", "PrintGC");
+    testGCId("UseConcMarkSweepGC", "PrintGCDetails");
+
+    testGCId("UseSerialGC", "PrintGC");
+    testGCId("UseSerialGC", "PrintGCDetails");
+  }
+
+  private static void verifyContainsGCIDs(OutputAnalyzer output) {
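+    // The patterns below assume that, with -XX:+PrintGCID, each GC log line is
+    // prefixed with "#<id>: " (e.g. "#0: [..." for the first GC); the exact
+    // format is inferred from these regular expressions.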
+    output.shouldMatch("^#0: \\[");
+    output.shouldMatch("^#1: \\[");
+    output.shouldHaveExitValue(0);
+  }
+
+  private static void verifyContainsNoGCIDs(OutputAnalyzer output) {
+    output.shouldNotMatch("^#[0-9]+: \\[");
+    output.shouldHaveExitValue(0);
+  }
+
+  private static void testGCId(String gcFlag, String logFlag) throws Exception {
+    // GCID logging enabled
+    ProcessBuilder pb_enabled =
+      ProcessTools.createJavaProcessBuilder("-XX:+" + gcFlag, "-XX:+" + logFlag, "-Xmx10M", "-XX:+PrintGCID", GCTest.class.getName());
+    verifyContainsGCIDs(new OutputAnalyzer(pb_enabled.start()));
+
+    // GCID logging disabled
+    ProcessBuilder pb_disabled =
+      ProcessTools.createJavaProcessBuilder("-XX:+" + gcFlag, "-XX:+" + logFlag, "-Xmx10M", "-XX:-PrintGCID", GCTest.class.getName());
+    verifyContainsNoGCIDs(new OutputAnalyzer(pb_disabled.start()));
+
+    // GCID logging default
+    ProcessBuilder pb_default =
+      ProcessTools.createJavaProcessBuilder("-XX:+" + gcFlag, "-XX:+" + logFlag, "-Xmx10M", GCTest.class.getName());
+    verifyContainsNoGCIDs(new OutputAnalyzer(pb_default.start()));
+  }
+
+  static class GCTest {
+    private static byte[] garbage;
+    public static void main(String [] args) {
+      System.out.println("Creating garbage");
+      // create 128MB of garbage. This should result in at least one GC
+      for (int i = 0; i < 1024; i++) {
+        garbage = new byte[128 * 1024];
+      }
+      // do a system gc to get one more gc
+      System.gc();
+      System.out.println("Done");
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/metaspace/TestCapacityUntilGCWrapAround.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @key gc
+ * @bug 8049831
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestCapacityUntilGCWrapAround
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI TestCapacityUntilGCWrapAround
+ */
+
+import sun.hotspot.WhiteBox;
+
+import com.oracle.java.testlibrary.Asserts;
+import com.oracle.java.testlibrary.Platform;
+
+public class TestCapacityUntilGCWrapAround {
+    private static long MB = 1024 * 1024;
+    private static long GB = 1024 * MB;
+    private static long MAX_UINT = 4 * GB - 1; // On 32-bit platforms
+
+    public static void main(String[] args) {
+        if (Platform.is32bit()) {
+            WhiteBox wb = WhiteBox.getWhiteBox();
+
+            long before = wb.metaspaceCapacityUntilGC();
+            // Now force possible overflow of capacity_until_GC.
+            long after = wb.incMetaspaceCapacityUntilGC(MAX_UINT);
+
+            Asserts.assertGTE(after, before,
+                              "Increasing with MAX_UINT should not cause wrap around: " + after + " < " + before);
+            Asserts.assertLTE(after, MAX_UINT,
+                              "Increasing with MAX_UINT should not cause value larger than MAX_UINT:" + after);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/survivorAlignment/AlignmentHelper.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.lang.management.MemoryPoolMXBean;
+import java.util.Optional;
+
+import sun.hotspot.WhiteBox;
+
+/**
+ * Helper class that provides information about the alignment of objects in a
+ * particular heap space, the expected memory usage after object allocation,
+ * and so on.
+ */
+public class AlignmentHelper {
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+
+    private static final long OBJECT_ALIGNMENT_IN_BYTES_FOR_32_VM = 8L;
+
+    /**
+     * Max relative allowed actual memory usage deviation from expected memory
+     * usage.
+     */
+    private static final float MAX_RELATIVE_DEVIATION = 0.05f; // 5%
+
+    public static final long OBJECT_ALIGNMENT_IN_BYTES = Optional.ofNullable(
+            AlignmentHelper.WHITE_BOX.getIntxVMFlag("ObjectAlignmentInBytes"))
+            .orElse(AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES_FOR_32_VM);
+
+    public static final long SURVIVOR_ALIGNMENT_IN_BYTES = Optional.ofNullable(
+            AlignmentHelper.WHITE_BOX.getIntxVMFlag("SurvivorAlignmentInBytes"))
+            .orElseThrow(() -> new AssertionError(
+                    "Unable to get SurvivorAlignmentInBytes value"));
+    /**
+     * Min amount of memory that will be occupied by an object.
+     */
+    public static final long MIN_OBJECT_SIZE
+            = AlignmentHelper.WHITE_BOX.getObjectSize(new Object());
+    /**
+     * Min amount of memory that will be occupied by an empty byte array.
+     */
+    public static final long MIN_ARRAY_SIZE
+            = AlignmentHelper.WHITE_BOX.getObjectSize(new byte[0]);
+
+    /**
+     * Precision at which actual memory usage in a heap space represented by
+     * this sizing helper could be measured.
+     */
+    private final long memoryUsageMeasurementPrecision;
+    /**
+     * Min amount of memory that will be occupied by an object allocated in a
+     * heap space represented by this sizing helper.
+     */
+    private final long minObjectSizeInThisSpace;
+    /**
+     * Object's alignment in a heap space represented by this sizing helper.
+     */
+    private final long objectAlignmentInThisRegion;
+    /**
+     * MemoryPoolMXBean associated with a heap space represented by this sizing
+     * helper.
+     */
+    private final MemoryPoolMXBean poolMXBean;
+
+    private static long alignUp(long value, long alignment) {
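+        // Rounds value up to the nearest multiple of alignment;
+        // e.g. alignUp(17, 8) == 24 and alignUp(16, 8) == 16 (illustrative values).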
+        return ((value - 1) / alignment + 1) * alignment;
+    }
+
+    protected AlignmentHelper(long memoryUsageMeasurementPrecision,
+            long objectAlignmentInThisRegion, long minObjectSizeInThisSpace,
+            MemoryPoolMXBean poolMXBean) {
+        this.memoryUsageMeasurementPrecision = memoryUsageMeasurementPrecision;
+        this.minObjectSizeInThisSpace = minObjectSizeInThisSpace;
+        this.objectAlignmentInThisRegion = objectAlignmentInThisRegion;
+        this.poolMXBean = poolMXBean;
+    }
+
+    /**
+     * Returns how many objects have to be allocated to fill
+     * {@code memoryToFill} bytes in this heap space using objects of size
+     * {@code objectSize}.
+     */
+    public int getObjectsCount(long memoryToFill, long objectSize) {
+        return (int) (memoryToFill / getObjectSizeInThisSpace(objectSize));
+    }
+
+    /**
+     * Returns the amount of memory that {@code objectsCount} objects of size
+     * {@code objectSize} will occupy in this space after allocation.
+     */
+    public long getExpectedMemoryUsage(long objectSize, int objectsCount) {
+        long correctedObjectSize = getObjectSizeInThisSpace(objectSize);
+        return AlignmentHelper.alignUp(correctedObjectSize * objectsCount,
+                memoryUsageMeasurementPrecision);
+    }
+
+    /**
+     * Returns current memory usage in this heap space.
+     */
+    public long getActualMemoryUsage() {
+        return poolMXBean.getUsage().getUsed();
+    }
+
+    /**
+     * Returns the maximum allowed memory usage deviation from
+     * {@code expectedMemoryUsage}, based on the maximum allowed relative
+     * deviation {@code MAX_RELATIVE_DEVIATION}.
+     *
+     * Note that value returned by this method is aligned according to
+     * memory measurement precision for this heap space.
+     */
+    public long getAllowedMemoryUsageDeviation(long expectedMemoryUsage) {
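+        // For instance, an expected usage of 1,000,000 bytes with a 4096-byte
+        // measurement precision gives a 5% deviation of 50,000 bytes, rounded
+        // up to 53,248 bytes (illustrative figures only).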
+        long unalignedDeviation = (long) (expectedMemoryUsage *
+                AlignmentHelper.MAX_RELATIVE_DEVIATION);
+        return AlignmentHelper.alignUp(unalignedDeviation,
+                memoryUsageMeasurementPrecision);
+    }
+
+    /**
+     * Returns amount of memory that will be occupied by an object with size
+     * {@code objectSize} in this heap space.
+     */
+    public long getObjectSizeInThisSpace(long objectSize) {
+        objectSize = Math.max(objectSize, minObjectSizeInThisSpace);
+
+        long alignedObjectSize = AlignmentHelper.alignUp(objectSize,
+                objectAlignmentInThisRegion);
+        long sizeDiff = alignedObjectSize - objectSize;
+
+        // If there is not enough space to fit a padding object, the object will
+        // be aligned to {@code 2 * objectAlignmentInThisRegion}.
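+        // Illustrative example (assuming 8-byte object alignment, a 16-byte
+        // minimum object size and 64-byte alignment in this region): a 52-byte
+        // object leaves a 12-byte gap that cannot hold a padding object, so the
+        // branch below bumps it to the next boundary, 128 bytes.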
+        if (sizeDiff >= AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES
+                && sizeDiff < AlignmentHelper.MIN_OBJECT_SIZE) {
+            alignedObjectSize += AlignmentHelper.MIN_OBJECT_SIZE;
+            alignedObjectSize = AlignmentHelper.alignUp(alignedObjectSize,
+                    objectAlignmentInThisRegion);
+        }
+
+        return alignedObjectSize;
+    }
+    @Override
+    public String toString() {
+        StringBuilder builder = new StringBuilder();
+
+        builder.append(String.format("AlignmentHelper for memory pool '%s':%n",
+                poolMXBean.getName()));
+        builder.append(String.format("Memory usage measurement precision: %d%n",
+                memoryUsageMeasurementPrecision));
+        builder.append(String.format("Min object size in this space: %d%n",
+                minObjectSizeInThisSpace));
+        builder.append(String.format("Object alignment in this space: %d%n",
+                objectAlignmentInThisRegion));
+
+        return builder.toString();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/survivorAlignment/SurvivorAlignmentTestMain.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,416 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryPoolMXBean;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import com.oracle.java.testlibrary.Asserts;
+import com.sun.management.ThreadMXBean;
+import sun.hotspot.WhiteBox;
+import sun.misc.Unsafe;
+
+/**
+ * Main class for tests on {@code SurvivorAlignmentInBytes} option.
+ *
+ * Typical usage is to obtain an instance using the fromArgs method, allocate
+ * objects, and verify that the actual memory usage in the tested heap space is
+ * close to the expected usage.
+ */
+public class SurvivorAlignmentTestMain {
+    enum HeapSpace {
+        EDEN,
+        SURVIVOR,
+        TENURED
+    }
+
+    public static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+
+    public static final long MAX_TENURING_THRESHOLD = Optional.ofNullable(
+            SurvivorAlignmentTestMain.WHITE_BOX.getIntxVMFlag(
+                    "MaxTenuringThreshold")).orElse(15L);
+
+    /**
+     * Regexp used to parse memory size params, like 2G, 34m or 15k.
+     */
+    private static final Pattern SIZE_REGEX
+            = Pattern.compile("(?<size>[0-9]+)(?<multiplier>[GMKgmk])?");
+
+    // Names of different heap spaces.
+    private static final String DEF_NEW_EDEN = "Eden Space";
+    private static final String DEF_NEW_SURVIVOR = "Survivor Space";
+    private static final String PAR_NEW_EDEN = "Par Eden Space";
+    private static final String PAR_NEW_SURVIVOR = "Par Survivor Space";
+    private static final String PS_EDEN = "PS Eden Space";
+    private static final String PS_SURVIVOR = "PS Survivor Space";
+    private static final String G1_EDEN = "G1 Eden Space";
+    private static final String G1_SURVIVOR = "G1 Survivor Space";
+    private static final String SERIAL_TENURED = "Tenured Gen";
+    private static final String CMS_TENURED = "CMS Old Gen";
+    private static final String PS_TENURED = "PS Old Gen";
+    private static final String G1_TENURED = "G1 Old Gen";
+
+    private static final long G1_HEAP_REGION_SIZE = Optional.ofNullable(
+            SurvivorAlignmentTestMain.WHITE_BOX.getUintxVMFlag(
+                    "G1HeapRegionSize")).orElse(-1L);
+
+    /**
+     * Min size of free chunk in CMS generation.
+     * An object allocated in CMS generation will at least occupy this amount
+     * of bytes.
+     */
+    private static final long CMS_MIN_FREE_CHUNK_SIZE
+            = 3L * Unsafe.ADDRESS_SIZE;
+
+    private static final AlignmentHelper EDEN_SPACE_HELPER;
+    private static final AlignmentHelper SURVIVOR_SPACE_HELPER;
+    private static final AlignmentHelper TENURED_SPACE_HELPER;
+    /**
+     * Amount of memory that should be filled during a test run.
+     */
+    private final long memoryToFill;
+    /**
+     * The size of the objects that will be allocated during a test run.
+     */
+    private final long objectSize;
+    /**
+     * Amount of memory that will be actually occupied by an object in eden
+     * space.
+     */
+    private final long actualObjectSize;
+    /**
+     * Storage for allocated objects.
+     */
+    private final Object[] garbage;
+    /**
+     * Heap space whose memory usage is the subject of assertions during the test
+     * run.
+     */
+    private final HeapSpace testedSpace;
+
+    private long[] baselinedThreadMemoryUsage = null;
+    private long[] threadIds = null;
+
+    /**
+     * Initialize {@code EDEN_SPACE_HELPER}, {@code SURVIVOR_SPACE_HELPER} and
+     * {@code TENURED_SPACE_HELPER} to represent heap spaces in use.
+     *
+     * Note that, regardless of the GC in use, object alignment in survivor
+     * space is expected to equal the {@code SurvivorAlignmentInBytes} value,
+     * and alignment in other spaces is expected to equal the
+     * {@code ObjectAlignmentInBytes} value.
+     *
+     * In the CMS generation we can't allocate less than the {@code MinFreeChunk}
+     * value; for other GCs we expect that an object of size
+     * {@code MIN_OBJECT_SIZE} can be allocated as is (of course, its size may
+     * be aligned according to the alignment value used in a particular space).
+     *
+     * For the G1 GC, MXBeans can report memory usage only with region-size
+     * precision (if an object is allocated in some G1 heap region, the whole
+     * region is claimed as used), so for G1's spaces the precision equals the
+     * {@code G1HeapRegionSize} value.
+     */
+    static {
+        AlignmentHelper edenHelper = null;
+        AlignmentHelper survivorHelper = null;
+        AlignmentHelper tenuredHelper = null;
+        for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans()) {
+            switch (pool.getName()) {
+                case SurvivorAlignmentTestMain.DEF_NEW_EDEN:
+                case SurvivorAlignmentTestMain.PAR_NEW_EDEN:
+                case SurvivorAlignmentTestMain.PS_EDEN:
+                    Asserts.assertNull(edenHelper,
+                            "Only one bean for eden space is expected.");
+                    edenHelper = new AlignmentHelper(
+                            AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
+                            AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
+                            AlignmentHelper.MIN_OBJECT_SIZE, pool);
+                    break;
+                case SurvivorAlignmentTestMain.G1_EDEN:
+                    Asserts.assertNull(edenHelper,
+                            "Only one bean for eden space is expected.");
+                    edenHelper = new AlignmentHelper(
+                            SurvivorAlignmentTestMain.G1_HEAP_REGION_SIZE,
+                            AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
+                            AlignmentHelper.MIN_OBJECT_SIZE, pool);
+                    break;
+                case SurvivorAlignmentTestMain.DEF_NEW_SURVIVOR:
+                case SurvivorAlignmentTestMain.PAR_NEW_SURVIVOR:
+                case SurvivorAlignmentTestMain.PS_SURVIVOR:
+                    Asserts.assertNull(survivorHelper,
+                            "Only one bean for survivor space is expected.");
+                    survivorHelper = new AlignmentHelper(
+                            AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
+                            AlignmentHelper.SURVIVOR_ALIGNMENT_IN_BYTES,
+                            AlignmentHelper.MIN_OBJECT_SIZE, pool);
+                    break;
+                case SurvivorAlignmentTestMain.G1_SURVIVOR:
+                    Asserts.assertNull(survivorHelper,
+                            "Only one bean for survivor space is expected.");
+                    survivorHelper = new AlignmentHelper(
+                            SurvivorAlignmentTestMain.G1_HEAP_REGION_SIZE,
+                            AlignmentHelper.SURVIVOR_ALIGNMENT_IN_BYTES,
+                            AlignmentHelper.MIN_OBJECT_SIZE, pool);
+                    break;
+                case SurvivorAlignmentTestMain.SERIAL_TENURED:
+                case SurvivorAlignmentTestMain.PS_TENURED:
+                case SurvivorAlignmentTestMain.G1_TENURED:
+                    Asserts.assertNull(tenuredHelper,
+                            "Only one bean for tenured space is expected.");
+                    tenuredHelper = new AlignmentHelper(
+                            AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
+                            AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
+                            AlignmentHelper.MIN_OBJECT_SIZE, pool);
+                    break;
+                case SurvivorAlignmentTestMain.CMS_TENURED:
+                    Asserts.assertNull(tenuredHelper,
+                            "Only one bean for tenured space is expected.");
+                    tenuredHelper = new AlignmentHelper(
+                            AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
+                            AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
+                            SurvivorAlignmentTestMain.CMS_MIN_FREE_CHUNK_SIZE,
+                            pool);
+                    break;
+            }
+        }
+        EDEN_SPACE_HELPER = Objects.requireNonNull(edenHelper,
+                "AlignmentHelper for eden space should be initialized.");
+        SURVIVOR_SPACE_HELPER = Objects.requireNonNull(survivorHelper,
+                "AlignmentHelper for survivor space should be initialized.");
+        TENURED_SPACE_HELPER = Objects.requireNonNull(tenuredHelper,
+                "AlignmentHelper for tenured space should be initialized.");
+    }
+    /**
+     * Returns a SurvivorAlignmentTestMain instance constructed using CLI
+     * options.
+     *
+     * The following options are expected:
+     * <ul>
+     *     <li>memoryToFill</li>
+     *     <li>objectSize</li>
+     *     <li>tested heap space name</li>
+     * </ul>
+     *
+     * The first two arguments may contain a multiplier suffix k, m or g.
+     */
+    public static SurvivorAlignmentTestMain fromArgs(String[] args) {
+        Asserts.assertEQ(args.length, 3, "Expected three arguments: "
+                + "memory size, object size and tested heap space name.");
+
+        long memoryToFill = parseSize(args[0]);
+        long objectSize = Math.max(parseSize(args[1]),
+                AlignmentHelper.MIN_ARRAY_SIZE);
+        HeapSpace testedSpace = HeapSpace.valueOf(args[2]);
+
+        return new SurvivorAlignmentTestMain(memoryToFill, objectSize,
+                testedSpace);
+    }
+
+    /**
+     * Returns a value parsed from a string with format
+     * &lt;integer&gt;&lt;multiplier&gt;.
+     */
+    private static long parseSize(String sizeString) {
+        Matcher matcher = SIZE_REGEX.matcher(sizeString);
+        Asserts.assertTrue(matcher.matches(),
+                "sizeString should have the following format \"[0-9]+([GMKgmk])?\"");
+        long size = Long.valueOf(matcher.group("size"));
+
+        if (matcher.group("multiplier") != null) {
+            long K = 1024L;
+            // Intentional fall-through: each matching case multiplies size by
+            // another factor of 1024.
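+            // For example, "2g" enters at case "g" and falls through "m" and
+            // "k", yielding 2 * 1024 * 1024 * 1024 bytes.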
+            switch (matcher.group("multiplier").toLowerCase()) {
+                case "g":
+                    size *= K;
+                case "m":
+                    size *= K;
+                case "k":
+                    size *= K;
+            }
+        }
+        return size;
+    }
+
+    private SurvivorAlignmentTestMain(long memoryToFill, long objectSize,
+            HeapSpace testedSpace) {
+        this.objectSize = objectSize;
+        this.memoryToFill = memoryToFill;
+        this.testedSpace = testedSpace;
+
+        AlignmentHelper helper = SurvivorAlignmentTestMain.EDEN_SPACE_HELPER;
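+        // The eden helper is used for sizing here, presumably because all test
+        // objects are initially allocated in eden regardless of the tested space.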
+
+        this.actualObjectSize = helper.getObjectSizeInThisSpace(
+                this.objectSize);
+        int arrayLength = helper.getObjectsCount(memoryToFill, this.objectSize);
+        garbage = new Object[arrayLength];
+    }
+
+    /**
+     * Allocates byte arrays to fill {@code memoryToFill} bytes of memory.
+     */
+    public void allocate() {
+        int byteArrayLength = Math.max((int) (objectSize
+                - Unsafe.ARRAY_BYTE_BASE_OFFSET), 0);
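+        // Subtracting the array header (ARRAY_BYTE_BASE_OFFSET) makes each
+        // allocated byte[] occupy roughly objectSize bytes in total.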
+
+        for (int i = 0; i < garbage.length; i++) {
+            garbage[i] = new byte[byteArrayLength];
+        }
+    }
+
+    /**
+     * Drops references to the objects allocated by the {@code allocate} call
+     * so that the occupied memory can be reclaimed.
+     */
+    public void release() {
+        for (int i = 0; i < garbage.length; i++) {
+            garbage[i] = null;
+        }
+    }
+
+    /**
+     * Returns the expected amount of memory occupied in the tested heap space
+     * by the objects referenced from the {@code garbage} array.
+     */
+    public long getExpectedMemoryUsage() {
+        AlignmentHelper alignmentHelper = getAlignmentHelper(testedSpace);
+        return alignmentHelper.getExpectedMemoryUsage(objectSize,
+                garbage.length);
+    }
+
+    /**
+     * Verifies that memory usage in the tested heap space deviates from
+     * {@code expectedUsage} by no more than {@code MAX_RELATIVE_DEVIATION}.
+     */
+    public void verifyMemoryUsage(long expectedUsage) {
+        AlignmentHelper alignmentHelper = getAlignmentHelper(testedSpace);
+
+        long actualMemoryUsage = alignmentHelper.getActualMemoryUsage();
+        boolean otherThreadsAllocatedMemory = areOtherThreadsAllocatedMemory();
+
+        long memoryUsageDiff = Math.abs(actualMemoryUsage - expectedUsage);
+        long maxAllowedUsageDiff
+                = alignmentHelper.getAllowedMemoryUsageDeviation(expectedUsage);
+
+        System.out.println("Verifying memory usage in space: " + testedSpace);
+        System.out.println("Allocated objects count: " + garbage.length);
+        System.out.println("Desired object size: " + objectSize);
+        System.out.println("Actual object size: " + actualObjectSize);
+        System.out.println("Expected object size in space: "
+                + alignmentHelper.getObjectSizeInThisSpace(objectSize));
+        System.out.println("Expected memory usage: " + expectedUsage);
+        System.out.println("Actual memory usage: " + actualMemoryUsage);
+        System.out.println("Memory usage diff: " + memoryUsageDiff);
+        System.out.println("Max allowed usage diff: " + maxAllowedUsageDiff);
+
+        if (memoryUsageDiff > maxAllowedUsageDiff
+                && otherThreadsAllocatedMemory) {
+            System.out.println("Memory usage diff exceeds the allowed value, "
+                    + "but it seems like other threads allocated objects, "
+                    + "so the check is skipped");
+            return;
+        }
+
+        Asserts.assertLTE(memoryUsageDiff, maxAllowedUsageDiff,
+                "Actual memory usage should not deviate from expected by " +
+                        "more than " + maxAllowedUsageDiff);
+    }
+
+    /**
+     * Baselines the amount of memory allocated by each thread.
+     */
+    public void baselineMemoryAllocation() {
+        ThreadMXBean bean = (ThreadMXBean) ManagementFactory.getThreadMXBean();
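+        // getThreadAllocatedBytes is provided by the com.sun.management
+        // ThreadMXBean extension, hence the cast above.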
+        threadIds = bean.getAllThreadIds();
+        baselinedThreadMemoryUsage = bean.getThreadAllocatedBytes(threadIds);
+    }
+
+    /**
+     * Checks if threads other than the current thread were allocating objects
+     * after the {@code baselineMemoryAllocation} call.
+     *
+     * If {@code baselineMemoryAllocation} was not called, then this method
+     * returns {@code false}.
+     */
+    public boolean areOtherThreadsAllocatedMemory() {
+        if (baselinedThreadMemoryUsage == null) {
+            return false;
+        }
+
+        ThreadMXBean bean = (ThreadMXBean) ManagementFactory.getThreadMXBean();
+        long currentMemoryAllocation[]
+                = bean.getThreadAllocatedBytes(threadIds);
+        boolean otherThreadsAllocatedMemory = false;
+
+        System.out.println("Verifying amount of memory allocated by threads:");
+        for (int i = 0; i < threadIds.length; i++) {
+            System.out.format("Thread %d%nbaseline allocation: %d"
+                            + "%ncurrent allocation: %d%n", threadIds[i],
+                    baselinedThreadMemoryUsage[i], currentMemoryAllocation[i]);
+            System.out.println(bean.getThreadInfo(threadIds[i]));
+
+            long bytesAllocated = Math.abs(currentMemoryAllocation[i]
+                    - baselinedThreadMemoryUsage[i]);
+            if (bytesAllocated > 0
+                    && threadIds[i] != Thread.currentThread().getId()) {
+                otherThreadsAllocatedMemory = true;
+            }
+        }
+
+        return otherThreadsAllocatedMemory;
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder builder = new StringBuilder();
+
+        builder.append(String.format("SurvivorAlignmentTestMain info:%n"));
+        builder.append(String.format("Desired object size: %d%n", objectSize));
+        builder.append(String.format("Memory to fill: %d%n", memoryToFill));
+        builder.append(String.format("Objects to be allocated: %d%n",
+                garbage.length));
+
+        builder.append(String.format("Alignment helpers to be used: %n"));
+        for (HeapSpace heapSpace: HeapSpace.values()) {
+            builder.append(String.format("For space %s:%n%s%n", heapSpace,
+                    getAlignmentHelper(heapSpace)));
+        }
+
+        return builder.toString();
+    }
+
+    /**
+     * Returns the {@code AlignmentHelper} for the given {@code heapSpace}.
+     */
+    public static AlignmentHelper getAlignmentHelper(HeapSpace heapSpace) {
+        switch (heapSpace) {
+            case EDEN:
+                return SurvivorAlignmentTestMain.EDEN_SPACE_HELPER;
+            case SURVIVOR:
+                return SurvivorAlignmentTestMain.SURVIVOR_SPACE_HELPER;
+            case TENURED:
+                return SurvivorAlignmentTestMain.TENURED_SPACE_HELPER;
+            default:
+                throw new Error("Unexpected heap space: " + heapSpace);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/survivorAlignment/TestAllocationInEden.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8031323
+ * @summary Verify that an object's alignment in eden space is not affected by
+ *          SurvivorAlignmentInBytes option.
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestAllocationInEden SurvivorAlignmentTestMain AlignmentHelper
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=32 -XX:-UseTLAB
+ *                   -XX:OldSize=128m -XX:MaxHeapSize=192m
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   TestAllocationInEden 10m 9 EDEN
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=32 -XX:-UseTLAB
+ *                   -XX:OldSize=128m -XX:MaxHeapSize=192m
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   TestAllocationInEden 10m 47 EDEN
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=64 -XX:-UseTLAB
+ *                   -XX:OldSize=128m  -XX:MaxHeapSize=192m
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   TestAllocationInEden 10m 9 EDEN
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=64 -XX:-UseTLAB
+ *                   -XX:OldSize=128m  -XX:MaxHeapSize=192m
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   TestAllocationInEden 10m 87 EDEN
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=128 -XX:-UseTLAB
+ *                   -XX:OldSize=128m -XX:MaxHeapSize=192m
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   TestAllocationInEden 10m 9 EDEN
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=128 -XX:-UseTLAB
+ *                   -XX:OldSize=128m -XX:MaxHeapSize=192m
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   TestAllocationInEden 10m 147 EDEN
+ */
+public class TestAllocationInEden {
+    public static void main(String args[]) {
+        SurvivorAlignmentTestMain test
+                = SurvivorAlignmentTestMain.fromArgs(args);
+        System.out.println(test);
+
+        long expectedMemoryUsage = test.getExpectedMemoryUsage();
+        test.baselineMemoryAllocation();
+        System.gc();
+
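+        // No GC is triggered after the allocation, so the objects are expected
+        // to still reside in eden when memory usage is verified.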
+        test.allocate();
+
+        test.verifyMemoryUsage(expectedMemoryUsage);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/survivorAlignment/TestPromotionFromEdenToTenured.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8031323
+ * @summary Verify that objects promoted from eden space to tenured space during
+ *          full GC are not aligned to SurvivorAlignmentInBytes value.
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestPromotionFromEdenToTenured SurvivorAlignmentTestMain
+ *        AlignmentHelper
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:OldSize=32m -XX:MaxHeapSize=96m -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=32
+ *                   TestPromotionFromEdenToTenured 10m 9 TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:OldSize=32m -XX:MaxHeapSize=96m -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=32
+ *                   TestPromotionFromEdenToTenured 10m 47 TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:OldSize=32m  -XX:MaxHeapSize=96m
+ *                   -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=64
+ *                   TestPromotionFromEdenToTenured 10m 9 TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:OldSize=32m -XX:MaxHeapSize=128m
+ *                   -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=64
+ *                   TestPromotionFromEdenToTenured 10m 87 TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:OldSize=32M -XX:MaxHeapSize=96m -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=128
+ *                   TestPromotionFromEdenToTenured 10m 9 TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:OldSize=32m -XX:MaxHeapSize=96m -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=128
+ *                   TestPromotionFromEdenToTenured 10m 147 TENURED
+ */
+public class TestPromotionFromEdenToTenured {
+    public static void main(String args[]) {
+        SurvivorAlignmentTestMain test
+                = SurvivorAlignmentTestMain.fromArgs(args);
+        System.out.println(test);
+
+        long expectedMemoryUsage = test.getExpectedMemoryUsage();
+        test.baselineMemoryAllocation();
+        System.gc();
+        // increase expected usage by current old gen usage
+        expectedMemoryUsage += SurvivorAlignmentTestMain.getAlignmentHelper(
+                SurvivorAlignmentTestMain.HeapSpace.TENURED)
+                .getActualMemoryUsage();
+
+        test.allocate();
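+        // The following full GC is expected to promote the just-allocated
+        // objects from eden directly into the tenured space.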
+        System.gc();
+
+        test.verifyMemoryUsage(expectedMemoryUsage);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterFullGC.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8031323
+ * @summary Verify that objects promoted from survivor space to tenured space
+ *          during full GC are not aligned to SurvivorAlignmentInBytes value.
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestPromotionFromSurvivorToTenuredAfterFullGC
+ *        SurvivorAlignmentTestMain AlignmentHelper
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:OldSize=32m -XX:MaxHeapSize=160m
+ *                   -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=32
+ *                   TestPromotionFromSurvivorToTenuredAfterFullGC 10m 9 TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:OldSize=32m -XX:MaxHeapSize=160m
+ *                   -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=32
+ *                   TestPromotionFromSurvivorToTenuredAfterFullGC 20m 47
+ *                   TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=200m -XX:MaxNewSize=200m
+ *                   -XX:OldSize=32m -XX:MaxHeapSize=232m
+ *                   -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=64
+ *                   TestPromotionFromSurvivorToTenuredAfterFullGC 10m 9 TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:OldSize=32m -XX:MaxHeapSize=160m
+ *                   -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=64
+ *                   TestPromotionFromSurvivorToTenuredAfterFullGC 20m 87
+ *                   TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=256m -XX:MaxNewSize=256m
+ *                   -XX:OldSize=32M -XX:MaxHeapSize=288m
+ *                   -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=128
+ *                    TestPromotionFromSurvivorToTenuredAfterFullGC 10m 9
+ *                    TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:OldSize=32m -XX:MaxHeapSize=160m
+ *                   -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=128
+ *                   TestPromotionFromSurvivorToTenuredAfterFullGC 20m 147
+ *                   TENURED
+ */
+public class TestPromotionFromSurvivorToTenuredAfterFullGC {
+    public static void main(String args[]) {
+        SurvivorAlignmentTestMain test
+                = SurvivorAlignmentTestMain.fromArgs(args);
+        System.out.println(test);
+
+        long expectedMemoryUsage = test.getExpectedMemoryUsage();
+        test.baselineMemoryAllocation();
+        System.gc();
+        // increase expected usage by current old gen usage
+        expectedMemoryUsage += SurvivorAlignmentTestMain.getAlignmentHelper(
+                SurvivorAlignmentTestMain.HeapSpace.TENURED)
+                .getActualMemoryUsage();
+
+        test.allocate();
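+        // A young GC first copies the objects into the survivor space; the
+        // subsequent full GC then promotes them into the tenured space.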
+        SurvivorAlignmentTestMain.WHITE_BOX.youngGC();
+        System.gc();
+
+        test.verifyMemoryUsage(expectedMemoryUsage);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8031323
+ * @summary Verify that objects promoted from survivor space to tenured space
+ *          when their age exceeds the tenuring threshold are not aligned to
+ *          SurvivorAlignmentInBytes value.
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestPromotionFromSurvivorToTenuredAfterMinorGC
+ *        SurvivorAlignmentTestMain AlignmentHelper
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:OldSize=32M -XX:MaxHeapSize=160m -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=32
+ *                   TestPromotionFromSurvivorToTenuredAfterMinorGC 10m 9
+ *                   TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:OldSize=32M -XX:MaxHeapSize=160m -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=32
+ *                   TestPromotionFromSurvivorToTenuredAfterMinorGC 20m 47
+ *                   TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=200m -XX:MaxNewSize=200m
+ *                   -XX:OldSize=32M -XX:MaxHeapSize=232m -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=64
+ *                   TestPromotionFromSurvivorToTenuredAfterMinorGC 10m 9
+ *                   TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:OldSize=32M -XX:MaxHeapSize=160m -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=64
+ *                   TestPromotionFromSurvivorToTenuredAfterMinorGC 20m 87
+ *                   TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=256m -XX:MaxNewSize=256m
+ *                   -XX:OldSize=32M -XX:MaxHeapSize=288m -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=128
+ *                    TestPromotionFromSurvivorToTenuredAfterMinorGC 10m 9
+ *                    TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:OldSize=32M -XX:MaxHeapSize=160m -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=128
+ *                   TestPromotionFromSurvivorToTenuredAfterMinorGC 20m 147
+ *                   TENURED
+ */
+public class TestPromotionFromSurvivorToTenuredAfterMinorGC {
+    public static void main(String args[]) throws Exception {
+        SurvivorAlignmentTestMain test
+                = SurvivorAlignmentTestMain.fromArgs(args);
+        System.out.println(test);
+
+        long expectedMemoryUsage = test.getExpectedMemoryUsage();
+        test.baselineMemoryAllocation();
+        SurvivorAlignmentTestMain.WHITE_BOX.fullGC();
+        // increase expected usage by current old gen usage
+        expectedMemoryUsage += SurvivorAlignmentTestMain.getAlignmentHelper(
+                SurvivorAlignmentTestMain.HeapSpace.TENURED)
+                .getActualMemoryUsage();
+
+        test.allocate();
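+        // Run MaxTenuringThreshold + 1 young GCs so the objects age past the
+        // tenuring threshold and are promoted into the tenured space.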
+        for (int i = 0; i <= SurvivorAlignmentTestMain.MAX_TENURING_THRESHOLD;
+             i++) {
+            SurvivorAlignmentTestMain.WHITE_BOX.youngGC();
+        }
+
+        test.verifyMemoryUsage(expectedMemoryUsage);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/survivorAlignment/TestPromotionToSurvivor.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8031323
+ * @summary Verify that objects promoted from eden space to survivor space after
+ *          minor GC are aligned to SurvivorAlignmentInBytes.
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestPromotionToSurvivor
+ *        SurvivorAlignmentTestMain AlignmentHelper
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=32 -XX:OldSize=128m
+ *                   -XX:MaxHeapSize=256m -XX:-ExplicitGCInvokesConcurrent
+ *                   TestPromotionToSurvivor 10m 9 SURVIVOR
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=32 -XX:OldSize=128m
+ *                   -XX:MaxHeapSize=256m -XX:-ExplicitGCInvokesConcurrent
+ *                   TestPromotionToSurvivor 20m 47 SURVIVOR
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=64 -XX:OldSize=128m
+ *                   -XX:MaxHeapSize=256m -XX:-ExplicitGCInvokesConcurrent
+ *                   TestPromotionToSurvivor 8m 9 SURVIVOR
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=64 -XX:OldSize=128m
+ *                   -XX:MaxHeapSize=256m -XX:-ExplicitGCInvokesConcurrent
+ *                   TestPromotionToSurvivor 20m 87 SURVIVOR
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=256m -XX:MaxNewSize=256m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=128 -XX:OldSize=128m
+ *                   -XX:MaxHeapSize=384m  -XX:-ExplicitGCInvokesConcurrent
+ *                   TestPromotionToSurvivor 10m 9 SURVIVOR
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=128 -XX:OldSize=128m
+ *                   -XX:MaxHeapSize=256m -XX:-ExplicitGCInvokesConcurrent
+ *                   TestPromotionToSurvivor 20m 147 SURVIVOR
+ */
+public class TestPromotionToSurvivor {
+    public static void main(String args[]) {
+        SurvivorAlignmentTestMain test
+                = SurvivorAlignmentTestMain.fromArgs(args);
+        System.out.println(test);
+
+        long expectedUsage = test.getExpectedMemoryUsage();
+        test.baselineMemoryAllocation();
+        SurvivorAlignmentTestMain.WHITE_BOX.fullGC();
+
+        test.allocate();
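+        // A single young GC copies the live objects from eden into the survivor
+        // space, where the alignment is then verified.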
+        SurvivorAlignmentTestMain.WHITE_BOX.youngGC();
+
+        test.verifyMemoryUsage(expectedUsage);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/whitebox/TestWBGC.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestWBGC
+ * @bug 8055098
+ * @summary Verify that the WB methods isObjectInOldGen and youngGC work correctly.
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestWBGC
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run driver TestWBGC
+ */
+import com.oracle.java.testlibrary.*;
+import sun.hotspot.WhiteBox;
+
+public class TestWBGC {
+
+    public static void main(String args[]) throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                true,
+                "-Xbootclasspath/a:.",
+                "-XX:+UnlockDiagnosticVMOptions",
+                "-XX:+WhiteBoxAPI",
+                "-XX:MaxTenuringThreshold=1",
+                "-XX:+PrintGC",
+                GCYoungTest.class.getName());
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        System.out.println(output.getStdout());
+        output.shouldHaveExitValue(0);
+        output.shouldContain("WhiteBox Initiated Young GC");
+        output.shouldNotContain("Full");
+        // Make sure we don't provoke a Full GC in addition to the young GC.
+    }
+
+    public static class GCYoungTest {
+        static WhiteBox wb = WhiteBox.getWhiteBox();
+        public static Object obj;
+
+        public static void main(String args[]) {
+            obj = new Object();
+            Asserts.assertFalse(wb.isObjectInOldGen(obj));
+            wb.youngGC();
+            wb.youngGC();
+            // With -XX:MaxTenuringThreshold=1, two young GCs are needed to promote the object into the old gen.
+            Asserts.assertTrue(wb.isObjectInOldGen(obj));
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/CheckEndorsedAndExtDirs/EndorsedExtDirs.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8064667
+ * @summary Sanity test for -XX:+CheckEndorsedAndExtDirs
+ * @library /testlibrary
+ * @build com.oracle.java.testlibrary.*
+ * @run main/othervm EndorsedExtDirs
+ */
+
+import com.oracle.java.testlibrary.*;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+
+public class EndorsedExtDirs {
+    static final String cpath = System.getProperty("test.classes", ".");
+    public static void main(String arg[]) throws Exception {
+        fatalError("-XX:+CheckEndorsedAndExtDirs", "-Djava.endorsed.dirs=foo");
+        fatalError("-XX:+CheckEndorsedAndExtDirs", "-Djava.ext.dirs=bar");
+        testNonEmptySystemExtDirs();
+    }
+
+    static void testNonEmptySystemExtDirs() throws Exception {
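+        // Count .jar files under java.ext.dirs entries other than the JDK's own
+        // lib/ext directory; if any are present, -XX:+CheckEndorsedAndExtDirs is
+        // expected to make VM startup fail (checked by fatalError below).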
+        String home = System.getProperty("java.home");
+        Path ext = Paths.get(home, "lib", "ext");
+        String extDirs = System.getProperty("java.ext.dirs");
+        String[] dirs = extDirs.split(File.pathSeparator);
+        long count = 0;
+        for (String d : dirs) {
+            Path path = Paths.get(d);
+            if (Files.notExists(path) || path.equals(ext)) continue;
+            count += Files.find(path, 1, (Path p, BasicFileAttributes attr)
+                                       -> p.getFileName().toString().endsWith(".jar"))
+                          .count();
+        }
+        if (count > 0) {
+            fatalError("-XX:+CheckEndorsedAndExtDirs");
+        }
+    }
+
+    static ProcessBuilder newProcessBuilder(String... args) {
+        List<String> commands = new ArrayList<>();
+        String java = System.getProperty("java.home") + "/bin/java";
+        commands.add(java);
+        for (String s : args) {
+            commands.add(s);
+        }
+        commands.add("-cp");
+        commands.add(cpath);
+        commands.add("EndorsedExtDirs");
+
+        System.out.println("Process " + commands);
+        return new ProcessBuilder(commands);
+    }
+
+    static void fatalError(String... args) throws Exception {
+        fatalError(newProcessBuilder(args));
+    }
+
+    static void fatalError(ProcessBuilder pb) throws Exception {
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Could not create the Java Virtual Machine");
+        output.shouldHaveExitValue(1);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/LoadClass/ShowClassLoader.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @key regression
+ * @bug 8058927
+ * @summary Make sure array class has the right class loader
+ * @run main ShowClassLoader
+ */
+
+public class ShowClassLoader {
+
+    public static void main(String[] args) {
+        Object[] oa = new Object[0];
+        ShowClassLoader[] sa = new ShowClassLoader[0];
+
+        System.out.println("Classloader for Object[] is " + oa.getClass().getClassLoader());
+        System.out.println("Classloader for SCL[] is " + sa.getClass().getClassLoader() );
+
+        if (sa.getClass().getClassLoader() == null) {
+            throw new RuntimeException("Wrong class loader");
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/NMT/AutoshutdownNMT.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @key nmt
+ * @summary Test for deprecated message if -XX:-AutoShutdownNMT is specified
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class AutoshutdownNMT {
+
+    public static void main(String args[]) throws Exception {
+
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+                "-XX:NativeMemoryTracking=detail",
+                "-XX:-AutoShutdownNMT",
+                "-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("ignoring option AutoShutdownNMT");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/NMT/ChangeTrackingLevel.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8059100
+ * @summary Test that you can decrease NMT tracking level but not increase it.
+ * @key nmt
+ * @library /testlibrary /testlibrary/whitebox
+ * @build ChangeTrackingLevel
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail ChangeTrackingLevel
+ */
+
+import com.oracle.java.testlibrary.*;
+import sun.hotspot.WhiteBox;
+
+public class ChangeTrackingLevel {
+
+    public static WhiteBox wb = WhiteBox.getWhiteBox();
+    public static void main(String args[]) throws Exception {
+        boolean testChangeLevel = wb.NMTChangeTrackingLevel();
+        if (testChangeLevel) {
+            System.out.println("NMT level change test passed.");
+        } else {
+            // it also fails if the VM asserts.
+            throw new RuntimeException("NMT level change test failed");
+        }
+    }
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/NMT/JcmdBaselineDetail.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @key nmt jcmd
+ * @summary Verify that jcmd correctly reports that baseline succeeds with NMT enabled with detailed tracking.
+ * @library /testlibrary
+ * @run main/othervm -XX:NativeMemoryTracking=detail JcmdBaselineDetail
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class JcmdBaselineDetail {
+
+    public static void main(String args[]) throws Exception {
+        // Grab my own PID
+        String pid = Integer.toString(ProcessTools.getProcessId());
+        OutputAnalyzer output;
+
+        ProcessBuilder pb = new ProcessBuilder();
+
+        // Run 'jcmd <pid> VM.native_memory baseline=true'
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "baseline=true"});
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Baseline succeeded");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/NMT/JcmdDetailDiff.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Run NMT baseline, allocate memory and verify output from detail.diff
+ * @key nmt jcmd
+ * @library /testlibrary /testlibrary/whitebox
+ * @ignore
+ * @build JcmdDetailDiff
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail JcmdDetailDiff
+ */
+
+import com.oracle.java.testlibrary.*;
+
+import sun.hotspot.WhiteBox;
+
+public class JcmdDetailDiff {
+
+    public static WhiteBox wb = WhiteBox.getWhiteBox();
+
+    public static void main(String args[]) throws Exception {
+        ProcessBuilder pb = new ProcessBuilder();
+        OutputAnalyzer output;
+        // Grab my own PID
+        String pid = Integer.toString(ProcessTools.getProcessId());
+
+        long commitSize = 128 * 1024;
+        long reserveSize = 256 * 1024;
+        long addr;
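+        // Flow: take an NMT baseline, reserve 256KB (detail.diff should show
+        // +256KB reserved), commit 128KB, then uncommit and release, after which
+        // the test region should no longer appear in the diff output.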
+
+        // Run 'jcmd <pid> VM.native_memory baseline=true'
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "baseline=true"});
+        pb.start().waitFor();
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Baseline succeeded");
+
+        addr = wb.NMTReserveMemory(reserveSize);
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail.diff", "scale=KB"});
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB +256KB, committed=0KB)");
+
+        wb.NMTCommitMemory(addr, commitSize);
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail.diff", "scale=KB"});
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB +256KB, committed=128KB +128KB)");
+
+        wb.NMTUncommitMemory(addr, commitSize);
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail.diff", "scale=KB"});
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB +256KB, committed=0KB)");
+
+        wb.NMTReleaseMemory(addr, reserveSize);
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail.diff", "scale=KB"});
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldNotContain("Test (reserved=");
+    }
+}
--- a/test/runtime/NMT/JcmdScale.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/runtime/NMT/JcmdScale.java	Fri Apr 10 16:58:26 2015 -0700
@@ -41,15 +41,15 @@
 
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=KB"});
     output = new OutputAnalyzer(pb.start());
-    output.shouldContain("KB,  committed=");
+    output.shouldContain("KB, committed=");
 
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=MB"});
     output = new OutputAnalyzer(pb.start());
-    output.shouldContain("MB,  committed=");
+    output.shouldContain("MB, committed=");
 
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=GB"});
     output = new OutputAnalyzer(pb.start());
-    output.shouldContain("GB,  committed=");
+    output.shouldContain("GB, committed=");
 
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=apa"});
     output = new OutputAnalyzer(pb.start());
@@ -57,7 +57,7 @@
 
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary", "scale=GB"});
     output = new OutputAnalyzer(pb.start());
-    output.shouldContain("GB,  committed=");
+    output.shouldContain("GB, committed=");
 
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary", "scale=apa"});
     output = new OutputAnalyzer(pb.start());
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/NMT/JcmdScaleDetail.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @key nmt jcmd
+ * @summary Test the NMT scale parameter with detail tracking level
+ * @library /testlibrary
+ * @run main/othervm -XX:NativeMemoryTracking=detail JcmdScaleDetail
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class JcmdScaleDetail {
+
+    public static void main(String args[]) throws Exception {
+        ProcessBuilder pb = new ProcessBuilder();
+        OutputAnalyzer output;
+        // Grab my own PID
+        String pid = Integer.toString(ProcessTools.getProcessId());
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=KB"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("KB, committed=");
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=MB"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("MB, committed=");
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=GB"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("GB, committed=");
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=apa"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Incorrect scale value: apa");
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary", "scale=GB"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("GB, committed=");
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary", "scale=apa"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Incorrect scale value: apa");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/NMT/JcmdSummaryDiff.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary run NMT baseline, allocate memory and verify output from summary.diff
+ * @key nmt jcmd
+ * @library /testlibrary /testlibrary/whitebox
+ * @build JcmdSummaryDiff
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=summary JcmdSummaryDiff
+ */
+
+import com.oracle.java.testlibrary.*;
+
+import sun.hotspot.WhiteBox;
+
+public class JcmdSummaryDiff {
+
+    public static WhiteBox wb = WhiteBox.getWhiteBox();
+
+    public static void main(String args[]) throws Exception {
+        ProcessBuilder pb = new ProcessBuilder();
+        OutputAnalyzer output;
+        // Grab my own PID
+        String pid = Integer.toString(ProcessTools.getProcessId());
+
+        long commitSize = 128 * 1024;
+        long reserveSize = 256 * 1024;
+        long addr;
+
+        // Run 'jcmd <pid> VM.native_memory baseline=true'
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "baseline=true"});
+        pb.start().waitFor();
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Baseline succeeded");
+
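+        // Reserve, commit, uncommit and release a Test region, checking the summary.diff
+        // output against the baseline after each step.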
+        addr = wb.NMTReserveMemory(reserveSize);
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary.diff", "scale=KB"});
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB +256KB, committed=0KB)");
+
+        wb.NMTCommitMemory(addr, commitSize);
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary.diff", "scale=KB"});
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB +256KB, committed=128KB +128KB)");
+
+        wb.NMTUncommitMemory(addr, commitSize);
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary.diff", "scale=KB"});
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=256KB +256KB, committed=0KB)");
+
+        wb.NMTReleaseMemory(addr, reserveSize);
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary.diff", "scale=KB"});
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldNotContain("Test (reserved=");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/NMT/MallocRoundingReportTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Test consistency of NMT by creating allocations of the Test type with various sizes and verifying visibility with jcmd
+ * @key nmt jcmd
+ * @library /testlibrary /testlibrary/whitebox
+ * @build MallocRoundingReportTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocRoundingReportTest
+ *
+ */
+
+import com.oracle.java.testlibrary.*;
+
+import sun.hotspot.WhiteBox;
+
+public class MallocRoundingReportTest {
+    private static long K = 1024;
+
+    public static void main(String args[]) throws Exception {
+        OutputAnalyzer output;
+        WhiteBox wb = WhiteBox.getWhiteBox();
+
+        // Grab my own PID
+        String pid = Integer.toString(ProcessTools.getProcessId());
+        ProcessBuilder pb = new ProcessBuilder();
+
+        long[] additionalBytes = {0, 1, 512, 650};
+        long[] kByteSize = {1024, 2048};
+        long mallocd_total = 0;  // holds the address returned by the most recent NMTMalloc call
+        for (int i = 0; i < kByteSize.length; i++) {
+            for (int j = 0; j < (additionalBytes.length); j++) {
+                long curKB = kByteSize[i] + additionalBytes[j];
+                // round up/down to the nearest KB to match NMT reporting
+                long numKB = (curKB % kByteSize[i] >= 512) ? ((curKB / K) + 1) : curKB / K;
+                // Use WB API to alloc and free with the mtTest type
+                mallocd_total = wb.NMTMalloc(curKB);
+                // Run 'jcmd <pid> VM.native_memory summary', check for expected output
+                // NMT does not track memory allocations less than 1KB, and rounds to the nearest KB
+                String expectedOut = ("Test (reserved=" + numKB + "KB, committed=" + numKB + "KB)");
+
+                pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary" });
+                output = new OutputAnalyzer(pb.start());
+                output.shouldContain(expectedOut);
+
+                wb.NMTFree(mallocd_total);
+                // Run 'jcmd <pid> VM.native_memory summary', check for expected output
+                pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary" });
+                output = new OutputAnalyzer(pb.start());
+                output.shouldNotContain("Test (reserved=");
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/NMT/MallocSiteHashOverflow.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Test corner case that overflows malloc site hashtable bucket
+ * @requires sun.arch.data.model == "32"
+ * @key nmt jcmd stress
+ * @library /testlibrary /testlibrary/whitebox
+ * @build MallocSiteHashOverflow
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocSiteHashOverflow
+ */
+
+import com.oracle.java.testlibrary.*;
+import sun.hotspot.WhiteBox;
+
+public class MallocSiteHashOverflow {
+
+    public static void main(String args[]) throws Exception {
+
+        // Size of entries based on malloc tracking header defined in mallocTracker.hpp
+        // For 32-bit systems, create 257 malloc sites with the same hash bucket to overflow a hash bucket
+        long entries = 257;
+
+        OutputAnalyzer output;
+        WhiteBox wb = WhiteBox.getWhiteBox();
+        int MAX_HASH_SIZE = wb.NMTGetHashSize();
+
+        // Grab my own PID
+        String pid = Integer.toString(ProcessTools.getProcessId());
+        ProcessBuilder pb = new ProcessBuilder();
+
+        // Verify that current tracking level is "detail"
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "statistics"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Native Memory Tracking Statistics");
+
+        // Attempt to cause NMT to downgrade the tracking level by allocating small amounts
+        // of memory with pseudo call stacks that all hash to the same bucket
+        int pc = 1;
+        for (int i = 0; i < entries; i++) {
+            long addr = wb.NMTMallocWithPseudoStack(1, pc);
+            if (addr == 0) {
+                throw new RuntimeException("NMTMallocWithPseudoStack: out of memory");
+            }
+            // We free memory here since it doesn't affect pseudo malloc alloc site hash table entries
+            wb.NMTFree(addr);
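+            // Advance pc by the hash table size so each new call site should land in the
+            // same hash bucket as the previous ones.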
+            pc += MAX_HASH_SIZE;
+            if (i == entries - 1) {
+                // Verify that tracking has been downgraded
+                pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "statistics"});
+                output = new OutputAnalyzer(pb.start());
+                output.shouldContain("Tracking level has been downgraded due to lack of resources");
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/NMT/MallocStressTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Stress test for malloc tracking
+ * @key nmt jcmd stress
+ * @library /testlibrary /testlibrary/whitebox
+ * @build MallocStressTest
+ * @ignore - This test is disabled since it will stress NMT and timeout during normal testing
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocStressTest
+ */
+
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import com.oracle.java.testlibrary.*;
+import sun.hotspot.WhiteBox;
+
+public class MallocStressTest {
+    private static int K = 1024;
+
+    // The stress test runs in three phases:
+    // 1. alloc: A lot of malloc with fewer free, which simulates a burst memory allocation
+    //    that is usually seen during startup or class loading.
+    // 2. pause: Pause the test to check accuracy of native memory tracking
+    // 3. release: Release all malloc'd memory and check native memory tracking result.
+    public enum TestPhase {
+        alloc,
+        pause,
+        release
+    };
+
+    static TestPhase phase = TestPhase.alloc;
+
+    // malloc'd memory
+    static ArrayList<MallocMemory>  mallocd_memory = new ArrayList<MallocMemory>();
+    static long                     mallocd_total  = 0;
+    static WhiteBox                 whiteBox;
+    static AtomicInteger            pause_count = new AtomicInteger();
+
+    static boolean                  is_64_bit_system;
+
+    private static boolean is_64_bit_system() { return is_64_bit_system; }
+
+    public static void main(String args[]) throws Exception {
+        is_64_bit_system = (Platform.is64bit());
+
+        OutputAnalyzer output;
+        whiteBox = WhiteBox.getWhiteBox();
+
+        // Grab my own PID
+        String pid = Integer.toString(ProcessTools.getProcessId());
+        ProcessBuilder pb = new ProcessBuilder();
+
+        AllocThread[]   alloc_threads = new AllocThread[256];
+        ReleaseThread[] release_threads = new ReleaseThread[64];
+
+        int index;
+        // Create many allocation threads
+        for (index = 0; index < alloc_threads.length; index ++) {
+            alloc_threads[index] = new AllocThread();
+        }
+
+        // Fewer release threads
+        for (index = 0; index < release_threads.length; index ++) {
+            release_threads[index] = new ReleaseThread();
+        }
+
+        if (is_64_bit_system()) {
+            sleep_wait(2*60*1000);
+        } else {
+            sleep_wait(60*1000);
+        }
+        // pause the stress test
+        phase = TestPhase.pause;
+        while (pause_count.intValue() < alloc_threads.length + release_threads.length) {
+            sleep_wait(10);
+        }
+
+        long mallocd_total_in_KB = (mallocd_total + K / 2) / K;  // round to the nearest KB
+
+        // Now check if the result from NMT matches the total memory allocated.
+        String expected_test_summary = "Test (reserved=" + mallocd_total_in_KB +"KB, committed=" + mallocd_total_in_KB + "KB)";
+        // Run 'jcmd <pid> VM.native_memory summary'
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain(expected_test_summary);
+
+        // Release all allocated memory
+        phase = TestPhase.release;
+        synchronized(mallocd_memory) {
+            mallocd_memory.notifyAll();
+        }
+
+        // Join all threads
+        for (index = 0; index < alloc_threads.length; index ++) {
+            try {
+                alloc_threads[index].join();
+            } catch (InterruptedException e) {
+            }
+        }
+
+        for (index = 0; index < release_threads.length; index ++) {
+            try {
+                release_threads[index].join();
+            } catch (InterruptedException e) {
+            }
+        }
+
+        // All test memory allocated should be released
+        output = new OutputAnalyzer(pb.start());
+        output.shouldNotContain("Test (reserved=");
+
+        // Verify that tracking level has not been downgraded
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "statistics"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldNotContain("Tracking level has been downgraded due to lack of resources");
+    }
+
+    private static void sleep_wait(int n) {
+        try {
+            Thread.sleep(n);
+        } catch (InterruptedException e) {
+        }
+    }
+
+
+    static class MallocMemory {
+        private long  addr;
+        private int   size;
+
+        MallocMemory(long addr, int size) {
+            this.addr = addr;
+            this.size = size;
+        }
+
+        long addr()  { return this.addr; }
+        int  size()  { return this.size; }
+    }
+
+    static class AllocThread extends Thread {
+        AllocThread() {
+            this.setName("MallocThread");
+            this.start();
+        }
+
+        // AllocThread only runs "Alloc" phase
+        public void run() {
+            Random random = new Random();
+            while (MallocStressTest.phase == TestPhase.alloc) {
+                int r = Math.abs(random.nextInt());
+                // Only malloc small amount to avoid OOM
+                int size = r % 32;
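+                // r is reused below as the pseudo call-site id, reduced to a small set of
+                // distinct values, presumably to bound the size of the malloc site table.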
+                if (is_64_bit_system()) {
+                    r = r % 32 * K;
+                } else {
+                    r = r % 64;
+                }
+                if (size == 0) size = 1;
+                long addr = MallocStressTest.whiteBox.NMTMallocWithPseudoStack(size, r);
+                if (addr != 0) {
+                    MallocMemory mem = new MallocMemory(addr, size);
+                    synchronized(MallocStressTest.mallocd_memory) {
+                        MallocStressTest.mallocd_memory.add(mem);
+                        MallocStressTest.mallocd_total += size;
+                    }
+                } else {
+                    System.out.println("Out of malloc memory");
+                    break;
+                }
+            }
+            MallocStressTest.pause_count.incrementAndGet();
+        }
+    }
+
+    static class ReleaseThread extends Thread {
+        private Random random = new Random();
+        ReleaseThread() {
+            this.setName("ReleaseThread");
+            this.start();
+        }
+
+        public void run() {
+            while(true) {
+                switch(MallocStressTest.phase) {
+                case alloc:
+                    slow_release();
+                    break;
+                case pause:
+                    enter_pause();
+                    break;
+                case release:
+                    quick_release();
+                    return;
+                }
+            }
+        }
+
+        private void enter_pause() {
+            MallocStressTest.pause_count.incrementAndGet();
+            while (MallocStressTest.phase != MallocStressTest.TestPhase.release) {
+                try {
+                    synchronized(MallocStressTest.mallocd_memory) {
+                        MallocStressTest.mallocd_memory.wait(10);
+                    }
+                } catch (InterruptedException e) {
+                }
+            }
+        }
+
+        private void quick_release() {
+            List<MallocMemory> free_list;
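+            // Drain the shared list in batches of at most 5000 entries so that the lock is
+            // not held while the actual frees are performed.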
+            while (true) {
+                synchronized(MallocStressTest.mallocd_memory) {
+                    if (MallocStressTest.mallocd_memory.isEmpty()) return;
+                    int size = Math.min(MallocStressTest.mallocd_memory.size(), 5000);
+                    List<MallocMemory> subList = MallocStressTest.mallocd_memory.subList(0, size);
+                    free_list = new ArrayList<MallocMemory>(subList);
+                    subList.clear();
+                }
+                for (int index = 0; index < free_list.size(); index ++) {
+                    MallocMemory mem = free_list.get(index);
+                    MallocStressTest.whiteBox.NMTFree(mem.addr());
+                }
+            }
+        }
+
+        private void slow_release() {
+            try {
+                Thread.sleep(10);
+            } catch (InterruptedException e) {
+            }
+            synchronized(MallocStressTest.mallocd_memory) {
+                if (MallocStressTest.mallocd_memory.isEmpty()) return;
+                int n = Math.abs(random.nextInt()) % MallocStressTest.mallocd_memory.size();
+                MallocMemory mem = mallocd_memory.remove(n);
+                MallocStressTest.whiteBox.NMTFree(mem.addr());
+                MallocStressTest.mallocd_total -= mem.size();
+            }
+        }
+    }
+}
--- a/test/runtime/NMT/MallocTestType.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/runtime/NMT/MallocTestType.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,11 +51,6 @@
     long memAlloc1 = wb.NMTMalloc(512 * 1024);
     wb.NMTFree(memAlloc2);
 
-    // Use WB API to ensure that all data has been merged before we continue
-    if (!wb.NMTWaitForDataMerge()) {
-      throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
-    }
-
     // Run 'jcmd <pid> VM.native_memory summary'
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary"});
     output = new OutputAnalyzer(pb.start());
@@ -64,10 +59,6 @@
     // Free the memory allocated by NMTAllocTest
     wb.NMTFree(memAlloc1);
 
-    // Use WB API to ensure that all data has been merged before we continue
-    if (!wb.NMTWaitForDataMerge()) {
-      throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
-    }
     output = new OutputAnalyzer(pb.start());
     output.shouldNotContain("Test (reserved=");
   }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/NMT/MallocTrackingVerify.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8054836
+ * @summary Test to verify correctness of malloc tracking
+ * @key nmt jcmd
+ * @library /testlibrary /testlibrary/whitebox
+ * @build MallocTrackingVerify
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocTrackingVerify
+ *
+ */
+
+import java.util.ArrayList;
+import java.util.Random;
+
+import com.oracle.java.testlibrary.*;
+
+import sun.hotspot.WhiteBox;
+
+public class MallocTrackingVerify {
+    private static int MAX_ALLOC = 4 * 1024;
+
+    static ArrayList<MallocMemory> mallocd_memory = new ArrayList<MallocMemory>();
+    static long mallocd_total = 0;
+    public static WhiteBox wb = WhiteBox.getWhiteBox();
+
+    public static void main(String args[]) throws Exception {
+        OutputAnalyzer output;
+
+        // Grab my own PID
+        String pid = Integer.toString(ProcessTools.getProcessId());
+        ProcessBuilder pb = new ProcessBuilder();
+
+        Random random = new Random();
+        // Allocate small amounts of memory with random pseudo call stack
+        while (mallocd_total < MAX_ALLOC) {
+            int size = random.nextInt(31) + 1;
+            long addr = wb.NMTMallocWithPseudoStack(size, random.nextInt());
+            if (addr != 0) {
+                MallocMemory mem = new MallocMemory(addr, size);
+                mallocd_memory.add(mem);
+                mallocd_total += size;
+            } else {
+                System.out.println("Out of malloc memory");
+                break;
+            }
+        }
+
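+        // The loop above exits once at least 4KB has been malloc'd; since each allocation is
+        // at most 31 bytes, the total stays just over 4KB and should be reported as 4KB.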
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary" });
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=4KB, committed=4KB)");
+
+        // Free
+        for (MallocMemory mem : mallocd_memory) {
+            wb.NMTFree(mem.addr());
+        }
+
+        // Run 'jcmd <pid> VM.native_memory summary', check for expected output
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid,
+                "VM.native_memory", "summary" });
+        output = new OutputAnalyzer(pb.start());
+        output.shouldNotContain("Test (reserved=");
+    }
+
+    static class MallocMemory {
+        private long addr;
+        private int size;
+
+        MallocMemory(long addr, int size) {
+            this.addr = addr;
+            this.size = size;
+        }
+
+        long addr() {
+            return this.addr;
+        }
+
+        int size() {
+            return this.size;
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/NMT/NMTWithCDS.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8055061
+ * @key nmt
+ * @library /testlibrary
+ * @run main NMTWithCDS
+ */
+import com.oracle.java.testlibrary.*;
+
+public class NMTWithCDS {
+
+  public static void main(String[] args) throws Exception {
+    ProcessBuilder pb;
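+    // First dump a CDS archive, then start a VM that maps it with NMT detail tracking enabled.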
+    pb = ProcessTools.createJavaProcessBuilder("-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+    try {
+      output.shouldContain("Loading classes to share");
+      output.shouldHaveExitValue(0);
+
+      pb = ProcessTools.createJavaProcessBuilder(
+        "-XX:NativeMemoryTracking=detail", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
+      output = new OutputAnalyzer(pb.start());
+      output.shouldContain("sharing");
+      output.shouldHaveExitValue(0);
+
+    } catch (RuntimeException e) {
+      // Report 'passed' if CDS was turned off.
+      output.shouldContain("Unable to use shared archive");
+      output.shouldHaveExitValue(1);
+    }
+  }
+}
--- a/test/runtime/NMT/PrintNMTStatistics.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/runtime/NMT/PrintNMTStatistics.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2014 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,46 +24,40 @@
 /*
  * @test
  * @key nmt regression
- * @bug 8005936
- * @summary Make sure PrintNMTStatistics works on normal JVM exit
- * @library /testlibrary /testlibrary/whitebox
- * @build PrintNMTStatistics
- * @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main PrintNMTStatistics
+ * @bug 8005936 8058606
+ * @summary Verify PrintNMTStatistics on normal JVM exit for detail and summary tracking level
+ * @library /testlibrary
  */
 
 import com.oracle.java.testlibrary.*;
 
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-import sun.hotspot.WhiteBox;
-
 public class PrintNMTStatistics {
 
-  public static void main(String args[]) throws Exception {
-
-    // We start a new java process running with an argument and use WB API to ensure
-    // we have data for NMT on VM exit
-    if (args.length > 0) {
-      // Use WB API to ensure that all data has been merged before we continue
-      if (!WhiteBox.getWhiteBox().NMTWaitForDataMerge()) {
-        throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
-      }
-      return;
-    }
+    public static void main(String args[]) throws Exception {
 
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
-        "-XX:+UnlockDiagnosticVMOptions",
-        "-Xbootclasspath/a:.",
-        "-XX:+WhiteBoxAPI",
-        "-XX:NativeMemoryTracking=summary",
-        "-XX:+PrintNMTStatistics",
-        "PrintNMTStatistics",
-        "test");
+      "-XX:+UnlockDiagnosticVMOptions",
+      "-XX:+PrintNMTStatistics",
+      "-XX:NativeMemoryTracking=detail",
+      "-version");
+
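+    // With detail tracking, the exit report should include the virtual memory map and detail sections.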
+    OutputAnalyzer output_detail = new OutputAnalyzer(pb.start());
+    output_detail.shouldContain("Virtual memory map:");
+    output_detail.shouldContain("Details:");
+    output_detail.shouldNotContain("error");
+    output_detail.shouldHaveExitValue(0);
 
-    OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldContain("Java Heap (reserved=");
-    output.shouldNotContain("error");
-    output.shouldHaveExitValue(0);
-  }
+    ProcessBuilder pb1 = ProcessTools.createJavaProcessBuilder(
+      "-XX:+UnlockDiagnosticVMOptions",
+      "-XX:+PrintNMTStatistics",
+      "-XX:NativeMemoryTracking=summary",
+      "-version");
+
+    OutputAnalyzer output_summary = new OutputAnalyzer(pb1.start());
+    output_summary.shouldContain("Java Heap (reserved=");
+    output_summary.shouldNotContain("Virtual memory map:");
+    output_summary.shouldNotContain("Details:");
+    output_summary.shouldNotContain("error");
+    output_summary.shouldHaveExitValue(0);
+    }
 }
--- a/test/runtime/NMT/ReleaseCommittedMemory.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/runtime/NMT/ReleaseCommittedMemory.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,7 +44,6 @@
     addr = wb.NMTReserveMemory(reserveSize);
     wb.NMTCommitMemory(addr, 128*1024);
     wb.NMTReleaseMemory(addr, reserveSize);
-    wb.NMTWaitForDataMerge();
   }
 }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/NMT/ReleaseNoCommit.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Release uncommitted memory and make sure NMT handles it correctly
+ * @key nmt regression
+ * @library /testlibrary /testlibrary/whitebox
+ * @build ReleaseNoCommit
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=summary ReleaseNoCommit
+ */
+
+import com.oracle.java.testlibrary.JDKToolFinder;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+
+import sun.hotspot.WhiteBox;
+
+public class ReleaseNoCommit {
+
+    public static void main(String args[]) throws Exception {
+        WhiteBox wb = WhiteBox.getWhiteBox();
+        long reserveSize = 256 * 1024;
+        long addr;
+
+        ProcessBuilder pb = new ProcessBuilder();
+        OutputAnalyzer output;
+        // Grab my own PID
+        String pid = Integer.toString(ProcessTools.getProcessId());
+
+        addr = wb.NMTReserveMemory(reserveSize);
+        // Check for reserved
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=KB"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain(" Test (reserved=256KB, committed=0KB)");
+
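+        // Release the reservation without ever committing any of it; the Test region
+        // should disappear from the NMT report entirely.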
+        wb.NMTReleaseMemory(addr, reserveSize);
+
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=KB"});
+        output = new OutputAnalyzer(pb.start());
+        output.shouldNotContain("Test (reserved=");
+    }
+}
--- a/test/runtime/NMT/ShutdownTwice.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/runtime/NMT/ShutdownTwice.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,12 +45,12 @@
     output = new OutputAnalyzer(pb.start());
 
     // Verify that jcmd reports that NMT is shutting down
-    output.shouldContain("Shutdown is in progress, it will take a few moments to completely shutdown");
+    output.shouldContain("Native memory tracking has been turned off");
 
     // Run shutdown again
     output = new OutputAnalyzer(pb.start());
 
     // Verify that jcmd reports that NMT has been shutdown already
-    output.shouldContain("Native memory tracking has been shutdown by user");
+    output.shouldContain("Native memory tracking has been shutdown");
   }
 }
--- a/test/runtime/NMT/SummaryAfterShutdown.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/runtime/NMT/SummaryAfterShutdown.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,13 +44,13 @@
     output = new OutputAnalyzer(pb.start());
 
     // Verify that jcmd reports that NMT is shutting down
-    output.shouldContain("Shutdown is in progress, it will take a few moments to completely shutdown");
+    output.shouldContain("Native memory tracking has been turned off");
 
     // Run 'jcmd <pid> VM.native_memory summary'
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary"});
     output = new OutputAnalyzer(pb.start());
 
     // Verify that jcmd reports that NMT has been shutdown
-    output.shouldContain("Native memory tracking has been shutdown by user");
+    output.shouldContain("Native memory tracking has been shutdown");
   }
 }
--- a/test/runtime/NMT/SummarySanityCheck.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/runtime/NMT/SummarySanityCheck.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,11 +44,6 @@
     // Grab my own PID
     String pid = Integer.toString(ProcessTools.getProcessId());
 
-    // Use WB API to ensure that all data has been merged before we continue
-    if (!WhiteBox.getWhiteBox().NMTWaitForDataMerge()) {
-      throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
-    }
-
     ProcessBuilder pb = new ProcessBuilder();
 
     // Run  'jcmd <pid> VM.native_memory summary scale=KB'
@@ -69,13 +64,13 @@
     // Match '- <mtType> (reserved=<reserved>KB, committed=<committed>KB)
     Pattern mtTypePattern = Pattern.compile("-\\s+(?<typename>[\\w\\s]+)\\(reserved=(?<reserved>\\d+)KB,\\scommitted=(?<committed>\\d+)KB\\)");
     // Match 'Total: reserved=<reserved>KB, committed=<committed>KB'
-    Pattern totalMemoryPattern = Pattern.compile("Total\\:\\s\\sreserved=(?<reserved>\\d+)KB,\\s\\scommitted=(?<committed>\\d+)KB");
+    Pattern totalMemoryPattern = Pattern.compile("Total\\:\\sreserved=(?<reserved>\\d+)KB,\\scommitted=(?<committed>\\d+)KB");
 
     for (int i = 0; i < lines.length; i++) {
       if (lines[i].startsWith("Total")) {
         Matcher totalMemoryMatcher = totalMemoryPattern.matcher(lines[i]);
 
-        if (totalMemoryMatcher.matches() && totalMemoryMatcher.groupCount() == 2) {
+        if (totalMemoryMatcher.matches()) {
           totalCommitted = Integer.parseInt(totalMemoryMatcher.group("committed"));
           totalReserved = Integer.parseInt(totalMemoryMatcher.group("reserved"));
         } else {
--- a/test/runtime/NMT/ThreadedMallocTestType.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/runtime/NMT/ThreadedMallocTestType.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,11 +58,6 @@
     allocThread.start();
     allocThread.join();
 
-    // Use WB API to ensure that all data has been merged before we continue
-    if (!wb.NMTWaitForDataMerge()) {
-      throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
-    }
-
     // Run 'jcmd <pid> VM.native_memory summary'
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary"});
     output = new OutputAnalyzer(pb.start());
@@ -80,11 +75,6 @@
     freeThread.start();
     freeThread.join();
 
-    // Use WB API to ensure that all data has been merged before we continue
-    if (!wb.NMTWaitForDataMerge()) {
-      throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
-    }
-
     output = new OutputAnalyzer(pb.start());
     output.shouldNotContain("Test (reserved=");
   }
--- a/test/runtime/NMT/ThreadedVirtualAllocTestType.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/runtime/NMT/ThreadedVirtualAllocTestType.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,8 +60,6 @@
     reserveThread.start();
     reserveThread.join();
 
-    mergeData();
-
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail"});
     output = new OutputAnalyzer(pb.start());
     output.shouldContain("Test (reserved=512KB, committed=0KB)");
@@ -77,8 +75,6 @@
     commitThread.start();
     commitThread.join();
 
-    mergeData();
-
     output = new OutputAnalyzer(pb.start());
     output.shouldContain("Test (reserved=512KB, committed=128KB)");
     if (has_nmt_detail) {
@@ -93,8 +89,6 @@
     uncommitThread.start();
     uncommitThread.join();
 
-    mergeData();
-
     output = new OutputAnalyzer(pb.start());
     output.shouldContain("Test (reserved=512KB, committed=0KB)");
     output.shouldNotMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + commitSize) + "\\] committed");
@@ -107,17 +101,9 @@
     releaseThread.start();
     releaseThread.join();
 
-    mergeData();
-
     output = new OutputAnalyzer(pb.start());
     output.shouldNotContain("Test (reserved=");
     output.shouldNotContain("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + reserveSize) + "\\] reserved");
   }
 
-  public static void mergeData() throws Exception {
-    // Use WB API to ensure that all data has been merged before we continue
-    if (!wb.NMTWaitForDataMerge()) {
-      throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
     }
-  }
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/NMT/VirtualAllocCommitUncommitRecommit.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Test reserve/commit/uncommit/release of virtual memory and that we track it correctly
+ * @key nmt jcmd
+ * @library /testlibrary /testlibrary/whitebox
+ * @build VirtualAllocCommitUncommitRecommit
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail VirtualAllocCommitUncommitRecommit
+ *
+ */
+
+import com.oracle.java.testlibrary.*;
+
+import sun.hotspot.WhiteBox;
+
+public class VirtualAllocCommitUncommitRecommit {
+
+    public static WhiteBox wb = WhiteBox.getWhiteBox();
+
+    public static void main(String args[]) throws Exception {
+        OutputAnalyzer output;
+        long commitSize = 4 * 1024; // 4KB
+        long reserveSize = 1024 * 1024; // 1024KB
+        long addr;
+
+        String pid = Integer.toString(ProcessTools.getProcessId());
+        ProcessBuilder pb = new ProcessBuilder();
+
+        boolean has_nmt_detail = wb.NMTIsDetailSupported();
+        if (has_nmt_detail) {
+            System.out.println("NMT detail support detected.");
+        } else {
+            System.out.println("NMT detail support not detected.");
+        }
+
+        // reserve
+        addr = wb.NMTReserveMemory(reserveSize);
+        pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid,
+                "VM.native_memory", "detail" });
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=1024KB, committed=0KB)");
+        if (has_nmt_detail) {
+            output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+                    + Long.toHexString(addr + reserveSize)
+                    + "\\] reserved 1024KB for Test");
+        }
+
+        long addrA = addr;
+        long addrB = addr + commitSize;
+        long addrC = addr + (2 * commitSize);
+        long addrD = addr + (3 * commitSize);
+        long addrE = addr + (4 * commitSize);
+        long addrF = addr + (5 * commitSize);
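+        // addrA..addrF are six consecutive 4KB sub-regions at the start of the 1024KB
+        // reservation, used below to exercise commit/uncommit in different orders.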
+
+        // commit ABCD
+        wb.NMTCommitMemory(addrA, commitSize);
+        wb.NMTCommitMemory(addrB, commitSize);
+        wb.NMTCommitMemory(addrC, commitSize);
+        wb.NMTCommitMemory(addrD, commitSize);
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=1024KB, committed=16KB)");
+
+        if (has_nmt_detail) {
+            output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+                    + Long.toHexString(addr + reserveSize)
+                    + "\\] reserved 1024KB for Test");
+        }
+        // uncommit BC
+        wb.NMTUncommitMemory(addrB, commitSize);
+        wb.NMTUncommitMemory(addrC, commitSize);
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=1024KB, committed=8KB)");
+
+        if (has_nmt_detail) {
+            output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+                    + Long.toHexString(addr + reserveSize)
+                    + "\\] reserved 1024KB for Test");
+        }
+
+        // commit EF
+        wb.NMTCommitMemory(addrE, commitSize);
+        wb.NMTCommitMemory(addrF, commitSize);
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=1024KB, committed=16KB)");
+        if (has_nmt_detail) {
+            output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+                    + Long.toHexString(addr + reserveSize)
+                    + "\\] reserved 1024KB for Test");
+        }
+
+        // uncommit A
+        wb.NMTUncommitMemory(addrA, commitSize);
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=1024KB, committed=12KB)");
+        if (has_nmt_detail) {
+            output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+                    + Long.toHexString(addr + reserveSize)
+                    + "\\] reserved 1024KB for Test");
+        }
+
+        // commit ABC
+        wb.NMTCommitMemory(addrA, commitSize);
+        wb.NMTCommitMemory(addrB, commitSize);
+        wb.NMTCommitMemory(addrC, commitSize);
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=1024KB, committed=24KB)");
+        if (has_nmt_detail) {
+            output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+                    + Long.toHexString(addr + reserveSize)
+                    + "\\] reserved 1024KB for Test");
+        }
+
+        // uncommit ABCDEF
+        wb.NMTUncommitMemory(addrA, commitSize);
+        wb.NMTUncommitMemory(addrB, commitSize);
+        wb.NMTUncommitMemory(addrC, commitSize);
+        wb.NMTUncommitMemory(addrD, commitSize);
+        wb.NMTUncommitMemory(addrE, commitSize);
+        wb.NMTUncommitMemory(addrF, commitSize);
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Test (reserved=1024KB, committed=0KB)");
+        if (has_nmt_detail) {
+            output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+                    + Long.toHexString(addr + reserveSize)
+                    + "\\] reserved 1024KB for Test");
+        }
+
+        // release
+        wb.NMTReleaseMemory(addr, reserveSize);
+        output = new OutputAnalyzer(pb.start());
+        output.shouldNotContain("Test (reserved=");
+        output.shouldNotMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+                + Long.toHexString(addr + reserveSize) + "\\] reserved 1024KB for Test");
+    }
+}
--- a/test/runtime/NMT/VirtualAllocTestType.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/runtime/NMT/VirtualAllocTestType.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,7 +54,6 @@
     }
 
     addr = wb.NMTReserveMemory(reserveSize);
-    mergeData();
     pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail"});
 
     output = new OutputAnalyzer(pb.start());
@@ -65,7 +64,6 @@
 
     wb.NMTCommitMemory(addr, commitSize);
 
-    mergeData();
 
     output = new OutputAnalyzer(pb.start());
     output.shouldContain("Test (reserved=256KB, committed=128KB)");
@@ -75,24 +73,15 @@
 
     wb.NMTUncommitMemory(addr, commitSize);
 
-    mergeData();
 
     output = new OutputAnalyzer(pb.start());
     output.shouldContain("Test (reserved=256KB, committed=0KB)");
     output.shouldNotMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + commitSize) + "\\] committed");
 
     wb.NMTReleaseMemory(addr, reserveSize);
-    mergeData();
 
     output = new OutputAnalyzer(pb.start());
     output.shouldNotContain("Test (reserved=");
     output.shouldNotMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + reserveSize) + "\\] reserved");
   }
-
-  public static void mergeData() throws Exception {
-    // Use WB API to ensure that all data has been merged before we continue
-    if (!wb.NMTWaitForDataMerge()) {
-      throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
     }
-  }
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/RedefineTests/RedefineAnnotations.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,410 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @library /testlibrary
+ * @summary Test that type annotations are retained after a retransform
+ * @run main RedefineAnnotations buildagent
+ * @run main/othervm -javaagent:redefineagent.jar RedefineAnnotations
+ */
+
+import static com.oracle.java.testlibrary.Asserts.assertTrue;
+import java.io.FileNotFoundException;
+import java.io.PrintWriter;
+import java.lang.NoSuchFieldException;
+import java.lang.NoSuchMethodException;
+import java.lang.RuntimeException;
+import java.lang.annotation.Annotation;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import java.lang.instrument.ClassFileTransformer;
+import java.lang.instrument.IllegalClassFormatException;
+import java.lang.instrument.Instrumentation;
+import java.lang.instrument.UnmodifiableClassException;
+import java.lang.reflect.AnnotatedArrayType;
+import java.lang.reflect.AnnotatedParameterizedType;
+import java.lang.reflect.AnnotatedType;
+import java.lang.reflect.AnnotatedWildcardType;
+import java.lang.reflect.Executable;
+import java.lang.reflect.TypeVariable;
+import java.security.ProtectionDomain;
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import jdk.internal.org.objectweb.asm.ClassReader;
+import jdk.internal.org.objectweb.asm.ClassVisitor;
+import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.FieldVisitor;
+import static jdk.internal.org.objectweb.asm.Opcodes.ASM5;
+
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.TYPE_USE)
+@interface TestAnn {
+    String site();
+}
+
+public class RedefineAnnotations {
+    static Instrumentation inst;
+    public static void premain(String agentArgs, Instrumentation inst) {
+        RedefineAnnotations.inst = inst;
+    }
+
+    static class Transformer implements ClassFileTransformer {
+
+        public byte[] asm(ClassLoader loader, String className,
+                Class<?> classBeingRedefined,
+                ProtectionDomain protectionDomain, byte[] classfileBuffer)
+            throws IllegalClassFormatException {
+
+            ClassWriter cw = new ClassWriter(0);
+            ClassVisitor cv = new ReAddDummyFieldsClassVisitor(ASM5, cw) { };
+            ClassReader cr = new ClassReader(classfileBuffer);
+            cr.accept(cv, 0);
+            return cw.toByteArray();
+        }
+
+        public class ReAddDummyFieldsClassVisitor extends ClassVisitor {
+
+            LinkedList<F> fields = new LinkedList<>();
+
+            public ReAddDummyFieldsClassVisitor(int api, ClassVisitor cv) {
+                super(api, cv);
+            }
+
+            @Override public FieldVisitor visitField(int access, String name,
+                    String desc, String signature, Object value) {
+                if (name.startsWith("dummy")) {
+                    // Remove dummy field
+                    fields.addLast(new F(access, name, desc, signature, value));
+                    return null;
+                }
+                return cv.visitField(access, name, desc, signature, value);
+            }
+
+            @Override public void visitEnd() {
+                F f;
+                while ((f = fields.pollFirst()) != null) {
+                    // Re-add dummy fields
+                    cv.visitField(f.access, f.name, f.desc, f.signature, f.value);
+                }
+            }
+
+            private class F {
+                private int access;
+                private String name;
+                private String desc;
+                private String signature;
+                private Object value;
+                F(int access, String name, String desc, String signature, Object value) {
+                    this.access = access;
+                    this.name = name;
+                    this.desc = desc;
+                    this.signature = signature;
+                    this.value = value;
+                }
+            }
+        }
+
+        @Override public byte[] transform(ClassLoader loader, String className,
+                Class<?> classBeingRedefined,
+                ProtectionDomain protectionDomain, byte[] classfileBuffer)
+            throws IllegalClassFormatException {
+
+            if (className.contains("TypeAnnotatedTestClass")) {
+                try {
+                    // Here we remove and re-add the dummy fields. This shuffles the constant pool
+                    return asm(loader, className, classBeingRedefined, protectionDomain, classfileBuffer);
+                } catch (Throwable e) {
+                    // The retransform native code that called this method does not propagate
+                    // exceptions. Instead of getting an uninformative generic error, catch
+                    // any problems here, print them, and exit.
+                    e.printStackTrace();
+                    System.exit(1);
+                }
+            }
+            return null;
+        }
+    }
+
+    private static void buildAgent() {
+        try {
+            ClassFileInstaller.main("RedefineAnnotations");
+        } catch (Exception e) {
+            throw new RuntimeException("Could not write agent classfile", e);
+        }
+
+        try {
+            PrintWriter pw = new PrintWriter("MANIFEST.MF");
+            pw.println("Premain-Class: RedefineAnnotations");
+            pw.println("Agent-Class: RedefineAnnotations");
+            pw.println("Can-Retransform-Classes: true");
+            pw.close();
+        } catch (FileNotFoundException e) {
+            throw new RuntimeException("Could not write manifest file for the agent", e);
+        }
+
+        sun.tools.jar.Main jarTool = new sun.tools.jar.Main(System.out, System.err, "jar");
+        if (!jarTool.run(new String[] { "-cmf", "MANIFEST.MF", "redefineagent.jar", "RedefineAnnotations.class" })) {
+            throw new RuntimeException("Could not write the agent jar file");
+        }
+    }
+
+    public static void main(String argv[]) throws NoSuchFieldException, NoSuchMethodException {
+        if (argv.length == 1 && argv[0].equals("buildagent")) {
+            buildAgent();
+            return;
+        }
+
+        if (inst == null) {
+            throw new RuntimeException("Instrumentation object was null");
+        }
+
+        RedefineAnnotations test = new RedefineAnnotations();
+        test.testTransformAndVerify();
+    }
+
+    // Class type annotations
+    private Annotation classTypeParameterTA;
+    private Annotation extendsTA;
+    private Annotation implementsTA;
+
+    // Field type annotations
+    private Annotation fieldTA;
+    private Annotation innerTA;
+    private Annotation[] arrayTA = new Annotation[4];
+    private Annotation[] mapTA = new Annotation[5];
+
+    // Method type annotations
+    private Annotation returnTA, methodTypeParameterTA, formalParameterTA, throwsTA;
+
+    private void testTransformAndVerify()
+        throws NoSuchFieldException, NoSuchMethodException {
+
+        Class<TypeAnnotatedTestClass> c = TypeAnnotatedTestClass.class;
+        Class<?> myClass = c;
+
+        /*
+         * Verify that the expected annotations are where they should be before transform.
+         */
+        verifyClassTypeAnnotations(c);
+        verifyFieldTypeAnnotations(c);
+        verifyMethodTypeAnnotations(c);
+
+        try {
+            inst.addTransformer(new Transformer(), true);
+            inst.retransformClasses(myClass);
+        } catch (UnmodifiableClassException e) {
+            throw new RuntimeException(e);
+        }
+
+        /*
+         * Verify that the expected annotations are where they should be after transform.
+         * Also verify that before and after are equal.
+         */
+        verifyClassTypeAnnotations(c);
+        verifyFieldTypeAnnotations(c);
+        verifyMethodTypeAnnotations(c);
+    }
+
+    private void verifyClassTypeAnnotations(Class c) {
+        Annotation anno;
+
+        anno = c.getTypeParameters()[0].getAnnotations()[0];
+        verifyTestAnn(classTypeParameterTA, anno, "classTypeParameter");
+        classTypeParameterTA = anno;
+
+        anno = c.getAnnotatedSuperclass().getAnnotations()[0];
+        verifyTestAnn(extendsTA, anno, "extends");
+        extendsTA = anno;
+
+        anno = c.getAnnotatedInterfaces()[0].getAnnotations()[0];
+        verifyTestAnn(implementsTA, anno, "implements");
+        implementsTA = anno;
+    }
+
+    private void verifyFieldTypeAnnotations(Class c)
+        throws NoSuchFieldException, NoSuchMethodException {
+
+        verifyBasicFieldTypeAnnotations(c);
+        verifyInnerFieldTypeAnnotations(c);
+        verifyArrayFieldTypeAnnotations(c);
+        verifyMapFieldTypeAnnotations(c);
+    }
+
+    private void verifyBasicFieldTypeAnnotations(Class c)
+        throws NoSuchFieldException, NoSuchMethodException {
+
+        Annotation anno = c.getDeclaredField("typeAnnotatedBoolean").getAnnotatedType().getAnnotations()[0];
+        verifyTestAnn(fieldTA, anno, "field");
+        fieldTA = anno;
+    }
+
+    private void verifyInnerFieldTypeAnnotations(Class c)
+        throws NoSuchFieldException, NoSuchMethodException {
+
+        AnnotatedType at = c.getDeclaredField("typeAnnotatedInner").getAnnotatedType();
+        Annotation anno = at.getAnnotations()[0];
+        verifyTestAnn(innerTA, anno, "inner");
+        innerTA = anno;
+    }
+
+    private void verifyArrayFieldTypeAnnotations(Class c)
+        throws NoSuchFieldException, NoSuchMethodException {
+
+        Annotation anno;
+        AnnotatedType at;
+
+        at = c.getDeclaredField("typeAnnotatedArray").getAnnotatedType();
+        anno = at.getAnnotations()[0];
+        verifyTestAnn(arrayTA[0], anno, "array1");
+        arrayTA[0] = anno;
+
+        for (int i = 1; i <= 3; i++) {
+            at = ((AnnotatedArrayType) at).getAnnotatedGenericComponentType();
+            anno = at.getAnnotations()[0];
+            verifyTestAnn(arrayTA[i], anno, "array" + (i + 1));
+            arrayTA[i] = anno;
+        }
+    }
+
+    private void verifyMapFieldTypeAnnotations(Class c)
+        throws NoSuchFieldException, NoSuchMethodException {
+
+        Annotation anno;
+        AnnotatedType atBase;
+        AnnotatedType atParameter;
+        atBase = c.getDeclaredField("typeAnnotatedMap").getAnnotatedType();
+
+        anno = atBase.getAnnotations()[0];
+        verifyTestAnn(mapTA[0], anno, "map1");
+        mapTA[0] = anno;
+
+        atParameter =
+            ((AnnotatedParameterizedType) atBase).
+            getAnnotatedActualTypeArguments()[0];
+        anno = ((AnnotatedWildcardType) atParameter).getAnnotations()[0];
+        verifyTestAnn(mapTA[1], anno, "map2");
+        mapTA[1] = anno;
+
+        anno =
+            ((AnnotatedWildcardType) atParameter).
+            getAnnotatedUpperBounds()[0].getAnnotations()[0];
+        verifyTestAnn(mapTA[2], anno, "map3");
+        mapTA[2] = anno;
+
+        atParameter =
+            ((AnnotatedParameterizedType) atBase).
+            getAnnotatedActualTypeArguments()[1];
+        anno = ((AnnotatedParameterizedType) atParameter).getAnnotations()[0];
+        verifyTestAnn(mapTA[3], anno, "map4");
+        mapTA[3] = anno;
+
+        anno =
+            ((AnnotatedParameterizedType) atParameter).
+            getAnnotatedActualTypeArguments()[0].getAnnotations()[0];
+        verifyTestAnn(mapTA[4], anno, "map5");
+        mapTA[4] = anno;
+    }
+
+    private void verifyMethodTypeAnnotations(Class c)
+        throws NoSuchFieldException, NoSuchMethodException {
+        Annotation anno;
+        Executable typeAnnotatedMethod =
+            c.getDeclaredMethod("typeAnnotatedMethod", TypeAnnotatedTestClass.class);
+
+        anno = typeAnnotatedMethod.getAnnotatedReturnType().getAnnotations()[0];
+        verifyTestAnn(returnTA, anno, "return");
+        returnTA = anno;
+
+        anno = typeAnnotatedMethod.getTypeParameters()[0].getAnnotations()[0];
+        verifyTestAnn(methodTypeParameterTA, anno, "methodTypeParameter");
+        methodTypeParameterTA = anno;
+
+        anno = typeAnnotatedMethod.getAnnotatedParameterTypes()[0].getAnnotations()[0];
+        verifyTestAnn(formalParameterTA, anno, "formalParameter");
+        formalParameterTA = anno;
+
+        anno = typeAnnotatedMethod.getAnnotatedExceptionTypes()[0].getAnnotations()[0];
+        verifyTestAnn(throwsTA, anno, "throws");
+        throwsTA = anno;
+    }
+
+    private static void verifyTestAnn(Annotation verifyAgainst, Annotation anno, String expectedSite) {
+        verifyTestAnnSite(anno, expectedSite);
+
+        // When called before the transform, verifyAgainst will be null; when called
+        // after the transform, it will be the annotation captured before the transform.
+        if (verifyAgainst != null) {
+            assertTrue(anno.equals(verifyAgainst),
+                       "Annotations do not match before and after." +
+                       " Before: \"" + verifyAgainst + "\", After: \"" + anno + "\"");
+        }
+    }
+
+    private static void verifyTestAnnSite(Annotation testAnn, String expectedSite) {
+        String expectedAnn = "@TestAnn(site=" + expectedSite + ")";
+        assertTrue(testAnn.toString().equals(expectedAnn),
+                   "Expected \"" + expectedAnn + "\", got \"" + testAnn + "\"");
+    }
+
+    public static class TypeAnnotatedTestClass <@TestAnn(site="classTypeParameter") S,T>
+            extends @TestAnn(site="extends") Thread
+            implements @TestAnn(site="implements") Runnable {
+
+        public @TestAnn(site="field") boolean typeAnnotatedBoolean;
+
+        public
+            RedefineAnnotations.
+            @TestAnn(site="inner") TypeAnnotatedTestClass
+            typeAnnotatedInner;
+
+        public
+            @TestAnn(site="array4") boolean
+            @TestAnn(site="array1") []
+            @TestAnn(site="array2") []
+            @TestAnn(site="array3") []
+            typeAnnotatedArray;
+
+        public @TestAnn(site="map1") Map
+            <@TestAnn(site="map2") ? extends @TestAnn(site="map3") String,
+            @TestAnn(site="map4")  List<@TestAnn(site="map5")  Object>> typeAnnotatedMap;
+
+        public int dummy1;
+        public int dummy2;
+        public int dummy3;
+
+        @TestAnn(site="return") <@TestAnn(site="methodTypeParameter") U,V> Class
+            typeAnnotatedMethod(@TestAnn(site="formalParameter") TypeAnnotatedTestClass arg)
+            throws @TestAnn(site="throws") ClassNotFoundException {
+
+            @TestAnn(site="local_variable_type") int foo = 0;
+            throw new ClassNotFoundException();
+        }
+
+        public void run() {}
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/SharedArchiveFile/ArchiveDoesNotExist.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test ArchiveDoesNotExist
+ * @summary Test how VM handles "file does not exist" situation while
+ *          attempting to use CDS archive. JVM should exit gracefully
+ *          when sharing mode is ON, and continue w/o sharing if sharing
+ *          mode is AUTO.
+ * @library /testlibrary
+ * @run main ArchiveDoesNotExist
+ */
+
+import com.oracle.java.testlibrary.*;
+import java.io.File;
+
+public class ArchiveDoesNotExist {
+    public static void main(String[] args) throws Exception {
+        String fileName = "test.jsa";
+
+        File cdsFile = new File(fileName);
+        if (cdsFile.exists())
+            throw new RuntimeException("Test error: cds file already exists");
+
+        // Sharing: on
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+           "-XX:+UnlockDiagnosticVMOptions",
+           "-XX:SharedArchiveFile=./" + fileName,
+           "-Xshare:on",
+           "-version");
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Specified shared archive not found");
+        output.shouldHaveExitValue(1);
+
+        // Sharing: auto
+        pb = ProcessTools.createJavaProcessBuilder(
+           "-XX:+UnlockDiagnosticVMOptions",
+           "-XX:SharedArchiveFile=./" + fileName,
+           "-Xshare:auto",
+           "-version");
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldMatch("(java|openjdk) version");
+        output.shouldNotContain("sharing");
+        output.shouldHaveExitValue(0);
+    }
+}
--- a/test/runtime/SharedArchiveFile/CdsDifferentObjectAlignment.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/runtime/SharedArchiveFile/CdsDifferentObjectAlignment.java	Fri Apr 10 16:58:26 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
  *          is different from object alignment for creating a CDS file
  *          should fail when loading.
  * @library /testlibrary
+ * @bug 8025642
  */
 
 import com.oracle.java.testlibrary.*;
@@ -82,7 +83,11 @@
             createAlignment,
             loadAlignment);
 
-        output.shouldContain(expectedErrorMsg);
+        try {
+            output.shouldContain(expectedErrorMsg);
+        } catch (RuntimeException e) {
+            output.shouldContain("Unable to use shared archive");
+        }
         output.shouldHaveExitValue(1);
     }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/SharedArchiveFile/DefaultUseWithClient.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test DefaultUseWithClient
+ * @summary Test default behavior of sharing with -client
+ * @library /testlibrary
+ * @run main DefaultUseWithClient
+ * @bug 8032224
+ */
+
+import com.oracle.java.testlibrary.*;
+import java.io.File;
+
+public class DefaultUseWithClient {
+    public static void main(String[] args) throws Exception {
+        String fileName = "test.jsa";
+
+        // On 32-bit Windows, CDS should be on by default in the "-client" config.
+        // Skip this test on any other platform.
+        boolean is32BitWindows = (Platform.isWindows() && Platform.is32bit());
+        if (!is32BitWindows) {
+            System.out.println("Test only applicable on 32-bit Windows. Skipping");
+            return;
+        }
+
+        // create the archive
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+           "-XX:+UnlockDiagnosticVMOptions",
+           "-XX:SharedArchiveFile=./" + fileName,
+           "-Xshare:dump");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+
+        pb = ProcessTools.createJavaProcessBuilder(
+           "-XX:+UnlockDiagnosticVMOptions",
+           "-XX:SharedArchiveFile=./" + fileName,
+           "-client",
+           "-XX:+PrintSharedSpaces",
+           "-version");
+
+        output = new OutputAnalyzer(pb.start());
+        try {
+            output.shouldContain("sharing");
+        } catch (RuntimeException e) {
+            // if sharing failed due to ASLR or similar reasons,
+            // check whether sharing was attempted at all (UseSharedSpaces)
+            output.shouldContain("UseSharedSpaces:");
+        }
+        output.shouldHaveExitValue(0);
+   }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/SharedArchiveFile/LimitSharedSizes.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* @test LimitSharedSizes
+ * @summary Test handling of limits on shared space size
+ * @library /testlibrary
+ * @run main LimitSharedSizes
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class LimitSharedSizes {
+    private static class SharedSizeTestData {
+        public String optionName;
+        public String optionValue;
+        public String expectedErrorMsg;
+
+        public SharedSizeTestData(String name, String value, String msg) {
+            optionName = name;
+            optionValue = value;
+            expectedErrorMsg = msg;
+        }
+    }
+
+    private static final SharedSizeTestData[] testTable = {
+        // values in this part of the test table should cause failure
+        // (shared space sizes are deliberately too small)
+        new SharedSizeTestData("-XX:SharedReadOnlySize", "4M",      "read only"),
+        new SharedSizeTestData("-XX:SharedReadWriteSize","4M",      "read write"),
+
+        // Known issue, JDK-8038422 (assert() on Windows)
+        // new SharedSizeTestData("-XX:SharedMiscDataSize", "500k",    "miscellaneous data"),
+
+        // Too small a misc code size should not cause a VM crash.
+        // It should result in the following error message:
+        // The shared miscellaneous code space is not large enough
+        // to preload requested classes. Use -XX:SharedMiscCodeSize=
+        // to increase the initial size of shared miscellaneous code space.
+        new SharedSizeTestData("-XX:SharedMiscCodeSize", "20k",     "miscellaneous code"),
+
+        // these values are larger than default ones, but should
+        // be acceptable and not cause failure
+        new SharedSizeTestData("-XX:SharedReadOnlySize",    "20M", null),
+        new SharedSizeTestData("-XX:SharedReadWriteSize",   "20M", null),
+        new SharedSizeTestData("-XX:SharedMiscDataSize",    "20M", null),
+        new SharedSizeTestData("-XX:SharedMiscCodeSize",    "20M", null)
+    };
+
+    public static void main(String[] args) throws Exception {
+        String fileName = "test.jsa";
+
+        for (SharedSizeTestData td : testTable) {
+            String option = td.optionName + "=" + td.optionValue;
+            System.out.println("testing option <" + option + ">");
+
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+               "-XX:+UnlockDiagnosticVMOptions",
+               "-XX:SharedArchiveFile=./" + fileName,
+               option,
+               "-Xshare:dump");
+
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+            if (td.expectedErrorMsg != null) {
+                output.shouldContain("The shared " + td.expectedErrorMsg
+                    + " space is not large enough");
+
+                output.shouldHaveExitValue(2);
+            } else {
+                output.shouldNotContain("space is not large enough");
+                output.shouldHaveExitValue(0);
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/SharedArchiveFile/PrintSharedArchiveAndExit.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8066670
+ * @summary Testing -XX:+PrintSharedArchiveAndExit option
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class PrintSharedArchiveAndExit {
+  public static void main(String[] args) throws Exception {
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+        "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+    try {
+      output.shouldContain("Loading classes to share");
+      output.shouldHaveExitValue(0);
+
+      // (1) With a valid archive
+      pb = ProcessTools.createJavaProcessBuilder(
+          "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa",
+          "-XX:+PrintSharedArchiveAndExit", "-version");
+      output = new OutputAnalyzer(pb.start());
+      output.shouldContain("archive is valid");
+      output.shouldNotContain("java version");     // Should not print JVM version
+      output.shouldHaveExitValue(0);               // Should report success in error code.
+
+      pb = ProcessTools.createJavaProcessBuilder(
+          "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa",
+          "-XX:+PrintSharedArchiveAndExit");
+      output = new OutputAnalyzer(pb.start());
+      output.shouldContain("archive is valid");
+      output.shouldNotContain("Usage:");           // Should not print JVM help message
+      output.shouldHaveExitValue(0);               // Should report success in error code.
+
+      // (2) With an invalid archive (boot class path has been prepended)
+      pb = ProcessTools.createJavaProcessBuilder(
+          "-Xbootclasspath/p:foo.jar",
+          "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa",
+          "-XX:+PrintSharedArchiveAndExit", "-version");
+      output = new OutputAnalyzer(pb.start());
+      output.shouldContain("archive is invalid");
+      output.shouldNotContain("java version");     // Should not print JVM version
+      output.shouldHaveExitValue(1);               // Should report failure in error code.
+
+      pb = ProcessTools.createJavaProcessBuilder(
+          "-Xbootclasspath/p:foo.jar",
+          "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa",
+          "-XX:+PrintSharedArchiveAndExit");
+      output = new OutputAnalyzer(pb.start());
+      output.shouldContain("archive is invalid");
+      output.shouldNotContain("Usage:");           // Should not print JVM help message
+      output.shouldHaveExitValue(1);               // Should report failure in error code.
+    } catch (RuntimeException e) {
+      e.printStackTrace();
+      output.shouldContain("Unable to use shared archive");
+      output.shouldHaveExitValue(1);
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/SharedArchiveFile/SharedBaseAddress.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test SharedBaseAddress
+ * @summary Test variety of values for SharedBaseAddress, making sure
+ *          VM handles normal values as well as edge values w/o a crash.
+ * @library /testlibrary
+ * @run main SharedBaseAddress
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class SharedBaseAddress {
+
+    // shared base address test table
+    private static final String[] testTable = {
+        "1g", "8g", "64g","512g", "4t",
+        "32t", "128t", "0",
+        "1", "64k", "64M"
+    };
+
+    public static void main(String[] args) throws Exception {
+        // Known issue on Solaris-Sparc
+        // @ignore JDK-8044600
+        if (Platform.isSolaris() && Platform.isSparc())
+            return;
+
+        for (String testEntry : testTable) {
+            System.out.println("sharedBaseAddress = " + testEntry);
+
+            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+               "-XX:+UnlockDiagnosticVMOptions",
+               "-XX:SharedArchiveFile=test.jsa",
+               "-XX:SharedBaseAddress=" + testEntry,
+               "-Xshare:dump");
+
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+            output.shouldContain("Loading classes to share");
+
+            try {
+                pb = ProcessTools.createJavaProcessBuilder(
+                    "-XX:+UnlockDiagnosticVMOptions",
+                    "-XX:SharedArchiveFile=test.jsa",
+                    "-Xshare:on",
+                    "-version");
+                output = new OutputAnalyzer(pb.start());
+                output.shouldContain("sharing");
+                output.shouldHaveExitValue(0);
+            } catch (RuntimeException e) {
+                output.shouldContain("Unable to use shared archive");
+                output.shouldHaveExitValue(1);
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/SharedArchiveFile/SpaceUtilizationCheck.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test SpaceUtilizationCheck
+ * @summary Check if the space utilization for shared spaces is adequate
+ * @library /testlibrary
+ * @run main SpaceUtilizationCheck
+ */
+
+import com.oracle.java.testlibrary.*;
+
+import java.util.regex.Pattern;
+import java.util.regex.Matcher;
+import java.util.ArrayList;
+import java.lang.Integer;
+
+public class SpaceUtilizationCheck {
+    // Minimum allowed utilization value (percent)
+    // The goal is to have this number be 50% for the RO and RW regions.
+    // Once that feature is implemented, increase MIN_UTILIZATION to 50.
+    private static final int MIN_UTILIZATION = 30;
+
+    // Only RO and RW regions are considered for this check, since they
+    // currently account for the bulk of the shared space
+    private static final int NUMBER_OF_CHECKED_SHARED_REGIONS = 2;
+
+    public static void main(String[] args) throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+           "-XX:+UnlockDiagnosticVMOptions",
+           "-XX:SharedArchiveFile=./test.jsa",
+           "-Xshare:dump");
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        String stdout = output.getStdout();
+        ArrayList<String> utilization = findUtilization(stdout);
+
+        if (utilization.size() != NUMBER_OF_CHECKED_SHARED_REGIONS)
+            throw new RuntimeException("The output format of sharing summary has changed");
+
+        for (String str : utilization) {
+            int value = Integer.parseInt(str);
+            if (value < MIN_UTILIZATION) {
+                System.out.println(stdout);
+                throw new RuntimeException("Utilization for one of the regions " +
+                    "is below a threshold of " + MIN_UTILIZATION + "%");
+            }
+        }
+    }
+
+    public static ArrayList<String> findUtilization(String input) {
+        ArrayList<String> regions = filterRegionsOfInterest(input.split("\n"));
+        return filterByPattern(filterByPattern(regions, "bytes \\[.*% used\\]"), "\\d+");
+    }
+
+    private static ArrayList<String> filterByPattern(Iterable<String> input, String pattern) {
+        ArrayList<String> result = new ArrayList<String>();
+        for (String str : input) {
+            Matcher matcher = Pattern.compile(pattern).matcher(str);
+            if (matcher.find()) {
+                result.add(matcher.group());
+            }
+        }
+        return result;
+    }
+
+    private static ArrayList<String> filterRegionsOfInterest(String[] inputLines) {
+        ArrayList<String> result = new ArrayList<String>();
+        for (String str : inputLines) {
+            if (str.contains("ro space:") || str.contains("rw space:")) {
+                result.add(str);
+            }
+        }
+        return result;
+    }
+}
--- a/test/runtime/lambda-features/InvokespecialInterface.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/runtime/lambda-features/InvokespecialInterface.java	Fri Apr 10 16:58:26 2015 -0700
@@ -33,11 +33,12 @@
 import java.util.function.*;
 import java.util.*;
 
+public class InvokespecialInterface {
 interface I {
   default void imethod() { System.out.println("I::imethod"); }
 }
 
-class C implements I {
+static class C implements I {
   public void foo() { I.super.imethod(); }  // invokespecial InterfaceMethod
   public void bar() { I i = this; i.imethod(); } // invokeinterface same
   public void doSomeInvokedynamic() {
@@ -48,7 +49,6 @@
   }
 }
 
-public class InvokespecialInterface {
   public static void main(java.lang.String[] unused) {
      // need to create C and call I::foo()
      C c = new C();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/lambda-features/TestInterfaceInit.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8034275
+ * @summary [JDK 8u40] Test interface initialization: only for interfaces declaring default methods
+ * @run main TestInterfaceInit
+ */
+import java.util.List;
+import java.util.Arrays;
+import java.util.ArrayList;
+
+public class TestInterfaceInit {
+
+   static List<Class<?>> cInitOrder = new ArrayList<>();
+
+   // Declares a default method and initializes
+   interface I {
+       boolean v = TestInterfaceInit.out(I.class);
+        default void x() {}
+   }
+
+   // Declares a default method and initializes
+   interface J extends I {
+       boolean v = TestInterfaceInit.out(J.class);
+       default void x() {}
+   }
+   // No default method, does not initialize
+   interface JN extends J {
+       boolean v = TestInterfaceInit.out(JN.class);
+   }
+
+   // Declares a default method and initializes
+   interface K extends I {
+       boolean v = TestInterfaceInit.out(K.class);
+        default void x() {}
+   }
+
+   // No default method, does not initialize
+   interface KN extends K {
+       boolean v = TestInterfaceInit.out(KN.class);
+   }
+
+   interface L extends JN, KN {
+       boolean v = TestInterfaceInit.out(L.class);
+        default void x() {}
+   }
+
+   public static void main(String[] args) {
+       // Trigger initialization
+       boolean v = L.v;
+
+       List<Class<?>> expectedCInitOrder = Arrays.asList(I.class,J.class,K.class,L.class);
+       if (!cInitOrder.equals(expectedCInitOrder)) {
+         throw new RuntimeException(String.format("Class initialization array %s not equal to expected array %s", cInitOrder, expectedCInitOrder));
+       }
+   }
+
+   static boolean out(Class c) {
+       System.out.println("#: initializing " + c.getName());
+       cInitOrder.add(c);
+       return true;
+   }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/lambda-features/TestInterfaceOrder.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8034275
+ * @summary [JDK 8u40] Test interface initialization order
+ * @run main TestInterfaceOrder
+ */
+
+import java.util.List;
+import java.util.Arrays;
+import java.util.ArrayList;
+
+public class TestInterfaceOrder {
+  static List<Class<?>> cInitOrder = new ArrayList<>();
+
+  public static void main(java.lang.String[] args) {
+    //Trigger initialization
+    C c = new C();
+
+    List<Class<?>> expectedCInitOrder = Arrays.asList(I.class, J.class, A.class, K.class, B.class, L.class, C.class);
+    if (!cInitOrder.equals(expectedCInitOrder)) {
+      throw new RuntimeException(String.format("Class initialization order %s not equal to expected order %s", cInitOrder, expectedCInitOrder));
+    }
+  }
+
+  interface I {
+    boolean v = TestInterfaceOrder.out(I.class);
+   default void i() {}
+  }
+
+  interface J extends I {
+    boolean v = TestInterfaceOrder.out(J.class);
+    default void j() {}
+  }
+
+  static class A implements J {
+    static boolean v = TestInterfaceOrder.out(A.class);
+  }
+
+  interface K extends I {
+    boolean v = TestInterfaceOrder.out(K.class);
+    default void k() {}
+  }
+
+  static class B extends A implements K {
+    static boolean v = TestInterfaceOrder.out(B.class);
+  }
+
+  interface L  {
+    boolean v = TestInterfaceOrder.out(L.class);
+    default void l() {}
+  }
+
+  static class C extends B implements L {
+    static boolean v = TestInterfaceOrder.out(C.class);
+  }
+
+
+   static boolean out(Class c) {
+       System.out.println("#: initializing " + c.getName());
+       cInitOrder.add(c);
+       return true;
+   }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary/com/oracle/java/testlibrary/BuildHelper.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.java.testlibrary;
+
+import java.io.File;
+import java.io.FileReader;
+import java.util.Properties;
+
+public class BuildHelper {
+
+    /**
+     * Commercial builds should have the BUILD_TYPE set to commercial
+     * within the release file, found at the root of the JDK.
+     */
+    public static boolean isCommercialBuild() throws Exception {
+        String buildType = getReleaseProperty("BUILD_TYPE","notFound");
+        return buildType.equals("commercial");
+    }
+
+
+    /**
+     * Return the value for the property key, or defaultValue if the property is not found.
+     * If present, double quotes are trimmed.
+     */
+    public static String getReleaseProperty(String key, String defaultValue) throws Exception {
+        Properties properties = getReleaseProperties();
+        String value = properties.getProperty(key, defaultValue);
+        return trimDoubleQuotes(value);
+    }
+
+    /**
+     * Return the value for the property key, or null if the property is not found.
+     * If present, double quotes are trimmed.
+     */
+    public static String getReleaseProperty(String key) throws Exception {
+        return getReleaseProperty(key, null);
+    }
+
+    /**
+     * Get properties from the release file
+     */
+    public static Properties getReleaseProperties() throws Exception {
+        Properties properties = new Properties();
+        properties.load(new FileReader(getReleaseFile()));
+        return properties;
+    }
+
+    /**
+     * Every JDK has a release file in its root.
+     * @return A handle to the release file.
+     */
+    public static File getReleaseFile() throws Exception {
+        String jdkPath = getJDKRoot();
+        File releaseFile = new File(jdkPath,"release");
+        if (!releaseFile.canRead()) {
+            throw new Exception("Release file is not readable, or it is absent: " +
+                    releaseFile.getCanonicalPath());
+        }
+        return releaseFile;
+    }
+
+    /**
+     * Returns path to the JDK under test.
+     * This path is obtained through the test.jdk property, usually set by JTREG.
+     */
+    public static String getJDKRoot() {
+        String jdkPath = System.getProperty("test.jdk");
+        if (jdkPath == null) {
+            throw new RuntimeException("System property 'test.jdk' not set. This property is normally set by jtreg. "
+                    + "When running the test separately, set this property using '-Dtest.jdk=/path/to/jdk'.");
+        }
+        return jdkPath;
+    }
+
+    /**
+     * Trim double quotes from the beginning and the end of the given string.
+     * @param original string to trim.
+     * @return a new trimmed string.
+     */
+    public static String trimDoubleQuotes(String original) {
+        if (original == null) { return null; }
+        String trimmed = original.replaceAll("^\"+|\"+$", "");
+        return trimmed;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary/com/oracle/java/testlibrary/DynamicVMOption.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.java.testlibrary;
+
+import com.sun.management.HotSpotDiagnosticMXBean;
+import java.lang.management.ManagementFactory;
+
+/**
+ * A utility class to work with VM options which could be altered during
+ * execution.
+ *
+ * This class is a wrapper around {@code com.sun.management.VMOption}.
+ * It provides a more convenient interface for reading and writing the values.
+ *
+ */
+public class DynamicVMOption {
+
+    private final HotSpotDiagnosticMXBean mxBean;
+
+    /**
+     * VM option name, like "MinHeapFreeRatio".
+     */
+    public final String name;
+
+    /**
+     * Creates an instance of DynamicVMOption.
+     *
+     * @param name the VM option name
+     */
+    public DynamicVMOption(String name) {
+        this.name = name;
+        mxBean = ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class);
+    }
+
+    /**
+     * Sets a new value for the option.
+     * Trying to set a value that is not applicable will cause an IllegalArgumentException.
+     * Behavior with null is undefined; most likely an NPE will be thrown.
+     *
+     * @param newValue the value to be set
+     * @see #getValue()
+     * @throws IllegalArgumentException if newValue is not applicable to the option
+     */
+    public final void setValue(String newValue) {
+        mxBean.setVMOption(name, newValue);
+    }
+
+    /**
+     * Returns the value of the option.
+     *
+     * @return the current option value
+     * @see #setValue(java.lang.String)
+     */
+    public final String getValue() {
+        return mxBean.getVMOption(name).getValue();
+    }
+
+    /**
+     * Returns true if the option is writable, false otherwise.
+     *
+     * @return true if the option is writable, false otherwise
+     */
+    public final boolean isWriteable() {
+        return mxBean.getVMOption(name).isWriteable();
+    }
+
+    /**
+     * Checks if the given value is applicable for the option.
+     *
+     * This method tries to set the option to the new value. If no exception
+     * is thrown, the value is treated as valid.
+     *
+     * Calling this method will not change the option value. After an attempt
+     * to set a new value, the option will be restored to its previous value.
+     *
+     * @param value the value to verify
+     * @return true if option could be set to the given value
+     */
+    public boolean isValidValue(String value) {
+        boolean isValid = true;
+        String oldValue = getValue();
+        try {
+            setValue(value);
+        } catch (NullPointerException e) {
+            if (value == null) {
+                isValid = false;
+            }
+        } catch (IllegalArgumentException e) {
+            isValid = false;
+        } finally {
+            setValue(oldValue);
+        }
+        return isValid;
+    }
+
+    /**
+     * Returns the value of the given VM option as String.
+     *
+     * This is a simple shortcut for {@code new DynamicVMOption(name).getValue()}
+     *
+     * @param name the name of VM option
+     * @return value as a string
+     * @see #getValue()
+     */
+    public static String getString(String name) {
+        return new DynamicVMOption(name).getValue();
+    }
+
+    /**
+     * Returns the value of the given option as int.
+     *
+     * @param name the name of VM option
+     * @return value parsed as integer
+     * @see #getString(java.lang.String)
+     *
+     */
+    public static int getInt(String name) {
+        return Integer.parseInt(getString(name));
+    }
+
+    /**
+     * Sets the VM option to a new value.
+     *
+     * This is a simple shortcut for {@code new DynamicVMOption(name).setValue(value)}
+     *
+     * @param name the name of VM option
+     * @param value the value to be set
+     * @see #setValue(java.lang.String)
+     */
+    public static void setString(String name, String value) {
+        new DynamicVMOption(name).setValue(value);
+    }
+
+    /**
+     * Sets the VM option value to a new integer value.
+     *
+     * @param name the name of VM option
+     * @param value the integer value to be set
+     * @see #setString(java.lang.String, java.lang.String)
+     */
+    public static void setInt(String name, int value) {
+        new DynamicVMOption(name).setValue(Integer.toString(value));
+    }
+
+}
--- a/test/testlibrary/com/oracle/java/testlibrary/DynamicVMOptionChecker.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.java.testlibrary;
-
-import com.sun.management.HotSpotDiagnosticMXBean;
-import com.sun.management.VMOption;
-import java.lang.management.ManagementFactory;
-
-/**
- * Simple class to check writeability, invalid and valid values for VMOption
- */
-public class DynamicVMOptionChecker {
-
-    /**
-     * Reads VM option from PlatformMXBean and parse it to integer value
-     *
-     * @param name of option
-     * @return parsed value
-     */
-    public static int getIntValue(String name) {
-
-        VMOption option = ManagementFactory.
-                getPlatformMXBean(HotSpotDiagnosticMXBean.class).
-                getVMOption(name);
-
-        return Integer.parseInt(option.getValue());
-    }
-
-    /**
-     * Sets VM option value
-     *
-     * @param name of option
-     * @param value to set
-     */
-    public static void setIntValue(String name, int value) {
-        ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class).setVMOption(name, Integer.toString(value));
-    }
-
-    /**
-     * Checks that VM option is dynamically writable
-     *
-     * @param name
-     * @throws RuntimeException if option if not writable
-     * @return always true
-     */
-    public static boolean checkIsWritable(String name) {
-        VMOption option = ManagementFactory.
-                getPlatformMXBean(HotSpotDiagnosticMXBean.class).
-                getVMOption(name);
-
-        if (!option.isWriteable()) {
-            throw new RuntimeException(name + " is not writable");
-        }
-
-        return true;
-    }
-
-    /**
-     * Checks that value cannot be set
-     *
-     * @param name of flag
-     * @param value string representation of value to set
-     * @throws RuntimeException on error - when expected exception hasn't been thrown
-     */
-    public static void checkInvalidValue(String name, String value) {
-        // should throw
-        try {
-            ManagementFactory.
-                    getPlatformMXBean(HotSpotDiagnosticMXBean.class).
-                    setVMOption(name, value);
-
-        } catch (IllegalArgumentException e) {
-            return;
-        }
-
-        throw new RuntimeException("Expected IllegalArgumentException was not thrown, " + name + "= " + value);
-    }
-
-    /**
-     * Checks that value can be set
-     *
-     * @param name of flag to set
-     * @param value string representation of value to set
-     * @throws RuntimeException on error - when value in VM is not equal to origin
-     */
-    public static void checkValidValue(String name, String value) {
-        ManagementFactory.
-                getPlatformMXBean(HotSpotDiagnosticMXBean.class).
-                setVMOption(name, value);
-
-        VMOption option = ManagementFactory.
-                getPlatformMXBean(HotSpotDiagnosticMXBean.class).
-                getVMOption(name);
-
-        if (!option.getValue().equals(value)) {
-            throw new RuntimeException("Actual value of " + name + " \"" + option.getValue()
-                    + "\" not equal origin \"" + value + "\"");
-        }
-    }
-
-}
--- a/test/testlibrary/com/oracle/java/testlibrary/TestDynamicVMOption.java	Fri Apr 10 16:55:38 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,104 +0,0 @@
-/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package com.oracle.java.testlibrary;
-
-/**
- * Simple class to check writability and invalid/valid values for a concrete VMOption
- */
-public class TestDynamicVMOption {
-
-    private final String name;
-    private final int value;
-
-    /**
-     * Constructor
-     *
-     * @param name of VM option to test
-     */
-    public TestDynamicVMOption(String name) {
-        this.name = name;
-        this.value = DynamicVMOptionChecker.getIntValue(name);
-        System.out.println(this.name + " = " + this.value);
-    }
-
-    /**
-     * Checks that this option rejects invalid (out-of-range) percentage values
-     *
-     * @throws RuntimeException if an invalid value is accepted
-     */
-    public void testPercentageValues() {
-        checkInvalidValue(Integer.toString(Integer.MIN_VALUE));
-        checkInvalidValue(Integer.toString(Integer.MAX_VALUE));
-        checkInvalidValue("-10");
-        checkInvalidValue("190");
-    }
-
-    /**
-     * Reads the VM option from the PlatformMXBean and parses it to an integer value
-     *
-     * @return value
-     */
-    public int getIntValue() {
-        return DynamicVMOptionChecker.getIntValue(this.name);
-    }
-
-    /**
-     * Sets VM option value
-     *
-     * @param value to set
-     */
-    public void setIntValue(int value) {
-        DynamicVMOptionChecker.setIntValue(this.name, value);
-    }
-
-    /**
-     * Checks that this VM option is dynamically writable
-     *
-     * @throws RuntimeException if the option is not writable
-     * @return true
-     */
-    public boolean checkIsWritable() throws RuntimeException {
-        return DynamicVMOptionChecker.checkIsWritable(this.name);
-    }
-
-    /**
-     * Checks that value for this VM option cannot be set
-     *
-     * @param value to check
-     * @throws RuntimeException when the expected exception has not been thrown
-     */
-    public void checkInvalidValue(String value) {
-        DynamicVMOptionChecker.checkInvalidValue(this.name, value);
-    }
-
-    /**
-     * Checks that value for this VM option can be set
-     *
-     * @param value to check
-     * @throws RuntimeException when the value read back from the VM is not equal to the value that was set
-     */
-    public void checkValidValue(String value) {
-        DynamicVMOptionChecker.checkValidValue(this.name, value);
-    }
-
-}
--- a/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Fri Apr 10 16:55:38 2015 -0700
+++ b/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Fri Apr 10 16:58:26 2015 -0700
@@ -25,7 +25,13 @@
 package sun.hotspot;
 
 import java.lang.reflect.Executable;
+import java.util.Arrays;
+import java.util.List;
+import java.util.function.Function;
+import java.util.stream.Stream;
 import java.security.BasicPermission;
+import java.net.URL;
+
 import sun.hotspot.parser.DiagnosticCommand;
 
 public class WhiteBox {
@@ -69,6 +75,8 @@
   // Memory
   public native long getObjectAddress(Object o);
   public native int  getHeapOopSize();
+  public native boolean isObjectInOldGen(Object o);
+  public native long getObjectSize(Object o);
 
   // Runtime
   // Make sure class name is in the correct format
@@ -77,6 +85,15 @@
   }
   private native boolean isClassAlive0(String name);
 
+  // Resource/Class Lookup Cache
+  public native boolean classKnownToNotExist(ClassLoader loader, String name);
+  public native URL[] getLookupCacheURLs(ClassLoader loader);
+  public native int[] getLookupCacheMatches(ClassLoader loader, String name);
+
+  // JVMTI
+  public native void addToBootstrapClassLoaderSearch(String segment);
+  public native void addToSystemClassLoaderSearch(String segment);
+
   // G1
   public native boolean g1InConcurrentMark();
   public native boolean g1IsHumongous(Object o);
@@ -91,8 +108,10 @@
   public native void NMTCommitMemory(long addr, long size);
   public native void NMTUncommitMemory(long addr, long size);
   public native void NMTReleaseMemory(long addr, long size);
-  public native boolean NMTWaitForDataMerge();
+  public native long NMTMallocWithPseudoStack(long size, int index);
   public native boolean NMTIsDetailSupported();
+  public native boolean NMTChangeTrackingLevel();
+  public native int NMTGetHashSize();
 
   // Compiler
   public native void    deoptimizeAll();
@@ -129,7 +148,7 @@
   }
   public native int     getCompileQueueSize(int compLevel);
   public native boolean testSetForceInlineMethod(Executable method, boolean value);
-  public boolean        enqueueMethodForCompilation(Executable method, int compLevel) {
+  public        boolean enqueueMethodForCompilation(Executable method, int compLevel) {
     return enqueueMethodForCompilation(method, compLevel, -1 /*InvocationEntryBci*/);
   }
   public native boolean enqueueMethodForCompilation(Executable method, int compLevel, int entry_bci);
@@ -142,6 +161,13 @@
 
   // Memory
   public native void readReservedMemory();
+  public native long allocateMetaspace(ClassLoader classLoader, long size);
+  public native void freeMetaspace(ClassLoader classLoader, long addr, long size);
+  public native long incMetaspaceCapacityUntilGC(long increment);
+  public native long metaspaceCapacityUntilGC();
+
+  // force Young GC
+  public native void youngGC();
 
   // force Full GC
   public native void fullGC();
@@ -150,8 +176,49 @@
   public native int stressVirtualSpaceResize(long reservedSpaceSize, long magnitude, long iterations);
   public native void runMemoryUnitTests();
   public native void readFromNoaccessArea();
+  public native long getThreadStackSize();
+  public native long getThreadRemainingStackSize();
 
   // CPU features
   public native String getCPUFeatures();
 
+  // Native extensions
+  public native long getHeapUsageForContext(int context);
+  public native long getHeapRegionCountForContext(int context);
+  public native int getContextForObject(Object obj);
+  public native void printRegionInfo(int context);
+
+  // VM flags
+  public native void    setBooleanVMFlag(String name, boolean value);
+  public native void    setIntxVMFlag(String name, long value);
+  public native void    setUintxVMFlag(String name, long value);
+  public native void    setUint64VMFlag(String name, long value);
+  public native void    setStringVMFlag(String name, String value);
+  public native void    setDoubleVMFlag(String name, double value);
+  public native Boolean getBooleanVMFlag(String name);
+  public native Long    getIntxVMFlag(String name);
+  public native Long    getUintxVMFlag(String name);
+  public native Long    getUint64VMFlag(String name);
+  public native String  getStringVMFlag(String name);
+  public native Double  getDoubleVMFlag(String name);
+  private final List<Function<String,Object>> flagsGetters = Arrays.asList(
+    this::getBooleanVMFlag, this::getIntxVMFlag, this::getUintxVMFlag,
+    this::getUint64VMFlag, this::getStringVMFlag, this::getDoubleVMFlag);
+
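+  // Reads a flag whose type is not known in advance: tries each typed getter above
+  // and returns the first non-null result, or null if the flag does not exist.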
+  public Object getVMFlag(String name) {
+    return flagsGetters.stream()
+                       .map(f -> f.apply(name))
+                       .filter(x -> x != null)
+                       .findAny()
+                       .orElse(null);
+  }
+  public native int getOffsetForName0(String name);
+  public int getOffsetForName(String name) throws Exception {
+    int offset = getOffsetForName0(name);
+    if (offset == -1) {
+      throw new RuntimeException(name + " not found");
+    }
+    return offset;
+  }
+
 }
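A minimal usage sketch of the typed flag accessors and the untyped getVMFlag helper added to WhiteBox above. The class name, main method, and the flag MaxHeapSize are illustrative assumptions, not part of this changeset; as with the tests below, the JVM must be started with -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI for the WhiteBox natives to be available.

    import sun.hotspot.WhiteBox;

    public class VmFlagReadSketch {
        public static void main(String[] args) {
            WhiteBox wb = WhiteBox.getWhiteBox();
            // Typed read: MaxHeapSize is a uintx flag, so the Long-valued getter applies.
            Long maxHeap = wb.getUintxVMFlag("MaxHeapSize");
            // Untyped read: getVMFlag tries each typed getter and returns the first non-null result.
            Object sameFlag = wb.getVMFlag("MaxHeapSize");
            System.out.println("MaxHeapSize = " + maxHeap + " / " + sameFlag);
        }
    }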
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary_tests/whitebox/vm_flags/BooleanTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test BooleanTest
+ * @bug 8028756
+ * @library /testlibrary /testlibrary/whitebox
+ * @build BooleanTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI BooleanTest
+ * @summary testing of WB::set/getBooleanVMFlag()
+ * @author igor.ignatyev@oracle.com
+ */
+
+import sun.hotspot.WhiteBox;
+import com.oracle.java.testlibrary.*;
+import sun.management.*;
+import com.sun.management.*;
+
+public class BooleanTest {
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+    private static final Boolean[] TESTS = {true, false, true, true, false};
+    private static final String TEST_NAME = "BooleanTest";
+    private static final String FLAG_NAME = "PrintCompilation";
+    private static final String METHOD = TEST_NAME + "::method";
+    private static final String METHOD1 = METHOD + "1";
+    private static final String METHOD2 = METHOD + "2";
+
+    public static void main(String[] args) throws Exception {
+        if (args.length == 0) {
+            VmFlagTest.runTest(FLAG_NAME, TESTS,
+                VmFlagTest.WHITE_BOX::setBooleanVMFlag,
+                VmFlagTest.WHITE_BOX::getBooleanVMFlag);
+            testFunctional(false);
+            testFunctional(true);
+        } else {
+            boolean value = Boolean.valueOf(args[0]);
+            method1();
+            VmFlagTest.WHITE_BOX.setBooleanVMFlag(FLAG_NAME, value);
+            method2();
+        }
+    }
+
+    private static void testFunctional(boolean value) throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-Xbootclasspath/a:.",
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:+WhiteBoxAPI",
+            "-Xcomp",
+            "-XX:CompileCommand=compileonly," + METHOD + "*",
+            "-XX:" + (value ? "-" : "+") + FLAG_NAME,
+            TEST_NAME,
+            "" + value);
+        OutputAnalyzer out = new OutputAnalyzer(pb.start());
+        if (value) {
+            out.shouldNotContain(METHOD1);
+            out.shouldContain(METHOD2);
+        } else {
+            out.shouldContain(METHOD1);
+            out.shouldNotContain(METHOD2);
+        }
+    }
+
+    private static void method1() { }
+    private static void method2() { }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary_tests/whitebox/vm_flags/DoubleTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test DoubleTest
+ * @bug 8028756
+ * @library /testlibrary /testlibrary/whitebox
+ * @build DoubleTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI DoubleTest
+ * @summary testing of WB::set/getDoubleVMFlag()
+ * @author igor.ignatyev@oracle.com
+ */
+
+public class DoubleTest {
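+    // FLAG_NAME is null: VmFlagTest.runTest then skips the positive (existing-flag)
+    // path and only exercises the nonexistent-flag case for the double accessors.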
+    private static final String FLAG_NAME = null;
+    private static final Double[] TESTS = {0d, -0d, -1d, 1d,
+            Double.MAX_VALUE, Double.MIN_VALUE, Double.NaN,
+            Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY};
+
+    public static void main(String[] args) throws Exception {
+        VmFlagTest.runTest(FLAG_NAME, TESTS,
+            VmFlagTest.WHITE_BOX::setDoubleVMFlag,
+            VmFlagTest.WHITE_BOX::getDoubleVMFlag);
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary_tests/whitebox/vm_flags/IntxTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test IntxTest
+ * @bug 8028756
+ * @library /testlibrary /testlibrary/whitebox
+ * @build IntxTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI IntxTest
+ * @summary testing of WB::set/getIntxVMFlag()
+ * @author igor.ignatyev@oracle.com
+ */
+
+public class IntxTest {
+    private static final String FLAG_NAME = "OnStackReplacePercentage";
+    private static final Long[] TESTS = {0L, 100L, -1L,
+            (long) Integer.MAX_VALUE, (long) Integer.MIN_VALUE};
+
+    public static void main(String[] args) throws Exception {
+        VmFlagTest.runTest(FLAG_NAME, TESTS,
+            VmFlagTest.WHITE_BOX::setIntxVMFlag,
+            VmFlagTest.WHITE_BOX::getIntxVMFlag);
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary_tests/whitebox/vm_flags/StringTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test StringTest
+ * @bug 8028756
+ * @library /testlibrary /testlibrary/whitebox
+ * @build StringTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI StringTest
+ * @summary testing of WB::set/getStringVMFlag()
+ * @author igor.ignatyev@oracle.com
+ */
+
+public class StringTest {
+    private static final String FLAG_NAME = "CompileOnly";
+    private static final String[] TESTS = {"StringTest::*", ""};
+
+    public static void main(String[] args) throws Exception {
+        VmFlagTest.runTest(FLAG_NAME, TESTS,
+            VmFlagTest.WHITE_BOX::setStringVMFlag,
+            VmFlagTest.WHITE_BOX::getStringVMFlag);
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary_tests/whitebox/vm_flags/Uint64Test.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test Uint64Test
+ * @bug 8028756
+ * @library /testlibrary /testlibrary/whitebox
+ * @build Uint64Test
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI Uint64Test
+ * @summary testing of WB::set/getUint64VMFlag()
+ * @author igor.ignatyev@oracle.com
+ */
+
+public class Uint64Test {
+    private static final String FLAG_NAME = "MaxRAM";
+    private static final Long[] TESTS = {0L, 100L, (long) Integer.MAX_VALUE,
+            -1L, Long.MAX_VALUE, Long.MIN_VALUE};
+
+    public static void main(String[] args) throws Exception {
+        VmFlagTest.runTest(FLAG_NAME, TESTS,
+            VmFlagTest.WHITE_BOX::setUint64VMFlag,
+            VmFlagTest.WHITE_BOX::getUint64VMFlag);
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary_tests/whitebox/vm_flags/UintxTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test UintxTest
+ * @bug 8028756
+ * @library /testlibrary /testlibrary/whitebox
+ * @build UintxTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI UintxTest
+ * @summary testing of WB::set/getUintxVMFlag()
+ * @author igor.ignatyev@oracle.com
+ */
+import com.oracle.java.testlibrary.Platform;
+
+public class UintxTest {
+    private static final String FLAG_NAME = "TypeProfileLevel";
+    private static final Long[] TESTS = {0L, 100L, (long) Integer.MAX_VALUE,
+        (1L << 32L) - 1L, 1L << 32L};
+    private static final Long[] EXPECTED_64 = TESTS;
+    private static final Long[] EXPECTED_32 = {0L, 100L,
+        (long) Integer.MAX_VALUE, (1L << 32L) - 1L, 0L};
+
+    public static void main(String[] args) throws Exception {
+        VmFlagTest.runTest(FLAG_NAME, TESTS,
+            Platform.is64bit() ? EXPECTED_64 : EXPECTED_32,
+            VmFlagTest.WHITE_BOX::setUintxVMFlag,
+            VmFlagTest.WHITE_BOX::getUintxVMFlag);
+    }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/testlibrary_tests/whitebox/vm_flags/VmFlagTest.java	Fri Apr 10 16:58:26 2015 -0700
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.Objects;
+import java.util.function.BiConsumer;
+import java.util.function.Function;
+import sun.hotspot.WhiteBox;
+import sun.management.*;
+import com.sun.management.*;
+import com.oracle.java.testlibrary.*;
+
+public final class VmFlagTest<T> {
+    public static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+
+    private static final String NONEXISTENT_FLAG = "NonexistentFlag";
+    private final String flagName;
+    private final BiConsumer<T, T> test;
+    private final BiConsumer<String, T> set;
+    private final Function<String, T> get;
+
+    protected VmFlagTest(String flagName, BiConsumer<String, T> set,
+            Function<String, T> get, boolean isPositive) {
+        this.flagName = flagName;
+        this.set = set;
+        this.get = get;
+        if (isPositive) {
+            test = this::testPositive;
+        } else {
+            test = this::testNegative;
+        }
+    }
+
+    private void setNewValue(T value) {
+        set.accept(flagName, value);
+    }
+
+    private T getValue() {
+        T t = get.apply(flagName);
+        System.out.println("T = " + t);
+        return t;
+    }
+
+    protected static <T> void runTest(String existentFlag, T[] tests,
+            BiConsumer<String, T> set, Function<String, T> get) {
+        runTest(existentFlag, tests, tests, set, get);
+    }
+
+    protected static <T> void runTest(String existentFlag, T[] tests,
+            T[] results, BiConsumer<String, T> set, Function<String, T> get) {
+        if (existentFlag != null) {
+            new VmFlagTest(existentFlag, set, get, true).test(tests, results);
+        }
+        new VmFlagTest(NONEXISTENT_FLAG, set, get, false).test(tests, results);
+    }
+
+    public final void test(T[] tests, T[] results) {
+        Asserts.assertEQ(tests.length, results.length, "[TESTBUG] tests.length != results.length");
+        for (int i = 0, n = tests.length ; i < n; ++i) {
+            test.accept(tests[i], results[i]);
+        }
+    }
+
+    protected String getVMOptionAsString() {
+        HotSpotDiagnosticMXBean diagnostic
+                = ManagementFactoryHelper.getDiagnosticMXBean();
+        VMOption tmp;
+        try {
+            tmp = diagnostic.getVMOption(flagName);
+        } catch (IllegalArgumentException e) {
+            tmp = null;
+        }
+        return tmp == null ? null : tmp.getValue();
+    }
+
+    private void testPositive(T value, T expected) {
+        Asserts.assertEQ(getVMOptionAsString(), asString(getValue()));
+        setNewValue(value);
+        String newValue = getVMOptionAsString();
+        Asserts.assertEQ(newValue, asString(expected));
+        Asserts.assertEQ(getVMOptionAsString(), asString(getValue()));
+    }
+
+    private void testNegative(T value, T expected) {
+        String oldValue = getVMOptionAsString();
+        Asserts.assertEQ(oldValue, asString(getValue()));
+        setNewValue(value);
+        String newValue = getVMOptionAsString();
+        Asserts.assertEQ(oldValue, newValue);
+    }
+
+    private String asString(Object value) {
+        return value == null ? null : "" + value;
+    }
+}
+
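For reference, a sketch of how the VmFlagTest harness above could be reused for a further flag; the class name is hypothetical and HeapDumpOnOutOfMemoryError is assumed (not verified by this changeset) to be a writable boolean flag, so the sketch simply mirrors the BooleanTest pattern.

    /* Hypothetical test, default package, alongside VmFlagTest. */
    public class HeapDumpFlagTestSketch {
        // Assumed manageable boolean flag; any writable boolean flag would do.
        private static final String FLAG_NAME = "HeapDumpOnOutOfMemoryError";
        private static final Boolean[] TESTS = {true, false, true};

        public static void main(String[] args) throws Exception {
            // Positive path: set each value via WhiteBox and read it back through the
            // diagnostic MXBean; negative path: the same values against a nonexistent
            // flag must leave the VM state unchanged.
            VmFlagTest.runTest(FLAG_NAME, TESTS,
                VmFlagTest.WHITE_BOX::setBooleanVMFlag,
                VmFlagTest.WHITE_BOX::getBooleanVMFlag);
        }
    }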