changeset 1543:1a1603f975b5

Merge
author kvn
date Wed, 19 May 2010 10:22:39 -0700
parents eb79484f795f (current diff) b5fdf39b9749 (diff)
children 1a88d3c58e1d
files src/share/vm/opto/cfgnode.cpp
diffstat 282 files changed, 7091 insertions(+), 5194 deletions(-)
--- a/.hgtags	Mon Apr 05 10:17:15 2010 -0700
+++ b/.hgtags	Wed May 19 10:22:39 2010 -0700
@@ -86,3 +86,11 @@
 bf823ef06b4f211e66988d76a2e2669be5c0820e jdk7-b86
 07226e9eab8f74b37346b32715f829a2ef2c3188 hs18-b01
 e7e7e36ccdb5d56edd47e5744351202d38f3b7ad jdk7-b87
+4b60f23c42231f7ecd62ad1fcb6a9ca26fa57d1b jdk7-b88
+15836273ac2494f36ef62088bc1cb6f3f011f565 jdk7-b89
+4b60f23c42231f7ecd62ad1fcb6a9ca26fa57d1b hs18-b02
+605c9707a766ff518cd841fc04f9bb4b36a3a30b jdk7-b90
+e0a1a502e402dbe7bf2d9102b4084a7e79a99a9b jdk7-b91
+25f53b53aaa3eb8b2d5391a1e8de9a76ae1dd8a2 hs18-b03
+3221d1887d30341bedfdac1dbf365ea41beff20f jdk7-b92
+310cdbc355355a13aa53c002b6bde4a8c5ba67ff hs18-b04
--- a/agent/src/os/linux/ps_core.c	Mon Apr 05 10:17:15 2010 -0700
+++ b/agent/src/os/linux/ps_core.c	Wed May 19 10:22:39 2010 -0700
@@ -884,9 +884,12 @@
       }
 
       // read name of the shared object
-      if (read_string(ph, (uintptr_t) lib_name_addr, lib_name, sizeof(lib_name)) != true) {
+      lib_name[0] = '\0';
+      if (lib_name_addr != 0 &&
+          read_string(ph, (uintptr_t) lib_name_addr, lib_name, sizeof(lib_name)) != true) {
          print_debug("can't read shared object name\n");
-         return false;
+         // don't let a failure to read the name stop opening the file.  If something
+         // is really wrong, it will fail later.
       }
 
       if (lib_name[0] != '\0') {
--- a/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Mon Apr 05 10:17:15 2010 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2005-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -494,6 +494,68 @@
                 }
             }
         },
+        new Command("revptrs", "revptrs address", false) {
+            public void doit(Tokens t) {
+                int tokens = t.countTokens();
+                if (tokens != 1 && (tokens != 2 || !t.nextToken().equals("-c"))) {
+                    usage();
+                    return;
+                }
+                boolean chase = tokens == 2;
+                ReversePtrs revptrs = VM.getVM().getRevPtrs();
+                if (revptrs == null) {
+                    out.println("Computing reverse pointers...");
+                    ReversePtrsAnalysis analysis = new ReversePtrsAnalysis();
+                    final boolean[] complete = new boolean[1];
+                    HeapProgressThunk thunk = new HeapProgressThunk() {
+                            public void heapIterationFractionUpdate(double d) {}
+                            public synchronized void heapIterationComplete() {
+                                complete[0] = true;
+                                notify();
+                            }
+                        };
+                    analysis.setHeapProgressThunk(thunk);
+                    analysis.run();
+                    while (!complete[0]) {
+                        synchronized (thunk) {
+                            try {
+                                thunk.wait();
+                            } catch (Exception e) {
+                            }
+                        }
+                    }
+                    revptrs = VM.getVM().getRevPtrs();
+                    out.println("Done.");
+                }
+                Address a = VM.getVM().getDebugger().parseAddress(t.nextToken());
+                if (VM.getVM().getUniverse().heap().isInReserved(a)) {
+                    OopHandle handle = a.addOffsetToAsOopHandle(0);
+                    Oop oop = VM.getVM().getObjectHeap().newOop(handle);
+                    ArrayList ptrs = revptrs.get(oop);
+                    if (ptrs == null) {
+                        out.println("no live references to " + a);
+                    } else {
+                        if (chase) {
+                            while (ptrs.size() == 1) {
+                                LivenessPathElement e = (LivenessPathElement)ptrs.get(0);
+                                ByteArrayOutputStream bos = new ByteArrayOutputStream();
+                                Oop.printOopValueOn(e.getObj(), new PrintStream(bos));
+                                out.println(bos.toString());
+                                ptrs = revptrs.get(e.getObj());
+                            }
+                        } else {
+                            for (int i = 0; i < ptrs.size(); i++) {
+                                LivenessPathElement e = (LivenessPathElement)ptrs.get(i);
+                                ByteArrayOutputStream bos = new ByteArrayOutputStream();
+                                Oop.printOopValueOn(e.getObj(), new PrintStream(bos));
+                                out.println(bos.toString());
+                                oop = e.getObj();
+                            }
+                        }
+                    }
+                }
+            }
+        },
         new Command("inspect", "inspect expression", false) {
             public void doit(Tokens t) {
                 if (t.countTokens() != 1) {
@@ -816,8 +878,24 @@
                     dumpType(type);
                 } else {
                     Iterator i = agent.getTypeDataBase().getTypes();
+                    // Make sure the types are emitted in an order that can be read back in
+                    HashSet emitted = new HashSet();
+                    Stack pending = new Stack();
                     while (i.hasNext()) {
-                        dumpType((Type)i.next());
+                        Type n = (Type)i.next();
+                        if (emitted.contains(n.getName())) {
+                            continue;
+                        }
+
+                        while (n != null && !emitted.contains(n.getName())) {
+                            pending.push(n);
+                            n = n.getSuperclass();
+                        }
+                        while (!pending.empty()) {
+                            n = (Type)pending.pop();
+                            dumpType(n);
+                            emitted.add(n.getName());
+                        }
                     }
                 }
             }
@@ -846,83 +924,105 @@
 
             }
         },
-        new Command("search", "search [ heap | codecache | threads ] value", false) {
+        new Command("search", "search [ heap | perm | rawheap | codecache | threads ] value", false) {
             public void doit(Tokens t) {
                 if (t.countTokens() != 2) {
                     usage();
-                } else {
-                    String type = t.nextToken();
-                    final Address value = VM.getVM().getDebugger().parseAddress(t.nextToken());
-                    final long stride = VM.getVM().getAddressSize();
-                    if (type.equals("threads")) {
-                        Threads threads = VM.getVM().getThreads();
-                        for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
-                            Address base = thread.getBaseOfStackPointer();
-                            Address end = thread.getLastJavaSP();
-                            if (end == null) continue;
-                            if (end.lessThan(base)) {
-                                Address tmp = base;
-                                base = end;
-                                end = tmp;
+                    return;
+                }
+                String type = t.nextToken();
+                final Address value = VM.getVM().getDebugger().parseAddress(t.nextToken());
+                final long stride = VM.getVM().getAddressSize();
+                if (type.equals("threads")) {
+                    Threads threads = VM.getVM().getThreads();
+                    for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
+                        Address base = thread.getBaseOfStackPointer();
+                        Address end = thread.getLastJavaSP();
+                        if (end == null) continue;
+                        if (end.lessThan(base)) {
+                            Address tmp = base;
+                            base = end;
+                            end = tmp;
+                        }
+                        out.println("Searching " + base + " " + end);
+                        while (base != null && base.lessThan(end)) {
+                            Address val = base.getAddressAt(0);
+                            if (AddressOps.equal(val, value)) {
+                                out.println(base);
+                            }
+                            base = base.addOffsetTo(stride);
+                        }
+                    }
+                } else if (type.equals("rawheap")) {
+                    RawHeapVisitor iterator = new RawHeapVisitor() {
+                            public void prologue(long used) {
                             }
-                            out.println("Searching " + base + " " + end);
-                            while (base != null && base.lessThan(end)) {
-                                Address val = base.getAddressAt(0);
+
+                            public void visitAddress(Address addr) {
+                                Address val = addr.getAddressAt(0);
                                 if (AddressOps.equal(val, value)) {
-                                    out.println(base);
+                                        out.println("found at " + addr);
                                 }
-                                base = base.addOffsetTo(stride);
+                            }
+                            public void visitCompOopAddress(Address addr) {
+                                Address val = addr.getCompOopAddressAt(0);
+                                if (AddressOps.equal(val, value)) {
+                                    out.println("found at " + addr);
+                                }
                             }
-                        }
-                    } else if (type.equals("heap")) {
-                        RawHeapVisitor iterator = new RawHeapVisitor() {
-                                public void prologue(long used) {
-                                }
-
-                                public void visitAddress(Address addr) {
-                                    Address val = addr.getAddressAt(0);
+                            public void epilogue() {
+                            }
+                        };
+                    VM.getVM().getObjectHeap().iterateRaw(iterator);
+                } else if (type.equals("heap") || type.equals("perm")) {
+                    HeapVisitor iterator = new DefaultHeapVisitor() {
+                            public boolean doObj(Oop obj) {
+                                int index = 0;
+                                Address start = obj.getHandle();
+                                long end = obj.getObjectSize();
+                                while (index < end) {
+                                    Address val = start.getAddressAt(index);
                                     if (AddressOps.equal(val, value)) {
-                                        out.println("found at " + addr);
+                                        out.println("found in " + obj.getHandle());
+                                        break;
                                     }
-                                }
-                                public void visitCompOopAddress(Address addr) {
-                                    Address val = addr.getCompOopAddressAt(0);
-                                    if (AddressOps.equal(val, value)) {
-                                        out.println("found at " + addr);
-                                    }
-                                }
-                                public void epilogue() {
-                                }
-                            };
-                        VM.getVM().getObjectHeap().iterateRaw(iterator);
-                    } else if (type.equals("codecache")) {
-                        CodeCacheVisitor v = new CodeCacheVisitor() {
-                                public void prologue(Address start, Address end) {
+                                    index += 4;
                                 }
-                                public void visit(CodeBlob blob) {
-                                    boolean printed = false;
-                                    Address base = blob.getAddress();
-                                    Address end = base.addOffsetTo(blob.getSize());
-                                    while (base != null && base.lessThan(end)) {
-                                        Address val = base.getAddressAt(0);
-                                        if (AddressOps.equal(val, value)) {
-                                            if (!printed) {
-                                                printed = true;
-                                                blob.printOn(out);
-                                            }
-                                            out.println("found at " + base + "\n");
+                                return false;
+                            }
+                        };
+                    if (type.equals("heap")) {
+                        VM.getVM().getObjectHeap().iterate(iterator);
+                    } else {
+                        VM.getVM().getObjectHeap().iteratePerm(iterator);
+                    }
+                } else if (type.equals("codecache")) {
+                    CodeCacheVisitor v = new CodeCacheVisitor() {
+                            public void prologue(Address start, Address end) {
+                            }
+                            public void visit(CodeBlob blob) {
+                                boolean printed = false;
+                                Address base = blob.getAddress();
+                                Address end = base.addOffsetTo(blob.getSize());
+                                while (base != null && base.lessThan(end)) {
+                                    Address val = base.getAddressAt(0);
+                                    if (AddressOps.equal(val, value)) {
+                                        if (!printed) {
+                                            printed = true;
+                                            blob.printOn(out);
                                         }
-                                        base = base.addOffsetTo(stride);
+                                        out.println("found at " + base + "\n");
                                     }
+                                    base = base.addOffsetTo(stride);
                                 }
-                                public void epilogue() {
-                                }
+                            }
+                            public void epilogue() {
+                            }
 
 
-                            };
-                        VM.getVM().getCodeCache().iterate(v);
+                        };
+                    VM.getVM().getCodeCache().iterate(v);
 
-                    }
                 }
             }
         },
@@ -957,12 +1057,19 @@
                     Threads threads = VM.getVM().getThreads();
                     boolean all = name.equals("-a");
                     for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
-                        StringWriter sw = new StringWriter();
                         ByteArrayOutputStream bos = new ByteArrayOutputStream();
                         thread.printThreadIDOn(new PrintStream(bos));
                         if (all || bos.toString().equals(name)) {
+                            out.println(bos.toString() + " = " + thread.getAddress());
                             HTMLGenerator gen = new HTMLGenerator(false);
-                            out.println(gen.genHTMLForJavaStackTrace(thread));
+                            try {
+                                out.println(gen.genHTMLForJavaStackTrace(thread));
+                            } catch (Exception e) {
+                                err.println("Error: " + e);
+                                if (verboseExceptions) {
+                                    e.printStackTrace(err);
+                                }
+                            }
                             if (!all) return;
                         }
                     }
@@ -970,6 +1077,26 @@
                 }
             }
         },
+        new Command("thread", "thread { -a | id }", false) {
+            public void doit(Tokens t) {
+                if (t.countTokens() != 1) {
+                    usage();
+                } else {
+                    String name = t.nextToken();
+                    Threads threads = VM.getVM().getThreads();
+                    boolean all = name.equals("-a");
+                    for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
+                        ByteArrayOutputStream bos = new ByteArrayOutputStream();
+                        thread.printThreadIDOn(new PrintStream(bos));
+                        if (all || bos.toString().equals(name)) {
+                            out.println(bos.toString() + " = " + thread.getAddress());
+                            if (!all) return;
+                        }
+                    }
+                    out.println("Couldn't find thread " + name);
+                }
+            }
+        },
 
         new Command("threads", false) {
             public void doit(Tokens t) {
@@ -1161,7 +1288,7 @@
         }
     }
 
-    static Pattern historyPattern = Pattern.compile("((!\\*)|(!\\$)|(!!-?)|(!-?[0-9][0-9]*))");
+    static Pattern historyPattern = Pattern.compile("((!\\*)|(!\\$)|(!!-?)|(!-?[0-9][0-9]*)|(![a-zA-Z][^ ]*))");
 
     public void executeCommand(String ln) {
         if (ln.indexOf('!') != -1) {
@@ -1195,14 +1322,37 @@
                         result.append(item.at(item.countTokens() - 1));
                     } else {
                         String tail = cmd.substring(1);
-                        int index = Integer.parseInt(tail);
-                        if (index < 0) {
-                            index = history.size() + index;
+                        switch (tail.charAt(0)) {
+                        case '0':
+                        case '1':
+                        case '2':
+                        case '3':
+                        case '4':
+                        case '5':
+                        case '6':
+                        case '7':
+                        case '8':
+                        case '9':
+                        case '-': {
+                            int index = Integer.parseInt(tail);
+                            if (index < 0) {
+                                index = history.size() + index;
+                            }
+                            if (index > size) {
+                                err.println("No such history item");
+                            } else {
+                                result.append((String)history.get(index));
+                            }
+                            break;
                         }
-                        if (index > size) {
-                            err.println("No such history item");
-                        } else {
-                            result.append((String)history.get(index));
+                        default: {
+                            for (int i = history.size() - 1; i >= 0; i--) {
+                                String s = (String)history.get(i);
+                                if (s.startsWith(tail)) {
+                                    result.append(s);
+                                }
+                            }
+                        }
                         }
                     }
                 }
--- a/agent/src/share/classes/sun/jvm/hotspot/HSDB.java	Mon Apr 05 10:17:15 2010 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/HSDB.java	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -985,6 +985,12 @@
               annoPanel.addAnnotation(new Annotation(curFrame.addressOfInterpreterFrameExpressionStack(),
                                                      curFrame.addressOfInterpreterFrameTOS(),
                                                      "Interpreter expression stack"));
+              Address monBegin = curFrame.interpreterFrameMonitorBegin().address();
+              Address monEnd = curFrame.interpreterFrameMonitorEnd().address();
+              if (!monBegin.equals(monEnd)) {
+                  annoPanel.addAnnotation(new Annotation(monBegin, monEnd,
+                                                         "BasicObjectLocks"));
+              }
               if (interpreterFrameMethod != null) {
                 // The offset is just to get the right stack slots highlighted in the output
                 int offset = 1;
--- a/agent/src/share/classes/sun/jvm/hotspot/bugspot/BugSpot.java	Mon Apr 05 10:17:15 2010 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/bugspot/BugSpot.java	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2003 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -294,7 +294,7 @@
     attachDialog.setSize(400, 300);
     GraphicsUtilities.centerInContainer(attachDialog.getComponent(),
                                         getParentDimension(attachDialog.getComponent()));
-    attachDialog.show();
+    attachDialog.setVisible(true);
   }
 
   public void showThreadsDialog() {
@@ -321,7 +321,7 @@
                                            getParentDimension(threadsDialog.getComponent()));
     GraphicsUtilities.centerInContainer(threadsDialog.getComponent(),
                                         getParentDimension(threadsDialog.getComponent()));
-    threadsDialog.show();
+    threadsDialog.setVisible(true);
   }
 
   public void showMemoryDialog() {
@@ -341,7 +341,7 @@
                                            getParentDimension(memoryDialog.getComponent()));
     GraphicsUtilities.centerInContainer(memoryDialog.getComponent(),
                                         getParentDimension(memoryDialog.getComponent()));
-    memoryDialog.show();
+    memoryDialog.setVisible(true);
   }
 
   /** Changes the editor factory this debugger uses to display source
@@ -530,7 +530,7 @@
       addFrame(stackFrame);
       stackFrame.setSize(400, 200);
       GraphicsUtilities.moveToInContainer(stackFrame.getComponent(), 0.0f, 1.0f, 0, 20);
-      stackFrame.show();
+      stackFrame.setVisible(true);
 
       // Create register panel
       registerPanel = new RegisterPanel();
@@ -544,7 +544,7 @@
       registerFrame.setSize(225, 200);
       GraphicsUtilities.moveToInContainer(registerFrame.getComponent(),
                                           1.0f, 0.0f, 0, 0);
-      registerFrame.show();
+      registerFrame.setVisible(true);
 
       resetCurrentThread();
     } catch (DebuggerException e) {
@@ -979,7 +979,7 @@
                                                1.0f,
                                                0.85f,
                                                getParentDimension(editorFrame.getComponent()));
-        editorFrame.show();
+        editorFrame.setVisible(true);
         shown = true;
       }
       code.showLineNumber(lineNo);
--- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeDisassembler.java	Mon Apr 05 10:17:15 2010 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeDisassembler.java	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -96,10 +96,6 @@
       addBytecodeClass(Bytecodes._dstore, BytecodeStore.class);
       addBytecodeClass(Bytecodes._astore, BytecodeStore.class);
       addBytecodeClass(Bytecodes._tableswitch, BytecodeTableswitch.class);
-
-      // only special fast_xxx cases. others are handled differently.
-      addBytecodeClass(Bytecodes._fast_iaccess_0, BytecodeFastAAccess0.class);
-      addBytecodeClass(Bytecodes._fast_aaccess_0, BytecodeFastIAccess0.class);
    }
 
    public BytecodeDisassembler(Method method) {
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Mon Apr 05 10:17:15 2010 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -263,11 +263,12 @@
     case JVM_CONSTANT_NameAndType:        return "JVM_CONSTANT_NameAndType";
     case JVM_CONSTANT_Invalid:            return "JVM_CONSTANT_Invalid";
     case JVM_CONSTANT_UnresolvedClass:    return "JVM_CONSTANT_UnresolvedClass";
+    case JVM_CONSTANT_UnresolvedClassInError:    return "JVM_CONSTANT_UnresolvedClassInError";
     case JVM_CONSTANT_ClassIndex:         return "JVM_CONSTANT_ClassIndex";
     case JVM_CONSTANT_UnresolvedString:   return "JVM_CONSTANT_UnresolvedString";
     case JVM_CONSTANT_StringIndex:        return "JVM_CONSTANT_StringIndex";
     }
-    throw new InternalError("unknown tag");
+    throw new InternalError("Unknown tag: " + tag);
   }
 
   public void iterateFields(OopVisitor visitor, boolean doVMFields) {
@@ -304,6 +305,7 @@
           index++;
           break;
 
+        case JVM_CONSTANT_UnresolvedClassInError:
         case JVM_CONSTANT_UnresolvedClass:
         case JVM_CONSTANT_Class:
         case JVM_CONSTANT_UnresolvedString:
@@ -409,6 +411,7 @@
               }
 
               // case JVM_CONSTANT_ClassIndex:
+              case JVM_CONSTANT_UnresolvedClassInError:
               case JVM_CONSTANT_UnresolvedClass: {
                   dos.writeByte(JVM_CONSTANT_Class);
                   String klassName = getSymbolAt(ci).asString();
@@ -464,6 +467,8 @@
                                           + ", type = " + signatureIndex);
                   break;
               }
+              default:
+                  throw new InternalError("unknown tag: " + cpConstType);
           } // switch
       }
       dos.flush();
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java	Mon Apr 05 10:17:15 2010 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,6 +58,9 @@
     // Temporary tag while constructing constant pool
     public static final int JVM_CONSTANT_StringIndex        = 103;
 
+    // Temporary tag while constructing constant pool
+    public static final int JVM_CONSTANT_UnresolvedClassInError = 104;
+
     // 1.5 major/minor version numbers from JVM spec. 3rd edition
     public static final short MAJOR_VERSION = 49;
     public static final short MINOR_VERSION = 0;
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/SignatureIterator.java	Mon Apr 05 10:17:15 2010 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/SignatureIterator.java	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -108,7 +108,7 @@
         return BasicTypeSize.getTArraySize();
       }
     }
-    throw new RuntimeException("Should not reach here");
+    throw new RuntimeException("Should not reach here: char " + (char)_signature.getByteAt(_index) + " @ " + _index + " in " + _signature.asString());
   }
   protected void checkSignatureEnd() {
     if (_index < _signature.getLength()) {
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java	Mon Apr 05 10:17:15 2010 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -238,6 +238,7 @@
                 }
 
                 // case JVM_CONSTANT_ClassIndex:
+                case JVM_CONSTANT_UnresolvedClassInError:
                 case JVM_CONSTANT_UnresolvedClass: {
                      dos.writeByte(JVM_CONSTANT_Class);
                      String klassName = cpool.getSymbolAt(ci).asString();
@@ -296,6 +297,8 @@
                                         + ", type = " + signatureIndex);
                      break;
                 }
+                default:
+                  throw new InternalError("Unknown tag: " + cpConstType);
             } // switch
         }
     }
--- a/agent/src/share/classes/sun/jvm/hotspot/ui/FrameWrapper.java	Mon Apr 05 10:17:15 2010 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/ui/FrameWrapper.java	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,6 @@
   public void       setVisible(boolean visible);
   public void       setSize(int x, int y);
   public void       pack();
-  public void       show();
   public void       dispose();
   public void       setBackground(Color color);
   public void       setResizable(boolean resizable);
--- a/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Mon Apr 05 10:17:15 2010 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -95,8 +95,10 @@
 
         // list tags
         void beginList()  { beginTag("ul"); nl(); }
+        void endList()    { endTag("ul"); nl();   }
+        void beginListItem() { beginTag("li"); }
+        void endListItem()   { endTag("li"); nl();   }
         void li(String s) { wrap("li", s); nl();  }
-        void endList()    { endTag("ul"); nl();   }
 
         // table tags
         void beginTable(int border) {
@@ -505,6 +507,11 @@
                buf.cell(cpool.getSymbolAt(index).asString());
                break;
 
+            case JVM_CONSTANT_UnresolvedClassInError:
+               buf.cell("JVM_CONSTANT_UnresolvedClassInError");
+               buf.cell(cpool.getSymbolAt(index).asString());
+               break;
+
             case JVM_CONSTANT_Class:
                buf.cell("JVM_CONSTANT_Class");
                Klass klass = (Klass) cpool.getObjAt(index);
@@ -564,6 +571,9 @@
                buf.cell("JVM_CONSTANT_StringIndex");
                buf.cell(Integer.toString(cpool.getIntAt(index)));
                break;
+
+            default:
+               throw new InternalError("unknown tag: " + ctag);
          }
 
          buf.endTag("tr");
@@ -671,7 +681,16 @@
                              buf.cell(Integer.toString(curBci) + spaces);
 
                              buf.beginTag("td");
-                             String instrStr = escapeHTMLSpecialChars(instr.toString());
+                             String instrStr = null;
+                             try {
+                                 instrStr = escapeHTMLSpecialChars(instr.toString());
+                             } catch (RuntimeException re) {
+                                 buf.append("exception during bytecode processing");
+                                 buf.endTag("td");
+                                 buf.endTag("tr");
+                                 re.printStackTrace();
+                                 return;
+                             }
 
                              if (instr instanceof BytecodeNew) {
                                 BytecodeNew newBytecode = (BytecodeNew) instr;
@@ -1396,9 +1415,7 @@
          final SymbolFinder symFinder = createSymbolFinder();
          final Disassembler disasm = createDisassembler(startPc, code);
          class NMethodVisitor implements InstructionVisitor {
-            boolean prevWasCall;
             public void prologue() {
-               prevWasCall = false;
             }
 
             public void visit(long currentPc, Instruction instr) {
@@ -1418,8 +1435,7 @@
 
                PCDesc pcDesc = (PCDesc) safepoints.get(longToAddress(currentPc));
 
-               boolean isSafepoint = (pcDesc != null);
-               if (isSafepoint && prevWasCall) {
+               if (pcDesc != null) {
                   buf.append(genSafepointInfo(nmethod, pcDesc));
                }
 
@@ -1435,11 +1451,6 @@
                }
 
                buf.br();
-               if (isSafepoint && !prevWasCall) {
-                 buf.append(genSafepointInfo(nmethod, pcDesc));
-               }
-
-               prevWasCall = instr.isCall();
             }
 
             public void epilogue() {
@@ -1783,22 +1794,20 @@
          buf.h3("Fields");
          buf.beginList();
          for (int f = 0; f < numFields; f += InstanceKlass.NEXT_OFFSET) {
-           int nameIndex = fields.getShortAt(f + InstanceKlass.NAME_INDEX_OFFSET);
-           int sigIndex  = fields.getShortAt(f + InstanceKlass.SIGNATURE_INDEX_OFFSET);
-           int genSigIndex = fields.getShortAt(f + InstanceKlass.GENERIC_SIGNATURE_INDEX_OFFSET);
-           Symbol f_name = cp.getSymbolAt(nameIndex);
-           Symbol f_sig  = cp.getSymbolAt(sigIndex);
-           Symbol f_genSig = (genSigIndex != 0)? cp.getSymbolAt(genSigIndex) : null;
-           AccessFlags acc = new AccessFlags(fields.getShortAt(f + InstanceKlass.ACCESS_FLAGS_OFFSET));
+           sun.jvm.hotspot.oops.Field field = klass.getFieldByIndex(f);
+           String f_name = ((NamedFieldIdentifier)field.getID()).getName();
+           Symbol f_sig  = field.getSignature();
+           Symbol f_genSig = field.getGenericSignature();
+           AccessFlags acc = field.getAccessFlagsObj();
 
-           buf.beginTag("li");
+           buf.beginListItem();
            buf.append(genFieldModifierString(acc));
            buf.append(' ');
            Formatter sigBuf = new Formatter(genHTML);
            new SignatureConverter(f_sig, sigBuf.getBuffer()).dispatchField();
            buf.append(sigBuf.toString().replace('/', '.'));
            buf.append(' ');
-           buf.append(f_name.asString());
+           buf.append(f_name);
            buf.append(';');
            // is it generic?
            if (f_genSig != null) {
@@ -1806,7 +1815,8 @@
               buf.append(escapeHTMLSpecialChars(f_genSig.asString()));
               buf.append("] ");
            }
-           buf.endTag("li");
+           buf.append(" (offset = " + field.getOffset() + ")");
+           buf.endListItem();
          }
          buf.endList();
       }
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/Assert.java	Mon Apr 05 10:17:15 2010 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/Assert.java	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
   public static boolean ASSERTS_ENABLED = true;
 
   public static void that(boolean test, String message) {
-    if (!test) {
+    if (ASSERTS_ENABLED && !test) {
       throw new AssertionFailure(message);
     }
   }
--- a/make/hotspot_distro	Mon Apr 05 10:17:15 2010 -0700
+++ b/make/hotspot_distro	Wed May 19 10:22:39 2010 -0700
@@ -28,5 +28,5 @@
 
 # Don't put quotes (fail windows build).
 HOTSPOT_VM_DISTRO=Java HotSpot(TM)
-COMPANY_NAME=Sun Microsystems, Inc.
+COMPANY_NAME=Oracle Corporation
 PRODUCT_NAME=Java(TM) Platform SE
--- a/make/hotspot_version	Mon Apr 05 10:17:15 2010 -0700
+++ b/make/hotspot_version	Wed May 19 10:22:39 2010 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=18
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=02
+HS_BUILD_NUMBER=04
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/make/jprt.properties	Mon Apr 05 10:17:15 2010 -0700
+++ b/make/jprt.properties	Wed May 19 10:22:39 2010 -0700
@@ -51,6 +51,8 @@
 jprt.my.solaris.sparc.jdk6perf=solaris_sparc_5.8
 jprt.my.solaris.sparc.jdk6u10=solaris_sparc_5.8
 jprt.my.solaris.sparc.jdk6u14=solaris_sparc_5.8
+jprt.my.solaris.sparc.jdk6u18=solaris_sparc_5.8
+jprt.my.solaris.sparc.jdk6u20=solaris_sparc_5.8
 jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}}
 
 jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
@@ -58,6 +60,8 @@
 jprt.my.solaris.sparcv9.jdk6perf=solaris_sparcv9_5.8
 jprt.my.solaris.sparcv9.jdk6u10=solaris_sparcv9_5.8
 jprt.my.solaris.sparcv9.jdk6u14=solaris_sparcv9_5.8
+jprt.my.solaris.sparcv9.jdk6u18=solaris_sparcv9_5.8
+jprt.my.solaris.sparcv9.jdk6u20=solaris_sparcv9_5.8
 jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
 
 jprt.my.solaris.i586.jdk7=solaris_i586_5.10
@@ -65,6 +69,8 @@
 jprt.my.solaris.i586.jdk6perf=solaris_i586_5.8
 jprt.my.solaris.i586.jdk6u10=solaris_i586_5.8
 jprt.my.solaris.i586.jdk6u14=solaris_i586_5.8
+jprt.my.solaris.i586.jdk6u18=solaris_i586_5.8
+jprt.my.solaris.i586.jdk6u20=solaris_i586_5.8
 jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}}
 
 jprt.my.solaris.x64.jdk7=solaris_x64_5.10
@@ -72,6 +78,8 @@
 jprt.my.solaris.x64.jdk6perf=solaris_x64_5.10
 jprt.my.solaris.x64.jdk6u10=solaris_x64_5.10
 jprt.my.solaris.x64.jdk6u14=solaris_x64_5.10
+jprt.my.solaris.x64.jdk6u18=solaris_x64_5.10
+jprt.my.solaris.x64.jdk6u20=solaris_x64_5.10
 jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
 
 jprt.my.linux.i586.jdk7=linux_i586_2.6
@@ -79,6 +87,8 @@
 jprt.my.linux.i586.jdk6perf=linux_i586_2.4
 jprt.my.linux.i586.jdk6u10=linux_i586_2.4
 jprt.my.linux.i586.jdk6u14=linux_i586_2.4
+jprt.my.linux.i586.jdk6u18=linux_i586_2.4
+jprt.my.linux.i586.jdk6u20=linux_i586_2.4
 jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
 
 jprt.my.linux.x64.jdk7=linux_x64_2.6
@@ -86,6 +96,8 @@
 jprt.my.linux.x64.jdk6perf=linux_x64_2.4
 jprt.my.linux.x64.jdk6u10=linux_x64_2.4
 jprt.my.linux.x64.jdk6u14=linux_x64_2.4
+jprt.my.linux.x64.jdk6u18=linux_x64_2.4
+jprt.my.linux.x64.jdk6u20=linux_x64_2.4
 jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
 
 jprt.my.windows.i586.jdk7=windows_i586_5.0
@@ -93,6 +105,8 @@
 jprt.my.windows.i586.jdk6perf=windows_i586_5.0
 jprt.my.windows.i586.jdk6u10=windows_i586_5.0
 jprt.my.windows.i586.jdk6u14=windows_i586_5.0
+jprt.my.windows.i586.jdk6u18=windows_i586_5.0
+jprt.my.windows.i586.jdk6u20=windows_i586_5.0
 jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
 
 jprt.my.windows.x64.jdk7=windows_x64_5.2
@@ -100,6 +114,8 @@
 jprt.my.windows.x64.jdk6perf=windows_x64_5.2
 jprt.my.windows.x64.jdk6u10=windows_x64_5.2
 jprt.my.windows.x64.jdk6u14=windows_x64_5.2
+jprt.my.windows.x64.jdk6u18=windows_x64_5.2
+jprt.my.windows.x64.jdk6u20=windows_x64_5.2
 jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
 
 # Standard list of jprt build targets for this source tree
--- a/make/linux/makefiles/adlc.make	Mon Apr 05 10:17:15 2010 -0700
+++ b/make/linux/makefiles/adlc.make	Wed May 19 10:22:39 2010 -0700
@@ -127,6 +127,9 @@
 # Note that product files are updated via "mv", which is atomic.
 TEMPDIR := $(OUTDIR)/mktmp$(shell echo $$$$)
 
+# Debuggable by default
+CFLAGS += -g
+
 # Pass -D flags into ADLC.
 ADLCFLAGS += $(SYSDEFS)
 
@@ -135,7 +138,7 @@
 
 # Normally, debugging is done directly on the ad_<arch>*.cpp files.
 # But -g will put #line directives in those files pointing back to <arch>.ad.
-#ADLCFLAGS += -g
+ADLCFLAGS += -g
 
 ifdef LP64
 ADLCFLAGS += -D_LP64
--- a/make/solaris/makefiles/adlc.make	Mon Apr 05 10:17:15 2010 -0700
+++ b/make/solaris/makefiles/adlc.make	Wed May 19 10:22:39 2010 -0700
@@ -147,6 +147,9 @@
 # Note that product files are updated via "mv", which is atomic.
 TEMPDIR := $(OUTDIR)/mktmp$(shell echo $$$$)
 
+# Debuggable by default
+CFLAGS += -g
+
 # Pass -D flags into ADLC.
 ADLCFLAGS += $(SYSDEFS)
 
@@ -155,7 +158,7 @@
 
 # Normally, debugging is done directly on the ad_<arch>*.cpp files.
 # But -g will put #line directives in those files pointing back to <arch>.ad.
-#ADLCFLAGS += -g
+ADLCFLAGS += -g
 
 ifdef LP64
 ADLCFLAGS += -D_LP64
--- a/src/cpu/sparc/vm/assembler_sparc.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/sparc/vm/assembler_sparc.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2333,6 +2333,18 @@
 #endif
 
 
+void MacroAssembler::load_sized_value(Address src, Register dst,
+                                      size_t size_in_bytes, bool is_signed) {
+  switch (size_in_bytes) {
+  case  8: ldx(src, dst); break;
+  case  4: ld( src, dst); break;
+  case  2: is_signed ? ldsh(src, dst) : lduh(src, dst); break;
+  case  1: is_signed ? ldsb(src, dst) : ldub(src, dst); break;
+  default: ShouldNotReachHere();
+  }
+}
+
+
 void MacroAssembler::float_cmp( bool is_float, int unordered_result,
                                 FloatRegister Fa, FloatRegister Fb,
                                 Register Rresult) {
@@ -2625,40 +2637,103 @@
 }
 
 
-void MacroAssembler::regcon_inc_ptr( RegisterOrConstant& dest, RegisterOrConstant src, Register temp ) {
-  assert(dest.register_or_noreg() != G0, "lost side effect");
-  if ((src.is_constant() && src.as_constant() == 0) ||
-      (src.is_register() && src.as_register() == G0)) {
-    // do nothing
-  } else if (dest.is_register()) {
-    add(dest.as_register(), ensure_simm13_or_reg(src, temp), dest.as_register());
-  } else if (src.is_constant()) {
-    intptr_t res = dest.as_constant() + src.as_constant();
-    dest = RegisterOrConstant(res); // side effect seen by caller
+RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
+  assert(d.register_or_noreg() != G0, "lost side effect");
+  if ((s2.is_constant() && s2.as_constant() == 0) ||
+      (s2.is_register() && s2.as_register() == G0)) {
+    // Do nothing, just move value.
+    if (s1.is_register()) {
+      if (d.is_constant())  d = temp;
+      mov(s1.as_register(), d.as_register());
+      return d;
+    } else {
+      return s1;
+    }
+  }
+
+  if (s1.is_register()) {
+    assert_different_registers(s1.as_register(), temp);
+    if (d.is_constant())  d = temp;
+    andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
+    return d;
   } else {
-    assert(temp != noreg, "cannot handle constant += register");
-    add(src.as_register(), ensure_simm13_or_reg(dest, temp), temp);
-    dest = RegisterOrConstant(temp); // side effect seen by caller
+    if (s2.is_register()) {
+      assert_different_registers(s2.as_register(), temp);
+      if (d.is_constant())  d = temp;
+      set(s1.as_constant(), temp);
+      andn(temp, s2.as_register(), d.as_register());
+      return d;
+    } else {
+      intptr_t res = s1.as_constant() & ~s2.as_constant();
+      return res;
+    }
   }
 }
 
-void MacroAssembler::regcon_sll_ptr( RegisterOrConstant& dest, RegisterOrConstant src, Register temp ) {
-  assert(dest.register_or_noreg() != G0, "lost side effect");
-  if (!is_simm13(src.constant_or_zero()))
-    src = (src.as_constant() & 0xFF);
-  if ((src.is_constant() && src.as_constant() == 0) ||
-      (src.is_register() && src.as_register() == G0)) {
-    // do nothing
-  } else if (dest.is_register()) {
-    sll_ptr(dest.as_register(), src, dest.as_register());
-  } else if (src.is_constant()) {
-    intptr_t res = dest.as_constant() << src.as_constant();
-    dest = RegisterOrConstant(res); // side effect seen by caller
+RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
+  assert(d.register_or_noreg() != G0, "lost side effect");
+  if ((s2.is_constant() && s2.as_constant() == 0) ||
+      (s2.is_register() && s2.as_register() == G0)) {
+    // Do nothing, just move value.
+    if (s1.is_register()) {
+      if (d.is_constant())  d = temp;
+      mov(s1.as_register(), d.as_register());
+      return d;
+    } else {
+      return s1;
+    }
+  }
+
+  if (s1.is_register()) {
+    assert_different_registers(s1.as_register(), temp);
+    if (d.is_constant())  d = temp;
+    add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
+    return d;
   } else {
-    assert(temp != noreg, "cannot handle constant <<= register");
-    set(dest.as_constant(), temp);
-    sll_ptr(temp, src, temp);
-    dest = RegisterOrConstant(temp); // side effect seen by caller
+    if (s2.is_register()) {
+      assert_different_registers(s2.as_register(), temp);
+      if (d.is_constant())  d = temp;
+      add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register());
+      return d;
+    } else {
+      intptr_t res = s1.as_constant() + s2.as_constant();
+      return res;
+    }
+  }
+}
+
+RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
+  assert(d.register_or_noreg() != G0, "lost side effect");
+  if (!is_simm13(s2.constant_or_zero()))
+    s2 = (s2.as_constant() & 0xFF);
+  if ((s2.is_constant() && s2.as_constant() == 0) ||
+      (s2.is_register() && s2.as_register() == G0)) {
+    // Do nothing, just move value.
+    if (s1.is_register()) {
+      if (d.is_constant())  d = temp;
+      mov(s1.as_register(), d.as_register());
+      return d;
+    } else {
+      return s1;
+    }
+  }
+
+  if (s1.is_register()) {
+    assert_different_registers(s1.as_register(), temp);
+    if (d.is_constant())  d = temp;
+    sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
+    return d;
+  } else {
+    if (s2.is_register()) {
+      assert_different_registers(s2.as_register(), temp);
+      if (d.is_constant())  d = temp;
+      set(s1.as_constant(), temp);
+      sll_ptr(temp, s2.as_register(), d.as_register());
+      return d;
+    } else {
+      intptr_t res = s1.as_constant() << s2.as_constant();
+      return res;
+    }
   }
 }
 
@@ -2708,8 +2783,8 @@
 
   // Adjust recv_klass by scaled itable_index, so we can free itable_index.
   RegisterOrConstant itable_offset = itable_index;
-  regcon_sll_ptr(itable_offset, exact_log2(itableMethodEntry::size() * wordSize));
-  regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes());
+  itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset);
+  itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset);
   add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass);
 
   // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
@@ -2805,7 +2880,7 @@
 
   assert_different_registers(sub_klass, super_klass, temp_reg);
   if (super_check_offset.is_register()) {
-    assert_different_registers(sub_klass, super_klass,
+    assert_different_registers(sub_klass, super_klass, temp_reg,
                                super_check_offset.as_register());
   } else if (must_load_sco) {
     assert(temp2_reg != noreg, "supply either a temp or a register offset");
@@ -2855,6 +2930,8 @@
     // The super check offset is always positive...
     lduw(super_klass, sco_offset, temp2_reg);
     super_check_offset = RegisterOrConstant(temp2_reg);
+    // super_check_offset is now a register.
+    assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register());
   }
   ld_ptr(sub_klass, super_check_offset, temp_reg);
   cmp(super_klass, temp_reg);
@@ -3014,11 +3091,10 @@
 }
 
 
-
-
 void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
                                               Register temp_reg,
                                               Label& wrong_method_type) {
+  if (UseCompressedOops)  unimplemented("coop");  // field accesses must decode
   assert_different_registers(mtype_reg, mh_reg, temp_reg);
   // compare method type against that of the receiver
   RegisterOrConstant mhtype_offset = delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg);
@@ -3029,10 +3105,33 @@
 }
 
 
-void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg) {
+// A method handle has a "vmslots" field which gives the size of its
+// argument list in JVM stack slots.  This field is either located directly
+// in every method handle, or else is indirectly accessed through the
+// method handle's MethodType.  This macro hides the distinction.
+void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
+                                                Register temp_reg) {
+  assert_different_registers(vmslots_reg, mh_reg, temp_reg);
+  if (UseCompressedOops)  unimplemented("coop");  // field accesses must decode
+  // load mh.type.form.vmslots
+  if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) {
+    // hoist vmslots into every mh to avoid dependent load chain
+    ld(    Address(mh_reg,    delayed_value(java_dyn_MethodHandle::vmslots_offset_in_bytes, temp_reg)),   vmslots_reg);
+  } else {
+    Register temp2_reg = vmslots_reg;
+    ld_ptr(Address(mh_reg,    delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)),      temp2_reg);
+    ld_ptr(Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg)),        temp2_reg);
+    ld(    Address(temp2_reg, delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
+  }
+}
+
+
+void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop) {
   assert(mh_reg == G3_method_handle, "caller must put MH object in G3");
   assert_different_registers(mh_reg, temp_reg);
 
+  if (UseCompressedOops)  unimplemented("coop");  // field accesses must decode
+
   // pick out the interpreted side of the handler
   ld_ptr(mh_reg, delayed_value(java_dyn_MethodHandle::vmentry_offset_in_bytes, temp_reg), temp_reg);
 
@@ -3043,17 +3142,18 @@
   // for the various stubs which take control at this point,
   // see MethodHandles::generate_method_handle_stub
 
-  // (Can any caller use this delay slot?  If so, add an option for supression.)
-  delayed()->nop();
+  // Some callers can fill the delay slot.
+  if (emit_delayed_nop) {
+    delayed()->nop();
+  }
 }
 
+
 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
                                                    int extra_slot_offset) {
   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
-  int stackElementSize = Interpreter::stackElementWords() * wordSize;
-  int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
-  int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
-  assert(offset1 - offset == stackElementSize, "correct arithmetic");
+  int stackElementSize = Interpreter::stackElementSize;
+  int offset = extra_slot_offset * stackElementSize;
   if (arg_slot.is_constant()) {
     offset += arg_slot.as_constant() * stackElementSize;
     return offset;
@@ -3067,6 +3167,11 @@
 }
 
 
+Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
+                                         int extra_slot_offset) {
+  return Address(Gargs, argument_offset(arg_slot, extra_slot_offset));
+}
+
 
 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
                                           Register temp_reg,
@@ -4082,7 +4187,7 @@
 // make it work.
 static void check_index(int ind) {
   assert(0 <= ind && ind <= 64*K && ((ind % oopSize) == 0),
-         "Invariants.")
+         "Invariants.");
 }
 
 static void generate_satb_log_enqueue(bool with_frame) {
--- a/src/cpu/sparc/vm/assembler_sparc.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/sparc/vm/assembler_sparc.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -661,9 +661,6 @@
     stx_op3      = 0x0e,
     swap_op3     = 0x0f,
 
-    lduwa_op3    = 0x10,
-    ldxa_op3     = 0x1b,
-
     stwa_op3     = 0x14,
     stxa_op3     = 0x1e,
 
@@ -1065,7 +1062,7 @@
   }
   void assert_not_delayed(const char* msg) {
 #ifdef CHECK_DELAY
-    assert_msg ( delay_state == no_delay, msg);
+    assert(delay_state == no_delay, msg);
 #endif
   }
 
@@ -1383,24 +1380,25 @@
 
   // pp 181
 
-  void and3(     Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3               ) | rs1(s1) | rs2(s2) ); }
-  void and3(     Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3               ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+  void and3(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3              ) | rs1(s1) | rs2(s2) ); }
+  void and3(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3              ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   void andcc(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3  | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
   void andcc(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3  | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   void andn(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3             ) | rs1(s1) | rs2(s2) ); }
   void andn(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3             ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+  void andn(    Register s1, RegisterOrConstant s2, Register d);
   void andncc(  Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
   void andncc(  Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-  void or3(      Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3               ) | rs1(s1) | rs2(s2) ); }
-  void or3(      Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3               ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+  void or3(     Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3               ) | rs1(s1) | rs2(s2) ); }
+  void or3(     Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3               ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   void orcc(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3   | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
   void orcc(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3   | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   void orn(     Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | rs2(s2) ); }
   void orn(     Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   void orncc(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3  | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
   void orncc(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3  | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-  void xor3(     Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3              ) | rs1(s1) | rs2(s2) ); }
-  void xor3(     Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3              ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
+  void xor3(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3              ) | rs1(s1) | rs2(s2) ); }
+  void xor3(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3              ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   void xorcc(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3  | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
   void xorcc(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3  | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   void xnor(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xnor_op3             ) | rs1(s1) | rs2(s2) ); }
@@ -2029,8 +2027,8 @@
   inline void st_ptr(Register d, Register s1, ByteSize simm13a);
 #endif
 
-  // ld_long will perform ld for 32 bit VM's and ldx for 64 bit VM's
-  // st_long will perform st for 32 bit VM's and stx for 64 bit VM's
+  // ld_long will perform ldd for 32 bit VM's and ldx for 64 bit VM's
+  // st_long will perform std for 32 bit VM's and stx for 64 bit VM's
   inline void ld_long(Register s1, Register s2, Register d);
   inline void ld_long(Register s1, int simm13a, Register d);
   inline void ld_long(Register s1, RegisterOrConstant s2, Register d);
@@ -2041,23 +2039,19 @@
   inline void st_long(Register d, const Address& a, int offset = 0);
 
   // Helpers for address formation.
-  // They update the dest in place, whether it is a register or constant.
-  // They emit no code at all if src is a constant zero.
-  // If dest is a constant and src is a register, the temp argument
-  // is required, and becomes the result.
-  // If dest is a register and src is a non-simm13 constant,
-  // the temp argument is required, and is used to materialize the constant.
-  void regcon_inc_ptr( RegisterOrConstant& dest, RegisterOrConstant src,
-                       Register temp = noreg );
-  void regcon_sll_ptr( RegisterOrConstant& dest, RegisterOrConstant src,
-                       Register temp = noreg );
-
-  RegisterOrConstant ensure_simm13_or_reg(RegisterOrConstant roc, Register Rtemp) {
-    guarantee(Rtemp != noreg, "constant offset overflow");
-    if (is_simm13(roc.constant_or_zero()))
-      return roc;               // register or short constant
-    set(roc.as_constant(), Rtemp);
-    return RegisterOrConstant(Rtemp);
+  // - They emit only a move if s2 is a constant zero.
+  // - If dest is a constant and either s1 or s2 is a register, the temp argument is required and becomes the result.
+  // - If dest is a register and either s1 or s2 is a non-simm13 constant, the temp argument is required and used to materialize the constant.
+  RegisterOrConstant regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
+  RegisterOrConstant regcon_inc_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
+  RegisterOrConstant regcon_sll_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
+
+  RegisterOrConstant ensure_simm13_or_reg(RegisterOrConstant src, Register temp) {
+    if (is_simm13(src.constant_or_zero()))
+      return src;               // register or short constant
+    guarantee(temp != noreg, "constant offset overflow");
+    set(src.as_constant(), temp);
+    return temp;
   }
 
   // --------------------------------------------------
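
ensure_simm13_or_reg turns on the SPARC 13-bit signed-immediate range; anything outside it must be set() into the temp register. A quick standalone check of that range, assuming the usual two's-complement definition of is_simm13:

    #include <cassert>

    inline bool is_simm13(long x) { return -4096 <= x && x <= 4095; }  // 13-bit signed span

    int main() {
      assert( is_simm13(4095));    // largest encodable immediate
      assert(!is_simm13(4096));    // must be materialized into the temp register
      assert( is_simm13(-4096));
      assert(!is_simm13(-4097));
      return 0;
    }
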
@@ -2306,6 +2300,9 @@
   void lcmp( Register Ra, Register Rb, Register Rresult);
 #endif
 
+  // Loading values by size and signed-ness
+  void load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed);
+
   void float_cmp( bool is_float, int unordered_result,
                   FloatRegister Fa, FloatRegister Fb,
                   Register Rresult);
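
load_sized_value is only declared here; the intent is dispatch on size and signed-ness. An illustrative host-side model in plain C++ (the real routine emits SPARC loads such as ldsb/ldub; this just mirrors the semantics):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int64_t load_sized_value(const void* src, int size_in_bytes, bool is_signed) {
      switch (size_in_bytes) {
      case 1: { int8_t  v; std::memcpy(&v, src, 1); return is_signed ? (int64_t) v : (uint8_t)  v; }
      case 2: { int16_t v; std::memcpy(&v, src, 2); return is_signed ? (int64_t) v : (uint16_t) v; }
      case 4: { int32_t v; std::memcpy(&v, src, 4); return is_signed ? (int64_t) v : (uint32_t) v; }
      default:{ int64_t v; std::memcpy(&v, src, 8); return v; }   // 8 bytes: sign is moot
      }
    }

    int main() {
      int16_t neg = -2;
      assert(load_sized_value(&neg, 2, true)  == -2);       // sign-extend, like ldsh
      assert(load_sized_value(&neg, 2, false) == 0xFFFE);   // zero-extend, like lduh
      return 0;
    }
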
@@ -2424,12 +2421,16 @@
   void check_method_handle_type(Register mtype_reg, Register mh_reg,
                                 Register temp_reg,
                                 Label& wrong_method_type);
-  void jump_to_method_handle_entry(Register mh_reg, Register temp_reg);
+  void load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
+                                  Register temp_reg);
+  void jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop = true);
   // offset relative to Gargs of argument at tos[arg_slot].
   // (arg_slot == 0 means the last argument, not the first).
   RegisterOrConstant argument_offset(RegisterOrConstant arg_slot,
                                      int extra_slot_offset = 0);
-
+  // Address formed from Gargs plus argument_offset.
+  Address            argument_address(RegisterOrConstant arg_slot,
+                                      int extra_slot_offset = 0);
 
   // Stack overflow checking
 
--- a/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Wed May 19 10:22:39 2010 -0700
@@ -206,12 +206,17 @@
 inline void Assembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }
 
 // form effective addresses this way:
-inline void Assembler::add(   Register s1, RegisterOrConstant s2, Register d, int offset) {
-  if (s2.is_register())  add(s1, s2.as_register(), d);
+inline void Assembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) {
+  if (s2.is_register())  add(s1, s2.as_register(),          d);
   else                 { add(s1, s2.as_constant() + offset, d); offset = 0; }
   if (offset != 0)       add(d,  offset,                    d);
 }
 
+inline void Assembler::andn(Register s1, RegisterOrConstant s2, Register d) {
+  if (s2.is_register())  andn(s1, s2.as_register(), d);
+  else                   andn(s1, s2.as_constant(), d);
+}
+
 inline void Assembler::ldstub(  Register s1, Register s2, Register d) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldstub(  Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Wed May 19 10:22:39 2010 -0700
@@ -388,6 +388,60 @@
 }
 
 
+// Emit the code to remove the frame from the stack in the exception
+// unwind path.
+int LIR_Assembler::emit_unwind_handler() {
+#ifndef PRODUCT
+  if (CommentedAssembly) {
+    _masm->block_comment("Unwind handler");
+  }
+#endif
+
+  int offset = code_offset();
+
+  // Fetch the exception from TLS and clear out exception related thread state
+  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0);
+  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
+  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
+
+  __ bind(_unwind_handler_entry);
+  __ verify_not_null_oop(O0);
+  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
+    __ mov(O0, I0);  // Preserve the exception
+  }
+
+  // Perform needed unlocking
+  MonitorExitStub* stub = NULL;
+  if (method()->is_synchronized()) {
+    monitor_address(0, FrameMap::I1_opr);
+    stub = new MonitorExitStub(FrameMap::I1_opr, true, 0);
+    __ unlock_object(I3, I2, I1, *stub->entry());
+    __ bind(*stub->continuation());
+  }
+
+  if (compilation()->env()->dtrace_method_probes()) {
+    jobject2reg(method()->constant_encoding(), O0);
+    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type);
+    __ delayed()->nop();
+  }
+
+  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
+    __ mov(I0, O0);  // Restore the exception
+  }
+
+  // dispatch to the unwind logic
+  __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
+  __ delayed()->nop();
+
+  // Emit the slow path assembly
+  if (stub != NULL) {
+    stub->emit_code(this);
+  }
+
+  return offset;
+}
+
+
 int LIR_Assembler::emit_deopt_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
@@ -1728,9 +1782,13 @@
       ShouldNotReachHere();
     }
   } else if (code == lir_cmp_l2i) {
+#ifdef _LP64
+    __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
+#else
     __ lcmp(left->as_register_hi(),  left->as_register_lo(),
             right->as_register_hi(), right->as_register_lo(),
             dst->as_register());
+#endif
   } else {
     ShouldNotReachHere();
   }
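
On LP64 the whole long lives in the lo register, so a single three-operand lcmp suffices. For reference, lir_cmp_l2i implements the Java lcmp contract:

    int lcmp(long left, long right) {
      return (left < right) ? -1 : (left > right) ? 1 : 0;  // -1/0/1, like Long.compare
    }
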
@@ -2046,26 +2104,29 @@
 }
 
 
-void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind) {
+void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
   assert(exceptionOop->as_register() == Oexception, "should match");
-  assert(unwind || exceptionPC->as_register() == Oissuing_pc, "should match");
+  assert(exceptionPC->as_register() == Oissuing_pc, "should match");
 
   info->add_register_oop(exceptionOop);
 
-  if (unwind) {
-    __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
-    __ delayed()->nop();
-  } else {
-    // reuse the debug info from the safepoint poll for the throw op itself
-    address pc_for_athrow  = __ pc();
-    int pc_for_athrow_offset = __ offset();
-    RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
-    __ set(pc_for_athrow, Oissuing_pc, rspec);
-    add_call_info(pc_for_athrow_offset, info); // for exception handler
-
-    __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
-    __ delayed()->nop();
-  }
+  // reuse the debug info from the safepoint poll for the throw op itself
+  address pc_for_athrow  = __ pc();
+  int pc_for_athrow_offset = __ offset();
+  RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
+  __ set(pc_for_athrow, Oissuing_pc, rspec);
+  add_call_info(pc_for_athrow_offset, info); // for exception handler
+
+  __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
+  __ delayed()->nop();
+}
+
+
+void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
+  assert(exceptionOop->as_register() == Oexception, "should match");
+
+  __ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry);
+  __ delayed()->nop();
 }
 
 
@@ -2354,7 +2415,7 @@
   if (UseSlowPath ||
       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
-    __ br(Assembler::always, false, Assembler::pn, *op->stub()->entry());
+    __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
     __ delayed()->nop();
   } else {
     __ allocate_array(op->obj()->as_register(),
@@ -2849,7 +2910,7 @@
 
 
 void LIR_Assembler::align_backward_branch_target() {
-  __ align(16);
+  __ align(OptoLoopAlignment);
 }
 
 
--- a/src/cpu/sparc/vm/c2_globals_sparc.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/sparc/vm/c2_globals_sparc.hpp	Wed May 19 10:22:39 2010 -0700
@@ -60,9 +60,6 @@
 define_pd_global(intx, INTPRESSURE,                  48);  // large register set
 define_pd_global(intx, InteriorEntryAlignment,       16);  // = CodeEntryAlignment
 define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
-// The default setting 16/16 seems to work best.
-// (For _228_jack 16/16 is 2% better than 4/4, 16/4, 32/32, 32/16, or 16/32.)
-define_pd_global(intx, OptoLoopAlignment,            16);  // = 4*wordSize
 define_pd_global(intx, RegisterCostAreaRatio,        12000);
 define_pd_global(bool, UseTLAB,                      true);
 define_pd_global(bool, ResizeTLAB,                   true);
--- a/src/cpu/sparc/vm/cppInterpreter_sparc.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/sparc/vm/cppInterpreter_sparc.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
   // fail with a guarantee ("not enough space for interpreter generation");
   // if too small.
   // Run with +PrintInterpreter to get the VM to print out the size.
-  // Max size with JVMTI and TaggedStackInterpreter
+  // Max size with JVMTI
 
   // QQQ this is probably way too large for c++ interpreter
 
--- a/src/cpu/sparc/vm/frame_sparc.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/sparc/vm/frame_sparc.cpp	Wed May 19 10:22:39 2010 -0700
@@ -620,7 +620,7 @@
 
   // stack frames shouldn't be much larger than max_stack elements
 
-  if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize()) {
+  if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
     return false;
   }
 
--- a/src/cpu/sparc/vm/globals_sparc.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/sparc/vm/globals_sparc.hpp	Wed May 19 10:22:39 2010 -0700
@@ -40,6 +40,9 @@
 define_pd_global(bool, UncommonNullCast,            true);  // Uncommon-trap NULLs passed to checkcast
 
 define_pd_global(intx, CodeEntryAlignment,    32);
+// The default setting 16/16 seems to work best.
+// (For _228_jack 16/16 is 2% better than 4/4, 16/4, 32/32, 32/16, or 16/32.)
+define_pd_global(intx, OptoLoopAlignment,     16);  // = 4*wordSize
 define_pd_global(intx, InlineFrequencyCount,  50);  // we can use more inlining on the SPARC
 define_pd_global(intx, InlineSmallCode,       1500);
 #ifdef _LP64
--- a/src/cpu/sparc/vm/interp_masm_sparc.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp	Wed May 19 10:22:39 2010 -0700
@@ -50,7 +50,6 @@
   // Any changes should also be applied to CodeEmitter::emit_osr_entry().
   assert_different_registers(args_size, locals_size);
   // Assumes that args_size has already been adjusted.
-  if (TaggedStackInterpreter) sll(locals_size, 1, locals_size);
   subcc(locals_size, args_size, delta); // extra space for non-arguments locals in words
   // Use br/mov combination because it works on both V8 and V9 and is
   // faster.
@@ -319,7 +318,7 @@
   ldf(FloatRegisterImpl::D, r1, offset, d);
 #else
   ldf(FloatRegisterImpl::S, r1, offset, d);
-  ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize(), d->successor());
+  ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor());
 #endif
 }
 
@@ -330,10 +329,10 @@
 #ifdef _LP64
   stf(FloatRegisterImpl::D, d, r1, offset);
   // store something more useful here
-  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize());)
+  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
 #else
   stf(FloatRegisterImpl::S, d, r1, offset);
-  stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize());
+  stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize);
 #endif
 }
 
@@ -345,7 +344,7 @@
   ldx(r1, offset, rd);
 #else
   ld(r1, offset, rd);
-  ld(r1, offset + Interpreter::stackElementSize(), rd->successor());
+  ld(r1, offset + Interpreter::stackElementSize, rd->successor());
 #endif
 }
 
@@ -356,138 +355,62 @@
 #ifdef _LP64
   stx(l, r1, offset);
   // store something more useful here
-  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize());)
+  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
 #else
   st(l, r1, offset);
-  st(l->successor(), r1, offset + Interpreter::stackElementSize());
+  st(l->successor(), r1, offset + Interpreter::stackElementSize);
 #endif
 }
 
-#ifdef ASSERT
-void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t,
-                                                 Register r,
-                                                 Register scratch) {
-  if (TaggedStackInterpreter) {
-    Label ok, long_ok;
-    ld_ptr(Lesp, Interpreter::expr_tag_offset_in_bytes(0), r);
-    if (t == frame::TagCategory2) {
-      cmp(r, G0);
-      brx(Assembler::equal, false, Assembler::pt, long_ok);
-      delayed()->ld_ptr(Lesp, Interpreter::expr_tag_offset_in_bytes(1), r);
-      stop("stack long/double tag value bad");
-      bind(long_ok);
-      cmp(r, G0);
-    } else if (t == frame::TagValue) {
-      cmp(r, G0);
-    } else {
-      assert_different_registers(r, scratch);
-      mov(t, scratch);
-      cmp(r, scratch);
-    }
-    brx(Assembler::equal, false, Assembler::pt, ok);
-    delayed()->nop();
-    // Also compare if the stack value is zero, then the tag might
-    // not have been set coming from deopt.
-    ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
-    cmp(r, G0);
-    brx(Assembler::equal, false, Assembler::pt, ok);
-    delayed()->nop();
-    stop("Stack tag value is bad");
-    bind(ok);
-  }
-}
-#endif // ASSERT
-
 void InterpreterMacroAssembler::pop_i(Register r) {
   assert_not_delayed();
-  // Uses destination register r for scratch
-  debug_only(verify_stack_tag(frame::TagValue, r));
   ld(Lesp, Interpreter::expr_offset_in_bytes(0), r);
-  inc(Lesp, Interpreter::stackElementSize());
+  inc(Lesp, Interpreter::stackElementSize);
   debug_only(verify_esp(Lesp));
 }
 
 void InterpreterMacroAssembler::pop_ptr(Register r, Register scratch) {
   assert_not_delayed();
-  // Uses destination register r for scratch
-  debug_only(verify_stack_tag(frame::TagReference, r, scratch));
   ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
-  inc(Lesp, Interpreter::stackElementSize());
+  inc(Lesp, Interpreter::stackElementSize);
   debug_only(verify_esp(Lesp));
 }
 
 void InterpreterMacroAssembler::pop_l(Register r) {
   assert_not_delayed();
-  // Uses destination register r for scratch
-  debug_only(verify_stack_tag(frame::TagCategory2, r));
   load_unaligned_long(Lesp, Interpreter::expr_offset_in_bytes(0), r);
-  inc(Lesp, 2*Interpreter::stackElementSize());
+  inc(Lesp, 2*Interpreter::stackElementSize);
   debug_only(verify_esp(Lesp));
 }
 
 
 void InterpreterMacroAssembler::pop_f(FloatRegister f, Register scratch) {
   assert_not_delayed();
-  debug_only(verify_stack_tag(frame::TagValue, scratch));
   ldf(FloatRegisterImpl::S, Lesp, Interpreter::expr_offset_in_bytes(0), f);
-  inc(Lesp, Interpreter::stackElementSize());
+  inc(Lesp, Interpreter::stackElementSize);
   debug_only(verify_esp(Lesp));
 }
 
 
 void InterpreterMacroAssembler::pop_d(FloatRegister f, Register scratch) {
   assert_not_delayed();
-  debug_only(verify_stack_tag(frame::TagCategory2, scratch));
   load_unaligned_double(Lesp, Interpreter::expr_offset_in_bytes(0), f);
-  inc(Lesp, 2*Interpreter::stackElementSize());
+  inc(Lesp, 2*Interpreter::stackElementSize);
   debug_only(verify_esp(Lesp));
 }
 
 
-// (Note use register first, then decrement so dec can be done during store stall)
-void InterpreterMacroAssembler::tag_stack(Register r) {
-  if (TaggedStackInterpreter) {
-    st_ptr(r, Lesp, Interpreter::tag_offset_in_bytes());
-  }
-}
-
-void InterpreterMacroAssembler::tag_stack(frame::Tag t, Register r) {
-  if (TaggedStackInterpreter) {
-    assert (frame::TagValue == 0, "TagValue must be zero");
-    if (t == frame::TagValue) {
-      st_ptr(G0, Lesp, Interpreter::tag_offset_in_bytes());
-    } else if (t == frame::TagCategory2) {
-      st_ptr(G0, Lesp, Interpreter::tag_offset_in_bytes());
-      // Tag next slot down too
-      st_ptr(G0, Lesp, -Interpreter::stackElementSize() + Interpreter::tag_offset_in_bytes());
-    } else {
-      assert_different_registers(r, O3);
-      mov(t, O3);
-      st_ptr(O3, Lesp, Interpreter::tag_offset_in_bytes());
-    }
-  }
-}
-
 void InterpreterMacroAssembler::push_i(Register r) {
   assert_not_delayed();
   debug_only(verify_esp(Lesp));
-  tag_stack(frame::TagValue, r);
-  st(  r,    Lesp, Interpreter::value_offset_in_bytes());
-  dec( Lesp, Interpreter::stackElementSize());
+  st(r, Lesp, 0);
+  dec(Lesp, Interpreter::stackElementSize);
 }
 
 void InterpreterMacroAssembler::push_ptr(Register r) {
   assert_not_delayed();
-  tag_stack(frame::TagReference, r);
-  st_ptr(  r,    Lesp, Interpreter::value_offset_in_bytes());
-  dec( Lesp, Interpreter::stackElementSize());
-}
-
-void InterpreterMacroAssembler::push_ptr(Register r, Register tag) {
-  assert_not_delayed();
-  tag_stack(tag);
-  st_ptr(r, Lesp, Interpreter::value_offset_in_bytes());
-  dec( Lesp, Interpreter::stackElementSize());
+  st_ptr(r, Lesp, 0);
+  dec(Lesp, Interpreter::stackElementSize);
 }
 
 // remember: our convention for longs in SPARC is:
@@ -497,33 +420,28 @@
 void InterpreterMacroAssembler::push_l(Register r) {
   assert_not_delayed();
   debug_only(verify_esp(Lesp));
-  tag_stack(frame::TagCategory2, r);
-  // Longs are in stored in memory-correct order, even if unaligned.
-  // and may be separated by stack tags.
-  int offset = -Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
+  // Longs are stored in memory-correct order, even if unaligned.
+  int offset = -Interpreter::stackElementSize;
   store_unaligned_long(r, Lesp, offset);
-  dec(Lesp, 2 * Interpreter::stackElementSize());
+  dec(Lesp, 2 * Interpreter::stackElementSize);
 }
 
 
 void InterpreterMacroAssembler::push_f(FloatRegister f) {
   assert_not_delayed();
   debug_only(verify_esp(Lesp));
-  tag_stack(frame::TagValue, Otos_i);
-  stf(FloatRegisterImpl::S, f, Lesp, Interpreter::value_offset_in_bytes());
-  dec(Lesp, Interpreter::stackElementSize());
+  stf(FloatRegisterImpl::S, f, Lesp, 0);
+  dec(Lesp, Interpreter::stackElementSize);
 }
 
 
 void InterpreterMacroAssembler::push_d(FloatRegister d)   {
   assert_not_delayed();
   debug_only(verify_esp(Lesp));
-  tag_stack(frame::TagCategory2, Otos_i);
-  // Longs are in stored in memory-correct order, even if unaligned.
-  // and may be separated by stack tags.
-  int offset = -Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
+  // Longs are stored in memory-correct order, even if unaligned.
+  int offset = -Interpreter::stackElementSize;
   store_unaligned_double(d, Lesp, offset);
-  dec(Lesp, 2 * Interpreter::stackElementSize());
+  dec(Lesp, 2 * Interpreter::stackElementSize);
 }
 
 
@@ -561,30 +479,18 @@
 }
 
 
-// Tagged stack helpers for swap and dup
-void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val,
-                                                 Register tag) {
+// Helpers for swap and dup
+void InterpreterMacroAssembler::load_ptr(int n, Register val) {
   ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(n), val);
-  if (TaggedStackInterpreter) {
-    ld_ptr(Lesp, Interpreter::expr_tag_offset_in_bytes(n), tag);
-  }
 }
-void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
-                                                  Register tag) {
+void InterpreterMacroAssembler::store_ptr(int n, Register val) {
   st_ptr(val, Lesp, Interpreter::expr_offset_in_bytes(n));
-  if (TaggedStackInterpreter) {
-    st_ptr(tag, Lesp, Interpreter::expr_tag_offset_in_bytes(n));
-  }
 }
 
 
 void InterpreterMacroAssembler::load_receiver(Register param_count,
                                               Register recv) {
-
-  sll(param_count, Interpreter::logStackElementSize(), param_count);
-  if (TaggedStackInterpreter) {
-    add(param_count, Interpreter::value_offset_in_bytes(), param_count);  // get obj address
-  }
+  sll(param_count, Interpreter::logStackElementSize, param_count);
   ld_ptr(Lesp, param_count, recv);                      // gets receiver Oop
 }
 
@@ -605,7 +511,6 @@
 
   // Compute max expression stack+register save area
   lduh(Lmethod, in_bytes(methodOopDesc::max_stack_offset()), Gframe_size);  // Load max stack.
-  if (TaggedStackInterpreter) sll ( Gframe_size, 1, Gframe_size);  // max_stack * 2 for TAGS
   add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size );
 
   //
@@ -814,22 +719,39 @@
 }
 
 
-void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset) {
+void InterpreterMacroAssembler::get_cache_index_at_bcp(Register cache, Register tmp,
+                                                       int bcp_offset, bool giant_index) {
+  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
+  if (!giant_index) {
+    get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
+  } else {
+    assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
+    get_4_byte_integer_at_bcp(bcp_offset, cache, tmp);
+    assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
+    xor3(tmp, -1, tmp);  // convert to plain index
+  }
+}
+
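
A giant ("secondary") constant-pool cache index is stored as the bitwise complement of the plain index, which is why a single xor3 with -1 decodes it. Standalone check of the identity the assert relies on:

    #include <cassert>

    int main() {
      int encoded = ~123;              // how invokedynamic call sites encode the index
      assert((encoded ^ -1) == 123);   // xor3(tmp, -1, tmp) recovers the plain index
      return 0;
    }
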
+
+void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
+                                                           int bcp_offset, bool giant_index) {
   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
   assert_different_registers(cache, tmp);
   assert_not_delayed();
-  get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
-              // convert from field index to ConstantPoolCacheEntry index
-              // and from word index to byte offset
+  get_cache_index_at_bcp(cache, tmp, bcp_offset, giant_index);
+  // convert from field index to ConstantPoolCacheEntry index and from
+  // word index to byte offset
   sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
   add(LcpoolCache, tmp, cache);
 }
 
 
-void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset) {
+void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
+                                                               int bcp_offset, bool giant_index) {
   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
   assert_different_registers(cache, tmp);
   assert_not_delayed();
+  assert(!giant_index, "NYI");
   get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
               // convert from field index to ConstantPoolCacheEntry index
               // and from word index to byte offset
@@ -1675,15 +1597,31 @@
 // Count a virtual call in the bytecodes.
 
 void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
-                                                     Register scratch) {
+                                                     Register scratch,
+                                                     bool receiver_can_be_null) {
   if (ProfileInterpreter) {
     Label profile_continue;
 
     // If no method data exists, go to profile_continue.
     test_method_data_pointer(profile_continue);
 
+
+    Label skip_receiver_profile;
+    if (receiver_can_be_null) {
+      Label not_null;
+      tst(receiver);
+      brx(Assembler::notZero, false, Assembler::pt, not_null);
+      delayed()->nop();
+      // We are making a call.  Increment the count for null receiver.
+      increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
+      ba(false, skip_receiver_profile);
+      delayed()->nop();
+      bind(not_null);
+    }
+
     // Record the receiver type.
     record_klass_in_profile(receiver, scratch, true);
+    bind(skip_receiver_profile);
 
     // The method data pointer needs to be updated to reflect the new target.
     update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
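
The delay-slot branching above is easier to audit against its intended shape. A plain-C++ rendering of the null-aware path (helper names are stand-ins, not VM functions):

    static void increment_null_receiver_count() { /* increment_mdp_data_at stand-in */ }
    static void record_klass(const void*)       { /* record_klass_in_profile stand-in */ }

    void profile_virtual_call_shape(const void* receiver, bool receiver_can_be_null) {
      if (receiver_can_be_null && receiver == 0) {
        increment_null_receiver_count();   // count the call, record no receiver type
      } else {
        record_klass(receiver);            // normal path: record the receiver klass
      }
      // both paths fall through to the common mdp update
    }
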
@@ -1985,51 +1923,11 @@
 }
 
 // Locals
-#ifdef ASSERT
-void InterpreterMacroAssembler::verify_local_tag(frame::Tag t,
-                                                 Register base,
-                                                 Register scratch,
-                                                 int n) {
-  if (TaggedStackInterpreter) {
-    Label ok, long_ok;
-    // Use dst for scratch
-    assert_different_registers(base, scratch);
-    ld_ptr(base, Interpreter::local_tag_offset_in_bytes(n), scratch);
-    if (t == frame::TagCategory2) {
-      cmp(scratch, G0);
-      brx(Assembler::equal, false, Assembler::pt, long_ok);
-      delayed()->ld_ptr(base, Interpreter::local_tag_offset_in_bytes(n+1), scratch);
-      stop("local long/double tag value bad");
-      bind(long_ok);
-      // compare second half tag
-      cmp(scratch, G0);
-    } else if (t == frame::TagValue) {
-      cmp(scratch, G0);
-    } else {
-      assert_different_registers(O3, base, scratch);
-      mov(t, O3);
-      cmp(scratch, O3);
-    }
-    brx(Assembler::equal, false, Assembler::pt, ok);
-    delayed()->nop();
-    // Also compare if the local value is zero, then the tag might
-    // not have been set coming from deopt.
-    ld_ptr(base, Interpreter::local_offset_in_bytes(n), scratch);
-    cmp(scratch, G0);
-    brx(Assembler::equal, false, Assembler::pt, ok);
-    delayed()->nop();
-    stop("Local tag value is bad");
-    bind(ok);
-  }
-}
-#endif // ASSERT
-
 void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst ) {
   assert_not_delayed();
-  sll(index, Interpreter::logStackElementSize(), index);
+  sll(index, Interpreter::logStackElementSize, index);
   sub(Llocals, index, index);
-  debug_only(verify_local_tag(frame::TagReference, index, dst));
-  ld_ptr(index, Interpreter::value_offset_in_bytes(), dst);
+  ld_ptr(index, 0, dst);
   // Note:  index must hold the effective address--the iinc template uses it
 }
 
@@ -2037,27 +1935,24 @@
 void InterpreterMacroAssembler::access_local_returnAddress(Register index,
                                                            Register dst ) {
   assert_not_delayed();
-  sll(index, Interpreter::logStackElementSize(), index);
+  sll(index, Interpreter::logStackElementSize, index);
   sub(Llocals, index, index);
-  debug_only(verify_local_tag(frame::TagValue, index, dst));
-  ld_ptr(index, Interpreter::value_offset_in_bytes(), dst);
+  ld_ptr(index, 0, dst);
 }
 
 void InterpreterMacroAssembler::access_local_int( Register index, Register dst ) {
   assert_not_delayed();
-  sll(index, Interpreter::logStackElementSize(), index);
+  sll(index, Interpreter::logStackElementSize, index);
   sub(Llocals, index, index);
-  debug_only(verify_local_tag(frame::TagValue, index, dst));
-  ld(index, Interpreter::value_offset_in_bytes(), dst);
+  ld(index, 0, dst);
   // Note:  index must hold the effective address--the iinc template uses it
 }
 
 
 void InterpreterMacroAssembler::access_local_long( Register index, Register dst ) {
   assert_not_delayed();
-  sll(index, Interpreter::logStackElementSize(), index);
+  sll(index, Interpreter::logStackElementSize, index);
   sub(Llocals, index, index);
-  debug_only(verify_local_tag(frame::TagCategory2, index, dst));
   // First half stored at index n+1 (which grows down from Llocals[n])
   load_unaligned_long(index, Interpreter::local_offset_in_bytes(1), dst);
 }
@@ -2065,18 +1960,16 @@
 
 void InterpreterMacroAssembler::access_local_float( Register index, FloatRegister dst ) {
   assert_not_delayed();
-  sll(index, Interpreter::logStackElementSize(), index);
+  sll(index, Interpreter::logStackElementSize, index);
   sub(Llocals, index, index);
-  debug_only(verify_local_tag(frame::TagValue, index, G1_scratch));
-  ldf(FloatRegisterImpl::S, index, Interpreter::value_offset_in_bytes(), dst);
+  ldf(FloatRegisterImpl::S, index, 0, dst);
 }
 
 
 void InterpreterMacroAssembler::access_local_double( Register index, FloatRegister dst ) {
   assert_not_delayed();
-  sll(index, Interpreter::logStackElementSize(), index);
+  sll(index, Interpreter::logStackElementSize, index);
   sub(Llocals, index, index);
-  debug_only(verify_local_tag(frame::TagCategory2, index, G1_scratch));
   load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst);
 }
 
@@ -2102,94 +1995,60 @@
 }
 #endif // ASSERT
 
-void InterpreterMacroAssembler::tag_local(frame::Tag t,
-                                          Register base,
-                                          Register src,
-                                          int n) {
-  if (TaggedStackInterpreter) {
-    // have to store zero because local slots can be reused (rats!)
-    if (t == frame::TagValue) {
-      st_ptr(G0, base, Interpreter::local_tag_offset_in_bytes(n));
-    } else if (t == frame::TagCategory2) {
-      st_ptr(G0, base, Interpreter::local_tag_offset_in_bytes(n));
-      st_ptr(G0, base, Interpreter::local_tag_offset_in_bytes(n+1));
-    } else {
-      // assert that we don't stomp the value in 'src'
-      // O3 is arbitrary because it's not used.
-      assert_different_registers(src, base, O3);
-      mov( t, O3);
-      st_ptr(O3, base, Interpreter::local_tag_offset_in_bytes(n));
-    }
-  }
-}
-
 
 void InterpreterMacroAssembler::store_local_int( Register index, Register src ) {
   assert_not_delayed();
-  sll(index, Interpreter::logStackElementSize(), index);
+  sll(index, Interpreter::logStackElementSize, index);
   sub(Llocals, index, index);
-  debug_only(check_for_regarea_stomp(index, Interpreter::value_offset_in_bytes(), FP, G1_scratch, G4_scratch);)
-  tag_local(frame::TagValue, index, src);
-  st(src, index, Interpreter::value_offset_in_bytes());
+  debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);)
+  st(src, index, 0);
 }
 
-void InterpreterMacroAssembler::store_local_ptr( Register index, Register src,
-                                                 Register tag ) {
+void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) {
   assert_not_delayed();
-  sll(index, Interpreter::logStackElementSize(), index);
+  sll(index, Interpreter::logStackElementSize, index);
   sub(Llocals, index, index);
-  #ifdef ASSERT
-  check_for_regarea_stomp(index, Interpreter::value_offset_in_bytes(), FP, G1_scratch, G4_scratch);
-  #endif
-  st_ptr(src, index, Interpreter::value_offset_in_bytes());
-  // Store tag register directly
-  if (TaggedStackInterpreter) {
-    st_ptr(tag, index, Interpreter::tag_offset_in_bytes());
-  }
+#ifdef ASSERT
+  check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
+#endif
+  st_ptr(src, index, 0);
 }
 
 
 
-void InterpreterMacroAssembler::store_local_ptr( int n, Register src,
-                                                 Register tag ) {
-  st_ptr(src,  Llocals, Interpreter::local_offset_in_bytes(n));
-  if (TaggedStackInterpreter) {
-    st_ptr(tag, Llocals, Interpreter::local_tag_offset_in_bytes(n));
-  }
+void InterpreterMacroAssembler::store_local_ptr( int n, Register src ) {
+  st_ptr(src, Llocals, Interpreter::local_offset_in_bytes(n));
 }
 
 void InterpreterMacroAssembler::store_local_long( Register index, Register src ) {
   assert_not_delayed();
-  sll(index, Interpreter::logStackElementSize(), index);
+  sll(index, Interpreter::logStackElementSize, index);
   sub(Llocals, index, index);
-  #ifdef ASSERT
+#ifdef ASSERT
   check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
-  #endif
-  tag_local(frame::TagCategory2, index, src);
+#endif
   store_unaligned_long(src, index, Interpreter::local_offset_in_bytes(1)); // which is n+1
 }
 
 
 void InterpreterMacroAssembler::store_local_float( Register index, FloatRegister src ) {
   assert_not_delayed();
-  sll(index, Interpreter::logStackElementSize(), index);
+  sll(index, Interpreter::logStackElementSize, index);
   sub(Llocals, index, index);
-  #ifdef ASSERT
-  check_for_regarea_stomp(index, Interpreter::value_offset_in_bytes(), FP, G1_scratch, G4_scratch);
-  #endif
-  tag_local(frame::TagValue, index, G1_scratch);
-  stf(FloatRegisterImpl::S, src, index, Interpreter::value_offset_in_bytes());
+#ifdef ASSERT
+  check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
+#endif
+  stf(FloatRegisterImpl::S, src, index, 0);
 }
 
 
 void InterpreterMacroAssembler::store_local_double( Register index, FloatRegister src ) {
   assert_not_delayed();
-  sll(index, Interpreter::logStackElementSize(), index);
+  sll(index, Interpreter::logStackElementSize, index);
   sub(Llocals, index, index);
-  #ifdef ASSERT
+#ifdef ASSERT
   check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
-  #endif
-  tag_local(frame::TagCategory2, index, G1_scratch);
+#endif
   store_unaligned_double(src, index, Interpreter::local_offset_in_bytes(1));
 }
 
--- a/src/cpu/sparc/vm/interp_masm_sparc.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/sparc/vm/interp_masm_sparc.hpp	Wed May 19 10:22:39 2010 -0700
@@ -149,7 +149,6 @@
 
   void push_i(     Register r = Otos_i);
   void push_ptr(   Register r = Otos_i);
-  void push_ptr(   Register r, Register tag);
   void push_l(     Register r = Otos_l1);
   void push_f(FloatRegister f = Ftos_f);
   void push_d(FloatRegister f = Ftos_d1);
@@ -159,17 +158,9 @@
   void push(TosState state);           // transition state -> vtos
   void empty_expression_stack();       // resets both Lesp and SP
 
-  // Support for Tagged Stacks
-  void tag_stack(frame::Tag t, Register r);
-  void tag_stack(Register tag);
-  void tag_local(frame::Tag t, Register src, Register base, int n = 0);
-
 #ifdef ASSERT
   void verify_sp(Register Rsp, Register Rtemp);
   void verify_esp(Register Resp);      // verify that Lesp points to a word in the temp stack
-
-  void verify_stack_tag(frame::Tag t, Register r, Register scratch = G0);
-  void verify_local_tag(frame::Tag t, Register base, Register scr, int n = 0);
 #endif // ASSERT
 
  public:
@@ -191,8 +182,9 @@
                                   Register   Rdst,
                                   setCCOrNot should_set_CC = dont_set_CC );
 
-  void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset);
-  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset);
+  void get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
+  void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
+  void get_cache_index_at_bcp(Register cache, Register tmp, int bcp_offset, bool giant_index = false);
 
 
   // common code
@@ -241,17 +233,17 @@
   void check_for_regarea_stomp( Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1);
 #endif // ASSERT
   void store_local_int( Register index, Register src );
-  void store_local_ptr( Register index, Register src, Register tag = Otos_l2 );
-  void store_local_ptr( int n, Register src, Register tag = Otos_l2 );
+  void store_local_ptr( Register index, Register src );
+  void store_local_ptr( int n, Register src );
   void store_local_long( Register index, Register src );
   void store_local_float( Register index, FloatRegister src );
   void store_local_double( Register index, FloatRegister src );
 
-  // Tagged stack helpers for swap and dup
-  void load_ptr_and_tag(int n, Register val, Register tag);
-  void store_ptr_and_tag(int n, Register val, Register tag);
+  // Helpers for swap and dup
+  void load_ptr(int n, Register val);
+  void store_ptr(int n, Register val);
 
-  // Tagged stack helper for getting receiver in register.
+  // Helper for getting receiver in register.
   void load_receiver(Register param_count, Register recv);
 
   static int top_most_monitor_byte_offset(); // offset in bytes to top of monitor block
@@ -304,7 +296,7 @@
   void profile_not_taken_branch(Register scratch);
   void profile_call(Register scratch);
   void profile_final_call(Register scratch);
-  void profile_virtual_call(Register receiver, Register scratch);
+  void profile_virtual_call(Register receiver, Register scratch, bool receiver_can_be_null = false);
   void profile_ret(TosState state, Register return_bci, Register scratch);
   void profile_null_seen(Register scratch);
   void profile_typecheck(Register klass, Register scratch);
--- a/src/cpu/sparc/vm/interpreterRT_sparc.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/sparc/vm/interpreterRT_sparc.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,19 +43,6 @@
   Argument  jni_arg(jni_offset(), false);
   Register  Rtmp = O0;
 
-#ifdef ASSERT
-  if (TaggedStackInterpreter) {
-    // check at least one tag is okay
-    Label ok;
-    __ ld_ptr(Llocals, Interpreter::local_tag_offset_in_bytes(offset() + 1), Rtmp);
-    __ cmp(Rtmp, G0);
-    __ brx(Assembler::equal, false, Assembler::pt, ok);
-    __ delayed()->nop();
-    __ stop("Native object has bad tag value");
-    __ bind(ok);
-  }
-#endif // ASSERT
-
 #ifdef _LP64
   __ ldx(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
   __ store_long_argument(Rtmp, jni_arg);
@@ -107,18 +94,6 @@
 
   Address     h_arg = Address(Llocals, Interpreter::local_offset_in_bytes(offset()));
   __ ld_ptr(h_arg, Rtmp1);
-#ifdef ASSERT
-  if (TaggedStackInterpreter) {
-    // check we have the obj and not the tag
-    Label ok;
-    __ mov(frame::TagReference, Rtmp3);
-    __ cmp(Rtmp1, Rtmp3);
-    __ brx(Assembler::notEqual, true, Assembler::pt, ok);
-    __ delayed()->nop();
-    __ stop("Native object passed tag by mistake");
-    __ bind(ok);
-  }
-#endif // ASSERT
   if (!do_NULL_check) {
     __ add(h_arg.base(), h_arg.disp(), Rtmp2);
   } else {
@@ -168,17 +143,9 @@
     long_sig   = 3
   };
 
-#ifdef ASSERT
-  void verify_tag(frame::Tag t) {
-    assert(!TaggedStackInterpreter ||
-           *(intptr_t*)(_from+Interpreter::local_tag_offset_in_bytes(0)) == t, "wrong tag");
-  }
-#endif // ASSERT
-
   virtual void pass_int() {
     *_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
-    debug_only(verify_tag(frame::TagValue));
-    _from -= Interpreter::stackElementSize();
+    _from -= Interpreter::stackElementSize;
     add_signature( non_float );
   }
 
@@ -186,31 +153,27 @@
     // pass address of from
     intptr_t *from_addr = (intptr_t*)(_from + Interpreter::local_offset_in_bytes(0));
     *_to++ = (*from_addr == 0) ? NULL : (intptr_t) from_addr;
-    debug_only(verify_tag(frame::TagReference));
-    _from -= Interpreter::stackElementSize();
+    _from -= Interpreter::stackElementSize;
     add_signature( non_float );
    }
 
 #ifdef _LP64
   virtual void pass_float()  {
     *_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
-    debug_only(verify_tag(frame::TagValue));
-    _from -= Interpreter::stackElementSize();
+    _from -= Interpreter::stackElementSize;
     add_signature( float_sig );
    }
 
   virtual void pass_double() {
     *_to++ = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
-    debug_only(verify_tag(frame::TagValue));
-    _from -= 2*Interpreter::stackElementSize();
+    _from -= 2*Interpreter::stackElementSize;
    add_signature( double_sig );
    }
 
   virtual void pass_long() {
     _to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
-    debug_only(verify_tag(frame::TagValue));
     _to += 1;
-    _from -= 2*Interpreter::stackElementSize();
+    _from -= 2*Interpreter::stackElementSize;
     add_signature( long_sig );
   }
 #else
@@ -218,9 +181,8 @@
   virtual void pass_long() {
     _to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
     _to[1] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
-    debug_only(verify_tag(frame::TagValue));
     _to += 2;
-    _from -= 2*Interpreter::stackElementSize();
+    _from -= 2*Interpreter::stackElementSize;
     add_signature( non_float );
   }
 #endif // _LP64
--- a/src/cpu/sparc/vm/interpreter_sparc.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/sparc/vm/interpreter_sparc.cpp	Wed May 19 10:22:39 2010 -0700
@@ -235,19 +235,17 @@
 }
 
 
-
 // Method handle invoker
 // Dispatch a method of the form java.dyn.MethodHandles::invoke(...)
 address InterpreterGenerator::generate_method_handle_entry(void) {
   if (!EnableMethodHandles) {
     return generate_abstract_entry();
   }
-  return generate_abstract_entry(); //6815692//
+
+  return MethodHandles::generate_method_handle_interpreter_entry(_masm);
 }
 
 
-
-
 //----------------------------------------------------------------------------------------------------
 // Entry points & stack frame layout
 //
--- a/src/cpu/sparc/vm/interpreter_sparc.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/sparc/vm/interpreter_sparc.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,33 +24,13 @@
 
  public:
 
-  // Support for Tagged Stacks
+  static int expr_offset_in_bytes(int i) { return stackElementSize * i + wordSize; }
 
   // Stack index relative to tos (which points at value)
-  static int expr_index_at(int i)     {
-    return stackElementWords() * i;
-  }
-
-  static int expr_tag_index_at(int i) {
-    assert(TaggedStackInterpreter, "should not call this");
-    // tag is one word above java stack element
-    return stackElementWords() * i + 1;
-  }
-
-  static int expr_offset_in_bytes(int i) { return stackElementSize()*i + wordSize; }
-  static int expr_tag_offset_in_bytes (int i) {
-    assert(TaggedStackInterpreter, "should not call this");
-    return expr_offset_in_bytes(i) + wordSize;
-  }
+  static int expr_index_at(int i)        { return stackElementWords * i; }
 
   // Already negated by c++ interpreter
-  static int local_index_at(int i)     {
-    assert(i<=0, "local direction already negated");
-    return stackElementWords() * i + (value_offset_in_bytes()/wordSize);
+  static int local_index_at(int i) {
+    assert(i <= 0, "local direction already negated");
+    return stackElementWords * i;
   }
-
-  static int local_tag_index_at(int i) {
-    assert(i<=0, "local direction already negated");
-    assert(TaggedStackInterpreter, "should not call this");
-    return stackElementWords() * i + (tag_offset_in_bytes()/wordSize);
-  }
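
With tag slots removed, one stack element is one word, so these helpers reduce to plain multiples. A worked check under assumed LP64 constants:

    #include <cassert>

    static const int wordSize          = 8;          // assumption: LP64
    static const int stackElementSize  = wordSize;   // no interleaved tag words anymore
    static const int stackElementWords = 1;

    int expr_offset_in_bytes(int i) { return stackElementSize * i + wordSize; }
    int expr_index_at(int i)        { return stackElementWords * i; }

    int main() {
      assert(expr_offset_in_bytes(0) == 8);   // tos value is one word above Lesp
      assert(expr_offset_in_bytes(1) == 16);  // next-deeper slot
      assert(expr_index_at(2) == 2);          // word index now equals slot index
      return 0;
    }
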
--- a/src/cpu/sparc/vm/methodHandles_sparc.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2008-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,9 @@
 
 address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
                                                 address interpreted_entry) {
+  // Just before the actual machine code entry point, allocate space
+  // for a MethodHandleEntry::Data record, so that we can manage everything
+  // from one base pointer.
   __ align(wordSize);
   address target = __ pc() + sizeof(Data);
   while (__ pc() < target) {
@@ -59,12 +62,891 @@
 
 // Code generation
 address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
-  ShouldNotReachHere(); //NYI, 6815692
-  return NULL;
+  // I5_savedSP: sender SP (must preserve)
+  // G4 (Gargs): incoming argument list (must preserve)
+  // G5_method:  invoke methodOop; becomes method type.
+  // G3_method_handle: receiver method handle (must load from sp[MethodTypeForm.vmslots])
+  // O0, O1: garbage temps, blown away
+  Register O0_argslot = O0;
+  Register O1_scratch = O1;
+
+  // emit WrongMethodType path first, to enable back-branch from main path
+  Label wrong_method_type;
+  __ bind(wrong_method_type);
+  __ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
+  __ delayed()->nop();
+
+  // here's where control starts out:
+  __ align(CodeEntryAlignment);
+  address entry_point = __ pc();
+
+  // fetch the MethodType from the method handle into G5_method_type
+  {
+    Register tem = G5_method;
+    assert(tem == G5_method_type, "yes, it's the same register");
+    for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
+      __ ld_ptr(Address(tem, *pchase), G5_method_type);
+    }
+  }
+
+  // given the MethodType, find out where the MH argument is buried
+  __ ld_ptr(Address(G5_method_type, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, O1_scratch)),        O0_argslot);
+  __ ldsw(  Address(O0_argslot,     __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O0_argslot);
+  __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
+
+  __ check_method_handle_type(G5_method_type, G3_method_handle, O1_scratch, wrong_method_type);
+  __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
+
+  return entry_point;
 }
 
+
+#ifdef ASSERT
+static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
+  // Verify that argslot lies within (Gargs, FP].
+  Label L_ok, L_bad;
+#ifdef _LP64
+  __ add(FP, STACK_BIAS, temp_reg);
+  __ cmp(argslot_reg, temp_reg);
+#else
+  __ cmp(argslot_reg, FP);
+#endif
+  __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
+  __ delayed()->nop();
+  __ cmp(Gargs, argslot_reg);
+  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
+  __ delayed()->nop();
+  __ bind(L_bad);
+  __ stop(error_message);
+  __ bind(L_ok);
+}
+#endif
+
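
Stripped of delay slots, verify_argslot is an unsigned interval test against the current frame. An equivalent predicate (FP assumed pre-biased by STACK_BIAS on LP64):

    #include <cstdint>

    bool argslot_in_frame(uintptr_t argslot, uintptr_t gargs, uintptr_t biased_fp) {
      return gargs <= argslot && argslot <= biased_fp;   // [Gargs, FP]
    }
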
+
+// Helper to insert argument slots into the stack.
+// arg_slots must be a multiple of stack_move_unit() and <= 0
+void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
+                                     RegisterOrConstant arg_slots,
+                                     int arg_mask,
+                                     Register argslot_reg,
+                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
+  assert(temp3_reg != noreg, "temp3 required");
+  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
+                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));
+
+#ifdef ASSERT
+  verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
+  if (arg_slots.is_register()) {
+    Label L_ok, L_bad;
+    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
+    __ br(Assembler::greater, false, Assembler::pn, L_bad);
+    __ delayed()->nop();
+    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
+    __ br(Assembler::zero, false, Assembler::pt, L_ok);
+    __ delayed()->nop();
+    __ bind(L_bad);
+    __ stop("assert arg_slots <= 0 and clear low bits");
+    __ bind(L_ok);
+  } else {
+    assert(arg_slots.as_constant() <= 0, "");
+    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
+  }
+#endif // ASSERT
+
+#ifdef _LP64
+  if (arg_slots.is_register()) {
+    // Was arg_slots register loaded as signed int?
+    Label L_ok;
+    __ sll(arg_slots.as_register(), BitsPerInt, temp_reg);
+    __ sra(temp_reg, BitsPerInt, temp_reg);
+    __ cmp(arg_slots.as_register(), temp_reg);
+    __ br(Assembler::equal, false, Assembler::pt, L_ok);
+    __ delayed()->nop();
+    __ stop("arg_slots register not loaded as signed int");
+    __ bind(L_ok);
+  }
+#endif
+
+  // Make space on the stack for the inserted argument(s).
+  // Then pull down everything shallower than argslot_reg.
+  // The stacked return address gets pulled down with everything else.
+  // That is, copy [sp, argslot) downward by -size words.  In pseudo-code:
+  //   sp -= size;
+  //   for (temp = sp + size; temp < argslot; temp++)
+  //     temp[-size] = temp[0]
+  //   argslot -= size;
+  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);
+
+  // Keep the stack pointer 2*wordSize aligned.
+  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
+  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
+  __ add(SP, masked_offset, SP);
+
+  __ mov(Gargs, temp_reg);  // source pointer for copy
+  __ add(Gargs, offset, Gargs);
+
+  {
+    Label loop;
+    __ bind(loop);
+    // pull one word down each time through the loop
+    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
+    __ st_ptr(temp2_reg, Address(temp_reg, offset));
+    __ add(temp_reg, wordSize, temp_reg);
+    __ cmp(temp_reg, argslot_reg);
+    __ brx(Assembler::less, false, Assembler::pt, loop);
+    __ delayed()->nop();  // FILLME
+  }
+
+  // Now move the argslot down, to point to the opened-up space.
+  __ add(argslot_reg, offset, argslot_reg);
+}
+
+
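
The copy-down loop is easy to model on the host. A small vector-based sketch where index 0 plays the role of SP (smaller index = shallower), matching the pseudo-code above:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Open 'size' slots at depth 'argslot': sp -= size, then slide every
    // word shallower than argslot down by 'size', exactly what the
    // temp[-size] = temp[0] loop above does in place.
    void insert_slots(std::vector<long>& stack, std::size_t argslot, std::size_t size) {
      stack.insert(stack.begin(), size, 0);            // sp -= size
      for (std::size_t t = size; t < argslot + size; t++)
        stack[t - size] = stack[t];                    // pull one word down
    }

    int main() {
      std::vector<long> s;
      s.push_back(11); s.push_back(22); s.push_back(33); s.push_back(44);
      insert_slots(s, 2, 1);                           // open one slot at depth 2
      assert(s[0] == 11 && s[1] == 22 && s[3] == 33 && s[4] == 44);
      return 0;
    }
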
+// Helper to remove argument slots from the stack.
+// arg_slots must be a multiple of stack_move_unit() and >= 0
+void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
+                                     RegisterOrConstant arg_slots,
+                                     Register argslot_reg,
+                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
+  assert(temp3_reg != noreg, "temp3 required");
+  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
+                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));
+
+  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);
+
+#ifdef ASSERT
+  // Verify that [argslot..argslot+size) lies within (Gargs, FP).
+  __ add(argslot_reg, offset, temp2_reg);
+  verify_argslot(_masm, temp2_reg, temp_reg, "deleted argument(s) must fall within current frame");
+  if (arg_slots.is_register()) {
+    Label L_ok, L_bad;
+    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
+    __ br(Assembler::less, false, Assembler::pn, L_bad);
+    __ delayed()->nop();
+    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
+    __ br(Assembler::zero, false, Assembler::pt, L_ok);
+    __ delayed()->nop();
+    __ bind(L_bad);
+    __ stop("assert arg_slots >= 0 and clear low bits");
+    __ bind(L_ok);
+  } else {
+    assert(arg_slots.as_constant() >= 0, "");
+    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
+  }
+#endif // ASSERT
+
+  // Pull up everything shallower than argslot.
+  // Then remove the excess space on the stack.
+  // The stacked return address gets pulled up with everything else.
+  // That is, copy [sp, argslot) upward by size words.  In pseudo-code:
+  //   for (temp = argslot-1; temp >= sp; --temp)
+  //     temp[size] = temp[0]
+  //   argslot += size;
+  //   sp += size;
+  __ sub(argslot_reg, wordSize, temp_reg);  // source pointer for copy
+  {
+    Label loop;
+    __ bind(loop);
+    // pull one word up each time through the loop
+    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
+    __ st_ptr(temp2_reg, Address(temp_reg, offset));
+    __ sub(temp_reg, wordSize, temp_reg);
+    __ cmp(temp_reg, Gargs);
+    __ brx(Assembler::greaterEqual, false, Assembler::pt, loop);
+    __ delayed()->nop();  // FILLME
+  }
+
+  // Now move the argslot up, to point to the just-copied block.
+  __ add(Gargs, offset, Gargs);
+  // And adjust the argslot address to point at the deletion point.
+  __ add(argslot_reg, offset, argslot_reg);
+
+  // Keep the stack pointer 2*wordSize aligned.
+  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
+  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
+  __ add(SP, masked_offset, SP);
+}
+
+
+#ifndef PRODUCT
+extern "C" void print_method_handle(oop mh);
+void trace_method_handle_stub(const char* adaptername,
+                              oop mh) {
+#if 0
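+// Disabled fuller tracer: it also walks the interpreter frame and would
+// need the extra entry_sp/saved_sp/saved_bp parameters kept below for
+// reference.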
+                              intptr_t* entry_sp,
+                              intptr_t* saved_sp,
+                              intptr_t* saved_bp) {
+  // called as a leaf from native code: do not block the JVM!
+  intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
+  intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
+  printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n",
+         adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
+  if (last_sp != saved_sp)
+    printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
+#endif
+
+  printf("MH %s mh="INTPTR_FORMAT"\n", adaptername, (intptr_t) mh);
+  print_method_handle(mh);
+}
+#endif // PRODUCT
+
+// which conversion op types are implemented here?
+int MethodHandles::adapter_conversion_ops_supported_mask() {
+  return ((1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_CHECK_CAST)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_ROT_ARGS)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_DUP_ARGS)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_DROP_ARGS)
+         //|(1<<sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
+         );
+  // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
+}
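+
+// Caller-side sketch (illustrative only): a conversion op conv_op is
+// supported iff its bit is set in the mask:
+//   bool supported = ((adapter_conversion_ops_supported_mask() >> conv_op) & 1) != 0;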
+
+//------------------------------------------------------------------------------
+// MethodHandles::generate_method_handle_stub
+//
 // Generate an "entry" field for a method handle.
 // This determines how the method handle will respond to calls.
 void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
-  ShouldNotReachHere(); //NYI, 6815692
+  // Here is the register state during an interpreted call,
+  // as set up by generate_method_handle_interpreter_entry():
+  // - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
+  // - G3: receiver method handle
+  // - O5_savedSP: sender SP (must preserve)
+
+  Register O0_argslot = O0;
+  Register O1_scratch = O1;
+  Register O2_scratch = O2;
+  Register O3_scratch = O3;
+  Register G5_index   = G5;
+
+  guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
+
+  // Some handy addresses:
+  Address G5_method_fie(    G5_method,        in_bytes(methodOopDesc::from_interpreted_offset()));
+
+  Address G3_mh_vmtarget(   G3_method_handle, java_dyn_MethodHandle::vmtarget_offset_in_bytes());
+
+  Address G3_dmh_vmindex(   G3_method_handle, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes());
+
+  Address G3_bmh_vmargslot( G3_method_handle, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes());
+  Address G3_bmh_argument(  G3_method_handle, sun_dyn_BoundMethodHandle::argument_offset_in_bytes());
+
+  Address G3_amh_vmargslot( G3_method_handle, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes());
+  Address G3_amh_argument ( G3_method_handle, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes());
+  Address G3_amh_conversion(G3_method_handle, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes());
+
+  const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
+
+  if (have_entry(ek)) {
+    __ nop();  // empty stubs make SG sick
+    return;
+  }
+
+  address interp_entry = __ pc();
+  if (UseCompressedOops)  __ unimplemented("UseCompressedOops");
+
+#ifndef PRODUCT
+  if (TraceMethodHandles) {
+    // save: Gargs, O5_savedSP
+    __ save(SP, -16*wordSize, SP);
+    __ set((intptr_t) entry_name(ek), O0);
+    __ mov(G3_method_handle, O1);
+    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, trace_method_handle_stub));
+    __ restore(SP, 16*wordSize, SP);
+  }
+#endif // PRODUCT
+
+  switch ((int) ek) {
+  case _raise_exception:
+    {
+      // Not a real MH entry, but rather shared code for raising an
+      // exception.  Extra local arguments are passed in scratch
+      // registers, as follows: required type in O3, failing object (or NULL)
+      // in O2, failing bytecode type in O1.
+
+      __ mov(O5_savedSP, SP);  // Cut the stack back to where the caller started.
+
+      // Push arguments as if coming from the interpreter.
+      Register O0_scratch = O0_argslot;
+      int stackElementSize = Interpreter::stackElementSize;
+
+      // Make space on the stack for the arguments.
+      __ sub(SP,    4*stackElementSize, SP);
+      __ sub(Gargs, 3*stackElementSize, Gargs);
+      //__ sub(Lesp,  3*stackElementSize, Lesp);
+
+      // void raiseException(int code, Object actual, Object required)
+      __ st(    O1_scratch, Address(Gargs, 2*stackElementSize));  // code
+      __ st_ptr(O2_scratch, Address(Gargs, 1*stackElementSize));  // actual
+      __ st_ptr(O3_scratch, Address(Gargs, 0*stackElementSize));  // required
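+      // Resulting outgoing layout (sketch; SES = Interpreter::stackElementSize):
+      //   Gargs + 2*SES : code     (int)
+      //   Gargs + 1*SES : actual   (oop or NULL)
+      //   Gargs + 0*SES : required (oop)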
+
+      Label no_method;
+      // FIXME: fill in _raise_exception_method with a suitable sun.dyn method
+      __ set(AddressLiteral((address) &_raise_exception_method), G5_method);
+      __ ld_ptr(Address(G5_method, 0), G5_method);
+      __ tst(G5_method);
+      __ brx(Assembler::zero, false, Assembler::pn, no_method);
+      __ delayed()->nop();
+
+      int jobject_oop_offset = 0;
+      __ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
+      __ tst(G5_method);
+      __ brx(Assembler::zero, false, Assembler::pn, no_method);
+      __ delayed()->nop();
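+
+      // In pseudo-code (the static field holds a jobject, i.e. an oop handle):
+      //   jobject h = _raise_exception_method;
+      //   if (h == NULL)  goto no_method;
+      //   methodOop m = *(methodOop*) h;   // resolve the handle
+      //   if (m == NULL)  goto no_method;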
+
+      __ verify_oop(G5_method);
+      __ jump_indirect_to(G5_method_fie, O1_scratch);
+      __ delayed()->nop();
+
+      // If we get here, the Java runtime did not do its job of creating the exception.
+      // Do something that at least causes a valid throw from the interpreter.
+      __ bind(no_method);
+      __ unimplemented("_raise_exception no method");
+    }
+    break;
+
+  case _invokestatic_mh:
+  case _invokespecial_mh:
+    {
+      __ ld_ptr(G3_mh_vmtarget, G5_method);  // target is a methodOop
+      __ verify_oop(G5_method);
+      // Same as TemplateTable::invokestatic or invokespecial,
+      // minus the CP setup and profiling:
+      if (ek == _invokespecial_mh) {
+        // Must load & check the first argument before entering the target method.
+        __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
+        __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
+        __ null_check(G3_method_handle);
+        __ verify_oop(G3_method_handle);
+      }
+      __ jump_indirect_to(G5_method_fie, O1_scratch);
+      __ delayed()->nop();
+    }
+    break;
+
+  case _invokevirtual_mh:
+    {
+      // Same as TemplateTable::invokevirtual,
+      // minus the CP setup and profiling:
+
+      // Pick out the vtable index and receiver offset from the MH,
+      // and then we can discard it:
+      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
+      __ ldsw(G3_dmh_vmindex, G5_index);
+      // Note:  The verifier allows us to ignore G3_mh_vmtarget.
+      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
+      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
+
+      // Get receiver klass:
+      Register O0_klass = O0_argslot;
+      __ load_klass(G3_method_handle, O0_klass);
+      __ verify_oop(O0_klass);
+
+      // Get target methodOop & entry point:
+      const int base = instanceKlass::vtable_start_offset() * wordSize;
+      assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
+
+      __ sll_ptr(G5_index, LogBytesPerWord, G5_index);
+      __ add(O0_klass, G5_index, O0_klass);
+      Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes());
+      __ ld_ptr(vtable_entry_addr, G5_method);
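+      // In pseudo-code:
+      //   G5_method = *(methodOop*)(klass + vtable_start_offset
+      //                             + index*wordSize + method_offset);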
+
+      __ verify_oop(G5_method);
+      __ jump_indirect_to(G5_method_fie, O1_scratch);
+      __ delayed()->nop();
+    }
+    break;
+
+  case _invokeinterface_mh:
+    {
+      // Same as TemplateTable::invokeinterface,
+      // minus the CP setup and profiling:
+      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
+      Register O1_intf  = O1_scratch;
+      __ ld_ptr(G3_mh_vmtarget, O1_intf);
+      __ ldsw(G3_dmh_vmindex, G5_index);
+      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
+      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
+
+      // Get receiver klass:
+      Register O0_klass = O0_argslot;
+      __ load_klass(G3_method_handle, O0_klass);
+      __ verify_oop(O0_klass);
+
+      // Get interface:
+      Label no_such_interface;
+      __ verify_oop(O1_intf);
+      __ lookup_interface_method(O0_klass, O1_intf,
+                                 // Note: next two args must be the same:
+                                 G5_index, G5_method,
+                                 O2_scratch,
+                                 O3_scratch,
+                                 no_such_interface);
+
+      __ verify_oop(G5_method);
+      __ jump_indirect_to(G5_method_fie, O1_scratch);
+      __ delayed()->nop();
+
+      __ bind(no_such_interface);
+      // Throw an exception.
+      // For historical reasons, it will be IncompatibleClassChangeError.
+      __ unimplemented("not tested yet");
+      __ ld_ptr(Address(O1_intf, java_mirror_offset), O3_scratch);  // required interface
+      __ mov(O0_klass, O2_scratch);  // bad receiver
+      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
+      __ delayed()->mov(Bytecodes::_invokeinterface, O1_scratch);  // who is complaining?
+    }
+    break;
+
+  case _bound_ref_mh:
+  case _bound_int_mh:
+  case _bound_long_mh:
+  case _bound_ref_direct_mh:
+  case _bound_int_direct_mh:
+  case _bound_long_direct_mh:
+    {
+      const bool direct_to_method = (ek >= _bound_ref_direct_mh);
+      BasicType arg_type  = T_ILLEGAL;
+      int       arg_mask  = _INSERT_NO_MASK;
+      int       arg_slots = -1;
+      get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);
+
+      // Make room for the new argument:
+      __ ldsw(G3_bmh_vmargslot, O0_argslot);
+      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
+
+      insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, O0_argslot, O1_scratch, O2_scratch, G5_index);
+
+      // Store bound argument into the new stack slot:
+      __ ld_ptr(G3_bmh_argument, O1_scratch);
+      if (arg_type == T_OBJECT) {
+        __ st_ptr(O1_scratch, Address(O0_argslot, 0));
+      } else {
+        Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type));
+        __ load_sized_value(prim_value_addr, O2_scratch, type2aelembytes(arg_type), is_signed_subword_type(arg_type));
+        if (arg_slots == 2) {
+          __ unimplemented("not yet tested");
+#ifndef _LP64
+          __ signx(O2_scratch, O3_scratch);  // Sign extend
+#endif
+          __ st_long(O2_scratch, Address(O0_argslot, 0));  // Uses O2/O3 on !_LP64
+        } else {
+          __ st_ptr( O2_scratch, Address(O0_argslot, 0));
+        }
+      }
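+      // Net effect (sketch):
+      //   argslot[0] = (arg_type == T_OBJECT) ? bound_arg
+      //                : value field of the boxed bound_arg (1 or 2 slots);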
+
+      if (direct_to_method) {
+        __ ld_ptr(G3_mh_vmtarget, G5_method);  // target is a methodOop
+        __ verify_oop(G5_method);
+        __ jump_indirect_to(G5_method_fie, O1_scratch);
+        __ delayed()->nop();
+      } else {
+        __ ld_ptr(G3_mh_vmtarget, G3_method_handle);  // target is a methodOop
+        __ verify_oop(G3_method_handle);
+        __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
+      }
+    }
+    break;
+
+  case _adapter_retype_only:
+  case _adapter_retype_raw:
+    // Immediately jump to the next MH layer:
+    __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
+    __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
+    // This is OK when all parameter types widen.
+    // It is also OK when a return type narrows.
+    break;
+
+  case _adapter_check_cast:
+    {
+      // Temps:
+      Register G5_klass = G5_index;  // Interesting AMH data.
+
+      // Check a reference argument before jumping to the next layer of MH:
+      __ ldsw(G3_amh_vmargslot, O0_argslot);
+      Address vmarg = __ argument_address(O0_argslot);
+
+      // What class are we casting to?
+      __ ld_ptr(G3_amh_argument, G5_klass);  // This is a Class object!
+      __ ld_ptr(Address(G5_klass, java_lang_Class::klass_offset_in_bytes()), G5_klass);
+
+      Label done;
+      __ ld_ptr(vmarg, O1_scratch);
+      __ tst(O1_scratch);
+      __ brx(Assembler::zero, false, Assembler::pn, done);  // No cast if null.
+      __ delayed()->nop();
+      __ load_klass(O1_scratch, O1_scratch);
+
+      // Live at this point:
+      // - G5_klass        :  klass required by the target method
+      // - O1_scratch      :  argument klass to test
+      // - G3_method_handle:  adapter method handle
+      __ check_klass_subtype(O1_scratch, G5_klass, O0_argslot, O2_scratch, done);
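+      // In pseudo-code:
+      //   if (arg == NULL || arg_klass->is_subtype_of(required_klass))  goto done;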
+
+      // If we get here, the type check failed!
+      __ ldsw(G3_amh_vmargslot, O0_argslot);  // reload argslot field
+      __ ld_ptr(G3_amh_argument, O3_scratch);  // required class
+      __ ld_ptr(vmarg, O2_scratch);  // bad object
+      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
+      __ delayed()->mov(Bytecodes::_checkcast, O1_scratch);  // who is complaining?
+
+      __ bind(done);
+      // Get the new MH:
+      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
+      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
+    }
+    break;
+
+  case _adapter_prim_to_prim:
+  case _adapter_ref_to_prim:
+    // Handled completely by optimized cases.
+    __ stop("init_AdapterMethodHandle should not issue this");
+    break;
+
+  case _adapter_opt_i2i:        // optimized subcase of adapt_prim_to_prim
+//case _adapter_opt_f2i:        // optimized subcase of adapt_prim_to_prim
+  case _adapter_opt_l2i:        // optimized subcase of adapt_prim_to_prim
+  case _adapter_opt_unboxi:     // optimized subcase of adapt_ref_to_prim
+    {
+      // Perform an in-place conversion to int or an int subword.
+      __ ldsw(G3_amh_vmargslot, O0_argslot);
+      Address vmarg = __ argument_address(O0_argslot);
+      Address value;
+      bool value_left_justified = false;
+
+      switch (ek) {
+      case _adapter_opt_i2i:
+      case _adapter_opt_l2i:
+        __ unimplemented(entry_name(ek));
+        value = vmarg;
+        break;
+      case _adapter_opt_unboxi:
+        {
+          // Load the value up from the heap.
+          __ ld_ptr(vmarg, O1_scratch);
+          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
+#ifdef ASSERT
+          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
+            if (is_subword_type(BasicType(bt)))
+              assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
+          }
+#endif
+          __ null_check(O1_scratch, value_offset);
+          value = Address(O1_scratch, value_offset);
+#ifdef _BIG_ENDIAN
+          // Values stored in objects are packed.
+          value_left_justified = true;
+#endif
+        }
+        break;
+      default:
+        ShouldNotReachHere();
+      }
+
+      // This check is required on _BIG_ENDIAN
+      Register G5_vminfo = G5_index;
+      __ ldsw(G3_amh_conversion, G5_vminfo);
+      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
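+      // Note: no explicit CONV_VMINFO_MASK and3 is needed before the shifts
+      // below: 32-bit SPARC shifts use only the low 5 bits of the count
+      // register, and the vminfo shift counts here are always < 32.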
+
+      // Original 32-bit vmdata word must be of this form:
+      // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
+      __ lduw(value, O1_scratch);
+      if (!value_left_justified)
+        __ sll(O1_scratch, G5_vminfo, O1_scratch);
+      Label zero_extend, done;
+      __ btst(CONV_VMINFO_SIGN_FLAG, G5_vminfo);
+      __ br(Assembler::zero, false, Assembler::pn, zero_extend);
+      __ delayed()->nop();
+
+      // this path is taken for int->byte, int->short
+      __ sra(O1_scratch, G5_vminfo, O1_scratch);
+      __ ba(false, done);
+      __ delayed()->nop();
+
+      __ bind(zero_extend);
+      // this is taken for int->char
+      __ srl(O1_scratch, G5_vminfo, O1_scratch);
+
+      __ bind(done);
+      __ st(O1_scratch, vmarg);
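+
+      // Equivalent pseudo-code, with n = vminfo:
+      //   juint v = value;
+      //   if (!value_left_justified)  v <<= n;
+      //   v = sign_flag ? (juint)((jint)v >> n) : (v >> n);
+      //   *(jint*)vmarg = v;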
+
+      // Get the new MH:
+      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
+      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
+    }
+    break;
+
+  case _adapter_opt_i2l:        // optimized subcase of adapt_prim_to_prim
+  case _adapter_opt_unboxl:     // optimized subcase of adapt_ref_to_prim
+    {
+      // Perform an in-place int-to-long or ref-to-long conversion.
+      __ ldsw(G3_amh_vmargslot, O0_argslot);
+
+      // On a big-endian machine we duplicate the slot and store the MSW
+      // in the first slot.
+      __ add(Gargs, __ argument_offset(O0_argslot, 1), O0_argslot);
+
+      insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, O0_argslot, O1_scratch, O2_scratch, G5_index);
+
+      Address arg_lsw(O0_argslot, 0);
+      Address arg_msw(O0_argslot, -Interpreter::stackElementSize);
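+
+      // Widening sketch (big-endian: the MSW lands at the lower address, arg_msw):
+      //   i2l:    *(jlong*)arg_msw = (jlong) *(jint*)arg_lsw;
+      //   unboxl: *(jlong*)arg_msw = value field of the Long (or Double) box;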
+
+      switch (ek) {
+      case _adapter_opt_i2l:
+        {
+          __ ldsw(arg_lsw, O2_scratch);      // Load LSW
+#ifndef _LP64
+          __ signx(O2_scratch, O3_scratch);  // Sign extend
+#endif
+          __ st_long(O2_scratch, arg_msw);   // Uses O2/O3 on !_LP64
+        }
+        break;
+      case _adapter_opt_unboxl:
+        {
+          // Load the value up from the heap.
+          __ ld_ptr(arg_lsw, O1_scratch);
+          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
+          assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
+          __ null_check(O1_scratch, value_offset);
+          __ ld_long(Address(O1_scratch, value_offset), O2_scratch);  // Uses O2/O3 on !_LP64
+          __ st_long(O2_scratch, arg_msw);
+        }
+        break;
+      default:
+        ShouldNotReachHere();
+      }
+
+      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
+      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
+    }
+    break;
+
+  case _adapter_opt_f2d:        // optimized subcase of adapt_prim_to_prim
+  case _adapter_opt_d2f:        // optimized subcase of adapt_prim_to_prim
+    {
+      // perform an in-place floating primitive conversion
+      __ unimplemented(entry_name(ek));
+    }
+    break;
+
+  case _adapter_prim_to_ref:
+    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
+    break;
+
+  case _adapter_swap_args:
+  case _adapter_rot_args:
+    // handled completely by optimized cases
+    __ stop("init_AdapterMethodHandle should not issue this");
+    break;
+
+  case _adapter_opt_swap_1:
+  case _adapter_opt_swap_2:
+  case _adapter_opt_rot_1_up:
+  case _adapter_opt_rot_1_down:
+  case _adapter_opt_rot_2_up:
+  case _adapter_opt_rot_2_down:
+    {
+      int swap_bytes = 0, rotate = 0;
+      get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);
+
+      // 'argslot' is the position of the first argument to swap.
+      __ ldsw(G3_amh_vmargslot, O0_argslot);
+      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
+
+      // 'vminfo' is the second.
+      Register O1_destslot = O1_scratch;
+      __ ldsw(G3_amh_conversion, O1_destslot);
+      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
+      __ and3(O1_destslot, CONV_VMINFO_MASK, O1_destslot);
+      __ add(Gargs, __ argument_offset(O1_destslot), O1_destslot);
+
+      if (!rotate) {
+        for (int i = 0; i < swap_bytes; i += wordSize) {
+          __ ld_ptr(Address(O0_argslot,  i), O2_scratch);
+          __ ld_ptr(Address(O1_destslot, i), O3_scratch);
+          __ st_ptr(O3_scratch, Address(O0_argslot,  i));
+          __ st_ptr(O2_scratch, Address(O1_destslot, i));
+        }
+      } else {
+        // Save the first chunk, which is going to get overwritten.
+        switch (swap_bytes) {
+        case 4 : __ lduw(Address(O0_argslot, 0), O2_scratch); break;
+        case 16: __ ldx( Address(O0_argslot, 8), O3_scratch); // fall-thru
+        case 8 : __ ldx( Address(O0_argslot, 0), O2_scratch); break;
+        default: ShouldNotReachHere();
+        }
+
+        if (rotate > 0) {
+          // Rotate upward.
+          __ sub(O0_argslot, swap_bytes, O0_argslot);
+#ifdef ASSERT
+          {
+            // Verify that argslot > destslot, by at least swap_bytes.
+            Label L_ok;
+            __ cmp(O0_argslot, O1_destslot);
+            __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_ok);
+            __ delayed()->nop();
+            __ stop("source must be above destination (upward rotation)");
+            __ bind(L_ok);
+          }
+#endif
+          // Work argslot down to destslot, copying contiguous data upwards.
+          // Pseudo-code:
+          //   argslot  = src_addr - swap_bytes
+          //   destslot = dest_addr
+          //   while (argslot >= destslot) {
+          //     *(argslot + swap_bytes) = *(argslot + 0);
+          //     argslot--;
+          //   }
+          Label loop;
+          __ bind(loop);
+          __ ld_ptr(Address(O0_argslot, 0), G5_index);
+          __ st_ptr(G5_index, Address(O0_argslot, swap_bytes));
+          __ sub(O0_argslot, wordSize, O0_argslot);
+          __ cmp(O0_argslot, O1_destslot);
+          __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, loop);
+          __ delayed()->nop();  // FILLME
+        } else {
+          __ add(O0_argslot, swap_bytes, O0_argslot);
+#ifdef ASSERT
+          {
+            // Verify that argslot < destslot, by at least swap_bytes.
+            Label L_ok;
+            __ cmp(O0_argslot, O1_destslot);
+            __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
+            __ delayed()->nop();
+            __ stop("source must be above destination (upward rotation)");
+            __ bind(L_ok);
+          }
+#endif
+          // Work argslot up to destslot, copying contiguous data downwards.
+          // Pseudo-code:
+          //   argslot  = src_addr + swap_bytes
+          //   destslot = dest_addr
+          //   while (argslot <= destslot) {
+          //     *(argslot - swap_bytes) = *(argslot + 0);
+          //     argslot++;
+          //   }
+          Label loop;
+          __ bind(loop);
+          __ ld_ptr(Address(O0_argslot, 0), G5_index);
+          __ st_ptr(G5_index, Address(O0_argslot, -swap_bytes));
+          __ add(O0_argslot, wordSize, O0_argslot);
+          __ cmp(O0_argslot, O1_destslot);
+          __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, loop);
+          __ delayed()->nop();  // FILLME
+        }
+
+        // Store the original first chunk into the destination slot, now free.
+        switch (swap_bytes) {
+        case 4 : __ stw(O2_scratch, Address(O1_destslot, 0)); break;
+        case 16: __ stx(O3_scratch, Address(O1_destslot, 8)); // fall-thru
+        case 8 : __ stx(O2_scratch, Address(O1_destslot, 0)); break;
+        default: ShouldNotReachHere();
+        }
+      }
+
+      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
+      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
+    }
+    break;
+
+  case _adapter_dup_args:
+    {
+      // 'argslot' is the position of the first argument to duplicate.
+      __ ldsw(G3_amh_vmargslot, O0_argslot);
+      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
+
+      // 'stack_move' is the (negative) number of words to duplicate.
+      Register G5_stack_move = G5_index;
+      __ ldsw(G3_amh_conversion, G5_stack_move);
+      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);
+
+      // Remember the old Gargs (argslot[0]).
+      Register O1_oldarg = O1_scratch;
+      __ mov(Gargs, O1_oldarg);
+
+      // Move Gargs down to make room for dups.
+      __ sll_ptr(G5_stack_move, LogBytesPerWord, G5_stack_move);
+      __ add(Gargs, G5_stack_move, Gargs);
+
+      // Compute the new Gargs (argslot[0]).
+      Register O2_newarg = O2_scratch;
+      __ mov(Gargs, O2_newarg);
+
+      // Copy from oldarg[0...] down to newarg[0...]
+      // Pseudo-code:
+      //   O1_oldarg  = old-Gargs
+      //   O2_newarg  = new-Gargs
+      //   O0_argslot = argslot
+      //   while (O2_newarg < O1_oldarg) *O2_newarg = *O0_argslot++
+      Label loop;
+      __ bind(loop);
+      __ ld_ptr(Address(O0_argslot, 0), O3_scratch);
+      __ st_ptr(O3_scratch, Address(O2_newarg, 0));
+      __ add(O0_argslot, wordSize, O0_argslot);
+      __ add(O2_newarg,  wordSize, O2_newarg);
+      __ cmp(O2_newarg, O1_oldarg);
+      __ brx(Assembler::less, false, Assembler::pt, loop);
+      __ delayed()->nop();  // FILLME
+
+      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
+      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
+    }
+    break;
+
+  case _adapter_drop_args:
+    {
+      // 'argslot' is the position of the first argument to nuke.
+      __ ldsw(G3_amh_vmargslot, O0_argslot);
+      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
+
+      // 'stack_move' is the number of words to drop.
+      Register G5_stack_move = G5_index;
+      __ ldsw(G3_amh_conversion, G5_stack_move);
+      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);
+
+      remove_arg_slots(_masm, G5_stack_move, O0_argslot, O1_scratch, O2_scratch, O3_scratch);
+
+      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
+      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
+    }
+    break;
+
+  case _adapter_collect_args:
+    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
+    break;
+
+  case _adapter_spread_args:
+    // Handled completely by optimized cases.
+    __ stop("init_AdapterMethodHandle should not issue this");
+    break;
+
+  case _adapter_opt_spread_0:
+  case _adapter_opt_spread_1:
+  case _adapter_opt_spread_more:
+    {
+      // spread an array out into a group of arguments
+      __ unimplemented(entry_name(ek));
+    }
+    break;
+
+  case _adapter_flyby:
+  case _adapter_ricochet:
+    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
+    break;
+
+  default:
+    ShouldNotReachHere();
+  }
+
+  address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
+  __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
+
+  init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
 }
--- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed May 19 10:22:39 2010 -0700
@@ -547,17 +547,11 @@
   void set_Rdisp(Register r)  { Rdisp = r; }
 
   void patch_callers_callsite();
-  void tag_c2i_arg(frame::Tag t, Register base, int st_off, Register scratch);
 
   // base+st_off points to top of argument
-  int arg_offset(const int st_off) { return st_off + Interpreter::value_offset_in_bytes(); }
+  int arg_offset(const int st_off) { return st_off; }
   int next_arg_offset(const int st_off) {
-    return st_off - Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
-  }
-
-  int tag_offset(const int st_off) { return st_off + Interpreter::tag_offset_in_bytes(); }
-  int next_tag_offset(const int st_off) {
-    return st_off - Interpreter::stackElementSize() + Interpreter::tag_offset_in_bytes();
+    return st_off - Interpreter::stackElementSize;
   }
 
   // Argument slot values may be loaded first into a register because
@@ -565,9 +559,6 @@
   RegisterOrConstant arg_slot(const int st_off);
   RegisterOrConstant next_arg_slot(const int st_off);
 
-  RegisterOrConstant tag_slot(const int st_off);
-  RegisterOrConstant next_tag_slot(const int st_off);
-
   // Stores long into offset pointed to by base
   void store_c2i_long(Register r, Register base,
                       const int st_off, bool is_stack);
@@ -653,23 +644,6 @@
   __ bind(L);
 }
 
-void AdapterGenerator::tag_c2i_arg(frame::Tag t, Register base, int st_off,
-                 Register scratch) {
-  if (TaggedStackInterpreter) {
-    RegisterOrConstant slot = tag_slot(st_off);
-    // have to store zero because local slots can be reused (rats!)
-    if (t == frame::TagValue) {
-      __ st_ptr(G0, base, slot);
-    } else if (t == frame::TagCategory2) {
-      __ st_ptr(G0, base, slot);
-      __ st_ptr(G0, base, next_tag_slot(st_off));
-    } else {
-      __ mov(t, scratch);
-      __ st_ptr(scratch, base, slot);
-    }
-  }
-}
-
 
 RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
   RegisterOrConstant roc(arg_offset(st_off));
@@ -682,17 +656,6 @@
 }
 
 
-RegisterOrConstant AdapterGenerator::tag_slot(const int st_off) {
-  RegisterOrConstant roc(tag_offset(st_off));
-  return __ ensure_simm13_or_reg(roc, Rdisp);
-}
-
-RegisterOrConstant AdapterGenerator::next_tag_slot(const int st_off) {
-  RegisterOrConstant roc(next_tag_offset(st_off));
-  return __ ensure_simm13_or_reg(roc, Rdisp);
-}
-
-
 // Stores long into offset pointed to by base
 void AdapterGenerator::store_c2i_long(Register r, Register base,
                                       const int st_off, bool is_stack) {
@@ -718,19 +681,16 @@
   }
 #endif // COMPILER2
 #endif // _LP64
-  tag_c2i_arg(frame::TagCategory2, base, st_off, r);
 }
 
 void AdapterGenerator::store_c2i_object(Register r, Register base,
                       const int st_off) {
   __ st_ptr (r, base, arg_slot(st_off));
-  tag_c2i_arg(frame::TagReference, base, st_off, r);
 }
 
 void AdapterGenerator::store_c2i_int(Register r, Register base,
                    const int st_off) {
   __ st (r, base, arg_slot(st_off));
-  tag_c2i_arg(frame::TagValue, base, st_off, r);
 }
 
 // Stores into offset pointed to by base
@@ -745,13 +705,11 @@
   __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
   __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off) );
 #endif
-  tag_c2i_arg(frame::TagCategory2, base, st_off, G1_scratch);
 }
 
 void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                        const int st_off) {
   __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
-  tag_c2i_arg(frame::TagValue, base, st_off, G1_scratch);
 }
 
 void AdapterGenerator::gen_c2i_adapter(
@@ -786,14 +744,14 @@
   // Since all args are passed on the stack, total_args_passed*wordSize is the
   // space we need.  Add in varargs area needed by the interpreter. Round up
   // to stack alignment.
-  const int arg_size = total_args_passed * Interpreter::stackElementSize();
+  const int arg_size = total_args_passed * Interpreter::stackElementSize;
   const int varargs_area =
                  (frame::varargs_offset - frame::register_save_words)*wordSize;
   const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);
 
   int bias = STACK_BIAS;
   const int interp_arg_offset = frame::varargs_offset*wordSize +
-                        (total_args_passed-1)*Interpreter::stackElementSize();
+                        (total_args_passed-1)*Interpreter::stackElementSize;
 
   Register base = SP;
 
@@ -814,7 +772,7 @@
 
   // First write G1 (if used) to where ever it must go
   for (int i=0; i<total_args_passed; i++) {
-    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize()) + bias;
+    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
     VMReg r_1 = regs[i].first();
     VMReg r_2 = regs[i].second();
     if (r_1 == G1_scratch->as_VMReg()) {
@@ -831,7 +789,7 @@
 
   // Now write the args into the outgoing interpreter space
   for (int i=0; i<total_args_passed; i++) {
-    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize()) + bias;
+    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
     VMReg r_1 = regs[i].first();
     VMReg r_2 = regs[i].second();
     if (!r_1->is_valid()) {
@@ -900,7 +858,7 @@
 #endif // _LP64
 
   __ mov((frame::varargs_offset)*wordSize -
-         1*Interpreter::stackElementSize()+bias+BytesPerWord, G1);
+         1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
   // Jump to the interpreter just as if interpreter was doing it.
   __ jmpl(G3_scratch, 0, G0);
   // Setup Lesp for the call.  Cannot actually set Lesp as the current Lesp
@@ -1051,7 +1009,7 @@
     // ldx/lddf optimizations.
 
     // Load in argument order going down.
-    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
+    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
     set_Rdisp(G1_scratch);
 
     VMReg r_1 = regs[i].first();
@@ -1120,7 +1078,7 @@
   for (int i=0; i<total_args_passed; i++) {
     if (regs[i].first()->is_Register() && regs[i].second()->is_valid()) {
       // Load in argument order going down
-      int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
+      int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
       // Need to marshal 64-bit value from misaligned Lesp loads
       Register r = regs[i].first()->as_Register()->after_restore();
       if (r == G1 || r == G4) {
@@ -3062,7 +3020,7 @@
           "test and remove; got more parms than locals");
   if (callee_locals < callee_parameters)
     return 0;                   // No adjustment for negative locals
-  int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords();
+  int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
   return round_to(diff, WordsPerLong);
 }
 
--- a/src/cpu/sparc/vm/sparc.ad	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/sparc/vm/sparc.ad	Wed May 19 10:22:39 2010 -0700
@@ -471,6 +471,9 @@
 source %{
 #define __ _masm.
 
+// Block initializing store
+#define ASI_BLK_INIT_QUAD_LDD_P    0xE2
+
 // tertiary op of a LoadP or StoreP encoding
 #define REGP_OP true
 
@@ -920,38 +923,6 @@
 #endif
 }
 
-void emit_form3_mem_reg_asi(CodeBuffer &cbuf, const MachNode* n, int primary, int tertiary,
-                        int src1_enc, int disp32, int src2_enc, int dst_enc, int asi) {
-
-  uint instr;
-  instr = (Assembler::ldst_op << 30)
-        | (dst_enc        << 25)
-        | (primary        << 19)
-        | (src1_enc       << 14);
-
-  int disp = disp32;
-  int index    = src2_enc;
-
-  if (src1_enc == R_SP_enc || src1_enc == R_FP_enc)
-    disp += STACK_BIAS;
-
-  // We should have a compiler bailout here rather than a guarantee.
-  // Better yet would be some mechanism to handle variable-size matches correctly.
-  guarantee(Assembler::is_simm13(disp), "Do not match large constant offsets" );
-
-  if( disp != 0 ) {
-    // use reg-reg form
-    // set src2=R_O7 contains offset
-    index = R_O7_enc;
-    emit3_simm13( cbuf, Assembler::arith_op, index, Assembler::or_op3, 0, disp);
-  }
-  instr |= (asi << 5);
-  instr |= index;
-  uint *code = (uint*)cbuf.code_end();
-  *code = instr;
-  cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord);
-}
-
 void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocType rtype, bool preserve_g2 = false, bool force_far_call = false) {
   // The method which records debug information at every safepoint
   // expects the call to be the first instruction in the snippet as
@@ -1951,11 +1922,6 @@
                        $mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
   %}
 
-  enc_class form3_mem_reg_little( memory mem, iRegI dst) %{
-    emit_form3_mem_reg_asi(cbuf, this, $primary, -1,
-                     $mem$$base, $mem$$disp, $mem$$index, $dst$$reg, Assembler::ASI_PRIMARY_LITTLE);
-  %}
-
   enc_class form3_mem_prefetch_read( memory mem ) %{
     emit_form3_mem_reg(cbuf, this, $primary, -1,
                        $mem$$base, $mem$$disp, $mem$$index, 0/*prefetch function many-reads*/);
@@ -4308,8 +4274,8 @@
 // instructions for every form of operand when the instruction accepts
 // multiple operand types with the same basic encoding and format.  The classic
 // case of this is memory operands.
-// Indirect is not included since its use is limited to Compare & Swap
 opclass memory( indirect, indOffset13, indIndex );
+opclass indIndexMemory( indIndex );
 
 //----------PIPELINE-----------------------------------------------------------
 pipeline %{
@@ -6147,6 +6113,7 @@
 %}
 
 instruct prefetchw( memory mem ) %{
+  predicate(AllocatePrefetchStyle != 3 );
   match( PrefetchWrite mem );
   ins_cost(MEMORY_REF_COST);
 
@@ -6156,6 +6123,23 @@
   ins_pipe(iload_mem);
 %}
 
+// Use BIS instruction to prefetch.
+instruct prefetchw_bis( memory mem ) %{
+  predicate(AllocatePrefetchStyle == 3);
+  match( PrefetchWrite mem );
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "STXA   G0,$mem\t! // Block initializing store" %}
+  ins_encode %{
+     Register base = as_Register($mem$$base);
+     int disp = $mem$$disp;
+     if (disp != 0) {
+       __ add(base, AllocatePrefetchStepSize, base);
+     }
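+     // A block-initializing store (BIS) allocates and initializes the target
+     // cache line without fetching it from memory first, so on Niagara-class
+     // CPUs it doubles as a prefetch-for-write of the allocation area.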
+     __ stxa(G0, base, G0, ASI_BLK_INIT_QUAD_LDD_P);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
 
 //----------Store Instructions-------------------------------------------------
 // Store Byte
@@ -9645,84 +9629,179 @@
 
 instruct bytes_reverse_int(iRegI dst, stackSlotI src) %{
   match(Set dst (ReverseBytesI src));
-  effect(DEF dst, USE src);
+
+  // Op cost is artificially doubled to make sure that load or store
+  // instructions are preferred over this one which requires a spill
+  // onto a stack slot.
+  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
+  format %{ "LDUWA  $src, $dst\t!asi=primary_little" %}
+
+  ins_encode %{
+    __ set($src$$disp + STACK_BIAS, O7);
+    __ lduwa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
+  ins_pipe( iload_mem );
+%}
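+
+// In the reversed-bytes instructs here, the swap is performed by the
+// memory access itself: ASI_PRIMARY_LITTLE makes the (big-endian) CPU
+// read or write the location in little-endian byte order.  Sketch
+// (byteswap32 is illustrative only):
+//   dst = byteswap32(*(uint32_t*)(SP + STACK_BIAS + disp));   // LDUWA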
+
+instruct bytes_reverse_long(iRegL dst, stackSlotL src) %{
+  match(Set dst (ReverseBytesL src));
 
   // Op cost is artificially doubled to make sure that load or store
   // instructions are preferred over this one which requires a spill
   // onto a stack slot.
   ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
-  size(8);
-  format %{ "LDUWA  $src, $dst\t!asi=primary_little" %}
-  opcode(Assembler::lduwa_op3);
-  ins_encode( form3_mem_reg_little(src, dst) );
+  format %{ "LDXA   $src, $dst\t!asi=primary_little" %}
+
+  ins_encode %{
+    __ set($src$$disp + STACK_BIAS, O7);
+    __ ldxa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
   ins_pipe( iload_mem );
 %}
 
-instruct bytes_reverse_long(iRegL dst, stackSlotL src) %{
-  match(Set dst (ReverseBytesL src));
-  effect(DEF dst, USE src);
+instruct bytes_reverse_unsigned_short(iRegI dst, stackSlotI src) %{
+  match(Set dst (ReverseBytesUS src));
+
+  // Op cost is artificially doubled to make sure that load or store
+  // instructions are preferred over this one which requires a spill
+  // onto a stack slot.
+  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
+  format %{ "LDUHA  $src, $dst\t!asi=primary_little\n\t" %}
+
+  ins_encode %{
+    // the value was spilled as an int so bias the load
+    __ set($src$$disp + STACK_BIAS + 2, O7);
+    __ lduha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
+  ins_pipe( iload_mem );
+%}
+
+instruct bytes_reverse_short(iRegI dst, stackSlotI src) %{
+  match(Set dst (ReverseBytesS src));
 
   // Op cost is artificially doubled to make sure that load or store
   // instructions are preferred over this one which requires a spill
   // onto a stack slot.
   ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
-  size(8);
-  format %{ "LDXA   $src, $dst\t!asi=primary_little" %}
-
-  opcode(Assembler::ldxa_op3);
-  ins_encode( form3_mem_reg_little(src, dst) );
+  format %{ "LDSHA  $src, $dst\t!asi=primary_little\n\t" %}
+
+  ins_encode %{
+    // the value was spilled as an int so bias the load
+    __ set($src$$disp + STACK_BIAS + 2, O7);
+    __ ldsha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
   ins_pipe( iload_mem );
 %}
 
 // Load Integer reversed byte order
-instruct loadI_reversed(iRegI dst, memory src) %{
+instruct loadI_reversed(iRegI dst, indIndexMemory src) %{
   match(Set dst (ReverseBytesI (LoadI src)));
 
   ins_cost(DEFAULT_COST + MEMORY_REF_COST);
-  size(8);
+  size(4);
   format %{ "LDUWA  $src, $dst\t!asi=primary_little" %}
 
-  opcode(Assembler::lduwa_op3);
-  ins_encode( form3_mem_reg_little( src, dst) );
+  ins_encode %{
+    __ lduwa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
   ins_pipe(iload_mem);
 %}
 
 // Load Long - aligned and reversed
-instruct loadL_reversed(iRegL dst, memory src) %{
+instruct loadL_reversed(iRegL dst, indIndexMemory src) %{
   match(Set dst (ReverseBytesL (LoadL src)));
 
-  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
-  size(8);
+  ins_cost(MEMORY_REF_COST);
+  size(4);
   format %{ "LDXA   $src, $dst\t!asi=primary_little" %}
 
-  opcode(Assembler::ldxa_op3);
-  ins_encode( form3_mem_reg_little( src, dst ) );
+  ins_encode %{
+    __ ldxa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
+  ins_pipe(iload_mem);
+%}
+
+// Load unsigned short / char reversed byte order
+instruct loadUS_reversed(iRegI dst, indIndexMemory src) %{
+  match(Set dst (ReverseBytesUS (LoadUS src)));
+
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+  format %{ "LDUHA  $src, $dst\t!asi=primary_little" %}
+
+  ins_encode %{
+    __ lduha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
+  ins_pipe(iload_mem);
+%}
+
+// Load short reversed byte order
+instruct loadS_reversed(iRegI dst, indIndexMemory src) %{
+  match(Set dst (ReverseBytesS (LoadS src)));
+
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+  format %{ "LDSHA  $src, $dst\t!asi=primary_little" %}
+
+  ins_encode %{
+    __ ldsha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
   ins_pipe(iload_mem);
 %}
 
 // Store Integer reversed byte order
-instruct storeI_reversed(memory dst, iRegI src) %{
+instruct storeI_reversed(indIndexMemory dst, iRegI src) %{
   match(Set dst (StoreI dst (ReverseBytesI src)));
 
   ins_cost(MEMORY_REF_COST);
-  size(8);
+  size(4);
   format %{ "STWA   $src, $dst\t!asi=primary_little" %}
 
-  opcode(Assembler::stwa_op3);
-  ins_encode( form3_mem_reg_little( dst, src) );
+  ins_encode %{
+    __ stwa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
+  %}
   ins_pipe(istore_mem_reg);
 %}
 
 // Store Long reversed byte order
-instruct storeL_reversed(memory dst, iRegL src) %{
+instruct storeL_reversed(indIndexMemory dst, iRegL src) %{
   match(Set dst (StoreL dst (ReverseBytesL src)));
 
   ins_cost(MEMORY_REF_COST);
-  size(8);
+  size(4);
   format %{ "STXA   $src, $dst\t!asi=primary_little" %}
 
-  opcode(Assembler::stxa_op3);
-  ins_encode( form3_mem_reg_little( dst, src) );
+  ins_encode %{
+    __ stxa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
+
+// Store unsigned short/char reversed byte order
+instruct storeUS_reversed(indIndexMemory dst, iRegI src) %{
+  match(Set dst (StoreC dst (ReverseBytesUS src)));
+
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+  format %{ "STHA   $src, $dst\t!asi=primary_little" %}
+
+  ins_encode %{
+    __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
+
+// Store short reversed byte order
+instruct storeS_reversed(indIndexMemory dst, iRegI src) %{
+  match(Set dst (StoreC dst (ReverseBytesS src)));
+
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+  format %{ "STHA   $src, $dst\t!asi=primary_little" %}
+
+  ins_encode %{
+    __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
+  %}
   ins_pipe(istore_mem_reg);
 %}
 
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed May 19 10:22:39 2010 -0700
@@ -139,7 +139,7 @@
       __ ld_ptr(parameter_size.as_address(), t);                // get parameter size (in words)
       __ add(t, frame::memory_parameter_word_sp_offset, t);     // add space for save area (in words)
       __ round_to(t, WordsPerLong);                             // make sure it is multiple of 2 (in words)
-      __ sll(t, Interpreter::logStackElementSize(), t);                    // compute number of bytes
+      __ sll(t, Interpreter::logStackElementSize, t);           // compute number of bytes
       __ neg(t);                                                // negate so it can be used with save
       __ save(SP, t, SP);                                       // setup new frame
     }
@@ -191,19 +191,13 @@
       // copy parameters if any
       Label loop;
       __ BIND(loop);
-      // Store tag first.
-      if (TaggedStackInterpreter) {
-        __ ld_ptr(src, 0, tmp);
-        __ add(src, BytesPerWord, src);  // get next
-        __ st_ptr(tmp, dst, Interpreter::tag_offset_in_bytes());
-      }
       // Store parameter value
       __ ld_ptr(src, 0, tmp);
       __ add(src, BytesPerWord, src);
-      __ st_ptr(tmp, dst, Interpreter::value_offset_in_bytes());
+      __ st_ptr(tmp, dst, 0);
       __ deccc(cnt);
       __ br(Assembler::greater, false, Assembler::pt, loop);
-      __ delayed()->sub(dst, Interpreter::stackElementSize(), dst);
+      __ delayed()->sub(dst, Interpreter::stackElementSize, dst);
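+      // In pseudo-code:
+      //   do { *dst = *src++; dst -= Interpreter::stackElementSize; } while (--cnt > 0);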
 
       // done
       __ BIND(exit);
@@ -220,7 +214,7 @@
     // setup parameters
     const Register t = G3_scratch;
     __ ld_ptr(parameter_size.as_in().as_address(), t); // get parameter size (in words)
-    __ sll(t, Interpreter::logStackElementSize(), t);            // compute number of bytes
+    __ sll(t, Interpreter::logStackElementSize, t);    // compute number of bytes
     __ sub(FP, t, Gargs);                              // setup parameter pointer
 #ifdef _LP64
     __ add( Gargs, STACK_BIAS, Gargs );                // Account for LP64 stack bias
@@ -1148,7 +1142,7 @@
       __ andn(from, 7, from);     // Align address
       __ ldx(from, 0, O3);
       __ inc(from, 8);
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_loop);
       __ ldx(from, 0, O4);
       __ deccc(count, count_dec); // Can we do next iteration after this one?
@@ -1220,7 +1214,7 @@
     //
       __ andn(end_from, 7, end_from);     // Align address
       __ ldx(end_from, 0, O3);
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_loop);
       __ ldx(end_from, -8, O4);
       __ deccc(count, count_dec); // Can we do next iteration after this one?
@@ -1349,7 +1343,7 @@
     __ BIND(L_copy_byte);
       __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
       __ delayed()->nop();
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_copy_byte_loop);
       __ ldub(from, offset, O3);
       __ deccc(count);
@@ -1445,7 +1439,7 @@
                                         L_aligned_copy, L_copy_byte);
     }
     // copy 4 elements (16 bytes) at a time
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_aligned_copy);
       __ dec(end_from, 16);
       __ ldx(end_from, 8, O3);
@@ -1461,7 +1455,7 @@
     __ BIND(L_copy_byte);
       __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
       __ delayed()->nop();
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_copy_byte_loop);
       __ dec(end_from);
       __ dec(end_to);
@@ -1577,7 +1571,7 @@
     __ BIND(L_copy_2_bytes);
       __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
       __ delayed()->nop();
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_copy_2_bytes_loop);
       __ lduh(from, offset, O3);
       __ deccc(count);
@@ -1684,7 +1678,7 @@
                                         L_aligned_copy, L_copy_2_bytes);
     }
     // copy 4 elements (16 bytes) at a time
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_aligned_copy);
       __ dec(end_from, 16);
       __ ldx(end_from, 8, O3);
@@ -1781,7 +1775,7 @@
     // copy with shift 4 elements (16 bytes) at a time
      __ dec(count, 4);   // The cmp at the beginning guarantees count >= 4
 
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_copy_16_bytes);
       __ ldx(from, 4, O4);
       __ deccc(count, 4); // Can we do next iteration after this one?
@@ -1907,7 +1901,7 @@
     // to form 2 aligned 8-byte chunks to store.
     //
       __ ldx(end_from, -4, O3);
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_copy_16_bytes);
       __ ldx(end_from, -12, O4);
       __ deccc(count, 4);
@@ -1929,7 +1923,7 @@
       __ delayed()->inc(count, 4);
 
     // copy 4 elements (16 bytes) at a time
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_aligned_copy);
       __ dec(end_from, 16);
       __ ldx(end_from, 8, O3);
@@ -2000,6 +1994,27 @@
   //      to:    O1
   //      count: O2 treated as signed
   //
+  // count -= 2;
+  // if ( count >= 0 ) { // >= 2 elements
+  //   if ( count >= 6 ) { // >= 8 elements
+  //     count -= 6; // original count - 8
+  //     do {
+  //       copy_8_elements;
+  //       count -= 8;
+  //     } while ( count >= 0 );
+  //     count += 6;
+  //   }
+  //   if ( count >= 0 ) { // >= 2 elements
+  //     do {
+  //       copy_2_elements;
+  //     } while ( (count=count-2) >= 0 );
+  //   }
+  // }
+  // count += 2;
+  // if ( count != 0 ) { // 1 element left
+  //   copy_1_element;
+  // }
+  //
   void generate_disjoint_long_copy_core(bool aligned) {
     Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
     const Register from    = O0;  // source array address
@@ -2012,7 +2027,39 @@
       __ mov(G0, offset0);   // offset from start of arrays (0)
       __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
       __ delayed()->add(offset0, 8, offset8);
-      __ align(16);
+
+    // Copy by 64-byte chunks
+    Label L_copy_64_bytes;
+    const Register from64 = O3;  // source address
+    const Register to64   = G3;  // destination address
+      __ subcc(count, 6, O3);
+      __ brx(Assembler::negative, false, Assembler::pt, L_copy_16_bytes );
+      __ delayed()->mov(to,   to64);
+      // Now we can use O4(offset0), O5(offset8) as temps
+      __ mov(O3, count);
+      __ mov(from, from64);
+
+      __ align(OptoLoopAlignment);
+    __ BIND(L_copy_64_bytes);
+      for( int off = 0; off < 64; off += 16 ) {
+        __ ldx(from64,  off+0, O4);
+        __ ldx(from64,  off+8, O5);
+        __ stx(O4, to64,  off+0);
+        __ stx(O5, to64,  off+8);
+      }
+      __ deccc(count, 8);
+      __ inc(from64, 64);
+      __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_64_bytes);
+      __ delayed()->inc(to64, 64);
+
+      // Restore O4(offset0), O5(offset8)
+      __ sub(from64, from, offset0);
+      __ inccc(count, 6);
+      __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
+      __ delayed()->add(offset0, 8, offset8);
+
+      // Copy by 16-byte chunks
+      __ align(OptoLoopAlignment);
     __ BIND(L_copy_16_bytes);
       __ ldx(from, offset0, O3);
       __ ldx(from, offset8, G3);
@@ -2023,6 +2070,7 @@
       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
       __ delayed()->inc(offset8, 16);
 
+      // Copy last 8 bytes
     __ BIND(L_copy_8_bytes);
       __ inccc(count, 2);
       __ brx(Assembler::zero, true, Assembler::pn, L_exit );
@@ -2085,7 +2133,7 @@
       __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_8_bytes );
       __ delayed()->sllx(count, LogBytesPerLong, offset8);
       __ sub(offset8, 8, offset0);
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_copy_16_bytes);
       __ ldx(from, offset8, O2);
       __ ldx(from, offset0, O3);
@@ -2351,7 +2399,7 @@
     //   (O5 = 0; ; O5 += wordSize) --- offset from src, dest arrays
     //   (O2 = len; O2 != 0; O2--) --- number of oops *remaining*
     //   G3, G4, G5 --- current oop, oop.klass, oop.klass.super
-    __ align(16);
+    __ align(OptoLoopAlignment);
 
     __ BIND(store_element);
     __ deccc(G1_remain);                // decrement the count
@@ -2863,6 +2911,16 @@
     // arraycopy stubs used by compilers
     generate_arraycopy_stubs();
 
+    // generic method handle stubs
+    if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
+      for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
+           ek < MethodHandles::_EK_LIMIT;
+           ek = MethodHandles::EntryKind(1 + (int)ek)) {
+        StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
+        MethodHandles::generate_method_handle_stub(_masm, ek);
+      }
+    }
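+    // One stub per EntryKind; generation is skipped unless method handles
+    // are enabled and java.dyn.MethodHandle has actually been loaded.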
+
     // Don't initialize the platform math functions since sparc
     // doesn't have intrinsics for these operations.
   }
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Wed May 19 10:22:39 2010 -0700
@@ -151,8 +151,10 @@
 
 
 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
+  TosState incoming_state = state;
+
+  Label cont;
   address compiled_entry = __ pc();
-  Label cont;
 
   address entry = __ pc();
 #if !defined(_LP64) && defined(COMPILER2)
@@ -165,12 +167,11 @@
  // do this here. Unfortunately if we did a rethrow we'd see a machepilog node
   // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
 
-  if( state == ltos ) {
-    __ srl (G1, 0,O1);
-    __ srlx(G1,32,O0);
+  if (incoming_state == ltos) {
+    __ srl (G1,  0, O1);
+    __ srlx(G1, 32, O0);
   }
-#endif /* !_LP64 && COMPILER2 */
-
+#endif // !_LP64 && COMPILER2
 
   __ bind(cont);
 
@@ -182,17 +183,32 @@
 
   __ mov(Llast_SP, SP);   // Remove any adapter added stack space.
 
-
+  Label L_got_cache, L_giant_index;
   const Register cache = G3_scratch;
   const Register size  = G1_scratch;
+  if (EnableInvokeDynamic) {
+    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode.
+    __ cmp(G1_scratch, Bytecodes::_invokedynamic);
+    __ br(Assembler::equal, false, Assembler::pn, L_giant_index);
+    __ delayed()->nop();
+  }
   __ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
+  __ bind(L_got_cache);
   __ ld_ptr(cache, constantPoolCacheOopDesc::base_offset() +
                    ConstantPoolCacheEntry::flags_offset(), size);
   __ and3(size, 0xFF, size);                   // argument size in words
-  __ sll(size, Interpreter::logStackElementSize(), size); // each argument size in bytes
+  __ sll(size, Interpreter::logStackElementSize, size); // each argument size in bytes
   __ add(Lesp, size, Lesp);                    // pop arguments
   __ dispatch_next(state, step);
 
+  // out of the main line of code...
+  if (EnableInvokeDynamic) {
+    __ bind(L_giant_index);
+    __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, true);
+    __ ba(false, L_got_cache);
+    __ delayed()->nop();
+  }
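+  // Control-flow sketch: ordinary invokes fall through and use the 2-byte
+  // cache index; invokedynamic branches to L_giant_index, loads the 4-byte
+  // index, and rejoins at L_got_cache.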
+
   return entry;
 }
 
@@ -479,7 +495,7 @@
   // Set the saved SP after the register window save
   //
   assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
-  __ sll(Glocals_size, Interpreter::logStackElementSize(), Otmp1);
+  __ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
   __ add(Gargs, Otmp1, Gargs);
 
   if (native_call) {
@@ -495,7 +511,7 @@
     __ lduh( size_of_locals, Otmp1 );
     __ sub( Otmp1, Glocals_size, Glocals_size );
     __ round_to( Glocals_size, WordsPerLong );
-    __ sll( Glocals_size, Interpreter::logStackElementSize(), Glocals_size );
+    __ sll( Glocals_size, Interpreter::logStackElementSize, Glocals_size );
 
     // see if the frame is greater than one page in size. If so,
     // then we need to verify there is enough stack space remaining
@@ -503,7 +519,7 @@
     __ lduh( max_stack, Gframe_size );
     __ add( Gframe_size, extra_space, Gframe_size );
     __ round_to( Gframe_size, WordsPerLong );
-    __ sll( Gframe_size, Interpreter::logStackElementSize(), Gframe_size);
+    __ sll( Gframe_size, Interpreter::logStackElementSize, Gframe_size);
 
     // Add in java locals size for stack overflow check only
     __ add( Gframe_size, Glocals_size, Gframe_size );
@@ -1218,8 +1234,8 @@
   // be updated!
   __ lduh( size_of_locals, O2 );
   __ lduh( size_of_parameters, O1 );
-  __ sll( O2, Interpreter::logStackElementSize(), O2);
-  __ sll( O1, Interpreter::logStackElementSize(), O1 );
+  __ sll( O2, Interpreter::logStackElementSize, O2);
+  __ sll( O1, Interpreter::logStackElementSize, O1 );
   __ sub( Llocals, O2, O2 );
   __ sub( Llocals, O1, O1 );
 
@@ -1454,8 +1470,8 @@
        round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
   // callee_locals and max_stack are counts, not the size in frame.
   const int locals_size =
-       round_to(callee_extra_locals * Interpreter::stackElementWords(), WordsPerLong);
-  const int max_stack_words = max_stack * Interpreter::stackElementWords();
+       round_to(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong);
+  const int max_stack_words = max_stack * Interpreter::stackElementWords;
   return (round_to((max_stack_words
                    //6815692//+ methodOopDesc::extra_stack_words()
                    + rounded_vm_local_words
@@ -1554,11 +1570,11 @@
 
     // preallocate stack space
     intptr_t*  esp = monitors - 1 -
-                     (tempcount * Interpreter::stackElementWords()) -
+                     (tempcount * Interpreter::stackElementWords) -
                      popframe_extra_args;
 
-    int local_words = method->max_locals() * Interpreter::stackElementWords();
-    int parm_words  = method->size_of_parameters() * Interpreter::stackElementWords();
+    int local_words = method->max_locals() * Interpreter::stackElementWords;
+    int parm_words  = method->size_of_parameters() * Interpreter::stackElementWords;
     NEEDS_CLEANUP;
     intptr_t* locals;
     if (caller->is_interpreted_frame()) {
@@ -1646,7 +1662,7 @@
     BasicObjectLock* mp = (BasicObjectLock*)monitors;
 
     assert(interpreter_frame->interpreter_frame_method() == method, "method matches");
-    assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize())+Interpreter::value_offset_in_bytes()), "locals match");
+    assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize)), "locals match");
     assert(interpreter_frame->interpreter_frame_monitor_end()   == mp, "monitor_end matches");
     assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp)+monitor_size, "monitor_begin matches");
     assert(interpreter_frame->interpreter_frame_tos_address()-1 == esp, "esp matches");
@@ -1742,7 +1758,7 @@
 
     // Compute size of arguments for saving when returning to deoptimized caller
     __ lduh(Lmethod, in_bytes(methodOopDesc::size_of_parameters_offset()), Gtmp1);
-    __ sll(Gtmp1, Interpreter::logStackElementSize(), Gtmp1);
+    __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
     __ sub(Llocals, Gtmp1, Gtmp2);
     __ add(Gtmp2, wordSize, Gtmp2);
     // Save these arguments
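
Throughout this file, Interpreter::logStackElementSize, stackElementWords and stackElementSize change from function calls to plain constants: with the tagged-stack interpreter gone, a stack slot is always exactly one machine word, so the sizes are compile-time facts. A sketch of the resulting geometry (values shown for an assumed 64-bit build):

    const int stackElementWords   = 1;                      // one word per expression-stack slot
    const int stackElementSize    = 8 * stackElementWords;  // wordSize == 8 on LP64
    const int logStackElementSize = 3;                      // log2(stackElementSize)

    // The sll(..., logStackElementSize, ...) sequences above compute exactly this:
    inline int size_in_bytes(int num_slots) { return num_slots << logStackElementSize; }
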
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/sparc/vm/templateInterpreter_sparc.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,8 @@
  // fail with a guarantee ("not enough space for interpreter generation")
   // if too small.
   // Run with +PrintInterpreter to get the VM to print out the size.
-  // Max size with JVMTI and TaggedStackInterpreter
+  // Max size with JVMTI
+
 #ifdef _LP64
  // The sethi() instruction generates lots more instructions when the shell
   // stack limit is unlimited, so that's why this is much bigger.
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -580,7 +580,6 @@
 
 void TemplateTable::iload(int n) {
   transition(vtos, itos);
-  debug_only(__ verify_local_tag(frame::TagValue, Llocals, Otos_i, n));
   __ ld( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
 }
 
@@ -588,7 +587,6 @@
 void TemplateTable::lload(int n) {
   transition(vtos, ltos);
   assert(n+1 < Argument::n_register_parameters, "would need more code");
-  debug_only(__ verify_local_tag(frame::TagCategory2, Llocals, Otos_l, n));
   __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
 }
 
@@ -596,7 +594,6 @@
 void TemplateTable::fload(int n) {
   transition(vtos, ftos);
   assert(n < Argument::n_register_parameters, "would need more code");
-  debug_only(__ verify_local_tag(frame::TagValue, Llocals, G3_scratch, n));
   __ ldf( FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n),     Ftos_f );
 }
 
@@ -604,14 +601,12 @@
 void TemplateTable::dload(int n) {
   transition(vtos, dtos);
   FloatRegister dst = Ftos_d;
-  debug_only(__ verify_local_tag(frame::TagCategory2, Llocals, G3_scratch, n));
   __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
 }
 
 
 void TemplateTable::aload(int n) {
   transition(vtos, atos);
-  debug_only(__ verify_local_tag(frame::TagReference, Llocals, Otos_i, n));
   __ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
 }
 
@@ -707,12 +702,11 @@
 
 void TemplateTable::astore() {
   transition(vtos, vtos);
-  // astore tos can also be a returnAddress, so load and store the tag too
-  __ load_ptr_and_tag(0, Otos_i, Otos_l2);
-  __ inc(Lesp, Interpreter::stackElementSize());
+  __ load_ptr(0, Otos_i);
+  __ inc(Lesp, Interpreter::stackElementSize);
   __ verify_oop_or_return_address(Otos_i, G3_scratch);
   locals_index(G3_scratch);
-  __ store_local_ptr( G3_scratch, Otos_i, Otos_l2 );
+  __ store_local_ptr(G3_scratch, Otos_i);
 }
 
 
@@ -750,12 +744,11 @@
 
 void TemplateTable::wide_astore() {
   transition(vtos, vtos);
-  // astore tos can also be a returnAddress, so load and store the tag too
-  __ load_ptr_and_tag(0, Otos_i, Otos_l2);
-  __ inc(Lesp, Interpreter::stackElementSize());
+  __ load_ptr(0, Otos_i);
+  __ inc(Lesp, Interpreter::stackElementSize);
   __ verify_oop_or_return_address(Otos_i, G3_scratch);
   locals_index_wide(G3_scratch);
-  __ store_local_ptr( G3_scratch, Otos_i, Otos_l2 );
+  __ store_local_ptr(G3_scratch, Otos_i);
 }
 
 
@@ -845,13 +838,13 @@
   do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);
 
   __ ba(false,done);
-  __ delayed()->inc(Lesp, 3* Interpreter::stackElementSize()); // adj sp (pops array, index and value)
+  __ delayed()->inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)
 
   __ bind(is_null);
   do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);
 
   __ profile_null_seen(G3_scratch);
-  __ inc(Lesp, 3* Interpreter::stackElementSize());     // adj sp (pops array, index and value)
+  __ inc(Lesp, 3* Interpreter::stackElementSize);     // adj sp (pops array, index and value)
   __ bind(done);
 }
 
@@ -884,7 +877,6 @@
 
 void TemplateTable::istore(int n) {
   transition(itos, vtos);
-  __ tag_local(frame::TagValue, Llocals, Otos_i, n);
   __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
 }
 
@@ -892,7 +884,6 @@
 void TemplateTable::lstore(int n) {
   transition(ltos, vtos);
   assert(n+1 < Argument::n_register_parameters, "only handle register cases");
-  __ tag_local(frame::TagCategory2, Llocals, Otos_l, n);
   __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
 
 }
@@ -901,7 +892,6 @@
 void TemplateTable::fstore(int n) {
   transition(ftos, vtos);
   assert(n < Argument::n_register_parameters, "only handle register cases");
-  __ tag_local(frame::TagValue, Llocals, Otos_l, n);
   __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
 }
 
@@ -909,30 +899,28 @@
 void TemplateTable::dstore(int n) {
   transition(dtos, vtos);
   FloatRegister src = Ftos_d;
-  __ tag_local(frame::TagCategory2, Llocals, Otos_l, n);
   __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
 }
 
 
 void TemplateTable::astore(int n) {
   transition(vtos, vtos);
-  // astore tos can also be a returnAddress, so load and store the tag too
-  __ load_ptr_and_tag(0, Otos_i, Otos_l2);
-  __ inc(Lesp, Interpreter::stackElementSize());
+  __ load_ptr(0, Otos_i);
+  __ inc(Lesp, Interpreter::stackElementSize);
   __ verify_oop_or_return_address(Otos_i, G3_scratch);
-  __ store_local_ptr( n, Otos_i, Otos_l2 );
+  __ store_local_ptr(n, Otos_i);
 }
 
 
 void TemplateTable::pop() {
   transition(vtos, vtos);
-  __ inc(Lesp, Interpreter::stackElementSize());
+  __ inc(Lesp, Interpreter::stackElementSize);
 }
 
 
 void TemplateTable::pop2() {
   transition(vtos, vtos);
-  __ inc(Lesp, 2 * Interpreter::stackElementSize());
+  __ inc(Lesp, 2 * Interpreter::stackElementSize);
 }
 
 
@@ -940,8 +928,8 @@
   transition(vtos, vtos);
   // stack: ..., a
  // load a
-  __ load_ptr_and_tag(0, Otos_i, Otos_l2);
-  __ push_ptr(Otos_i, Otos_l2);
+  __ load_ptr(0, Otos_i);
+  __ push_ptr(Otos_i);
   // stack: ..., a, a
 }
 
@@ -949,11 +937,11 @@
 void TemplateTable::dup_x1() {
   transition(vtos, vtos);
   // stack: ..., a, b
-  __ load_ptr_and_tag(1, G3_scratch, G4_scratch);   // get a
-  __ load_ptr_and_tag(0, Otos_l1, Otos_l2);         // get b
-  __ store_ptr_and_tag(1, Otos_l1, Otos_l2);        // put b
-  __ store_ptr_and_tag(0, G3_scratch, G4_scratch);  // put a - like swap
-  __ push_ptr(Otos_l1, Otos_l2);                    // push b
+  __ load_ptr( 1, G3_scratch);  // get a
+  __ load_ptr( 0, Otos_l1);     // get b
+  __ store_ptr(1, Otos_l1);     // put b
+  __ store_ptr(0, G3_scratch);  // put a - like swap
+  __ push_ptr(Otos_l1);         // push b
   // stack: ..., b, a, b
 }
 
@@ -962,27 +950,27 @@
   transition(vtos, vtos);
   // stack: ..., a, b, c
   // get c and push on stack, reuse registers
-  __ load_ptr_and_tag(0, G3_scratch, G4_scratch);     // get c
-  __ push_ptr(G3_scratch, G4_scratch);               // push c with tag
+  __ load_ptr( 0, G3_scratch);  // get c
+  __ push_ptr(G3_scratch);      // push c
   // stack: ..., a, b, c, c  (c in reg)  (Lesp - 4)
   // (stack offsets n+1 now)
-  __ load_ptr_and_tag(3, Otos_l1, Otos_l2);          // get a
-  __ store_ptr_and_tag(3, G3_scratch, G4_scratch);   // put c at 3
+  __ load_ptr( 3, Otos_l1);     // get a
+  __ store_ptr(3, G3_scratch);  // put c at 3
   // stack: ..., c, b, c, c  (a in reg)
-  __ load_ptr_and_tag(2, G3_scratch, G4_scratch);    // get b
-  __ store_ptr_and_tag(2, Otos_l1, Otos_l2);         // put a at 2
+  __ load_ptr( 2, G3_scratch);  // get b
+  __ store_ptr(2, Otos_l1);     // put a at 2
   // stack: ..., c, a, c, c  (b in reg)
-  __ store_ptr_and_tag(1, G3_scratch, G4_scratch);   // put b at 1
+  __ store_ptr(1, G3_scratch);  // put b at 1
   // stack: ..., c, a, b, c
 }
 
 
 void TemplateTable::dup2() {
   transition(vtos, vtos);
-  __ load_ptr_and_tag(1, G3_scratch, G4_scratch);     // get a
-  __ load_ptr_and_tag(0, Otos_l1, Otos_l2);           // get b
-  __ push_ptr(G3_scratch, G4_scratch);                // push a
-  __ push_ptr(Otos_l1, Otos_l2);                      // push b
+  __ load_ptr(1, G3_scratch);  // get a
+  __ load_ptr(0, Otos_l1);     // get b
+  __ push_ptr(G3_scratch);     // push a
+  __ push_ptr(Otos_l1);        // push b
   // stack: ..., a, b, a, b
 }
 
@@ -990,17 +978,17 @@
 void TemplateTable::dup2_x1() {
   transition(vtos, vtos);
   // stack: ..., a, b, c
-  __ load_ptr_and_tag(1, Lscratch, G1_scratch);       // get b
-  __ load_ptr_and_tag(2, Otos_l1, Otos_l2);           // get a
-  __ store_ptr_and_tag(2, Lscratch, G1_scratch);      // put b at a
+  __ load_ptr( 1, Lscratch);    // get b
+  __ load_ptr( 2, Otos_l1);     // get a
+  __ store_ptr(2, Lscratch);    // put b at a
   // stack: ..., b, b, c
-  __ load_ptr_and_tag(0, G3_scratch, G4_scratch);     // get c
-  __ store_ptr_and_tag(1, G3_scratch, G4_scratch);    // put c at b
+  __ load_ptr( 0, G3_scratch);  // get c
+  __ store_ptr(1, G3_scratch);  // put c at b
   // stack: ..., b, c, c
-  __ store_ptr_and_tag(0, Otos_l1, Otos_l2);          // put a at c
+  __ store_ptr(0, Otos_l1);     // put a at c
   // stack: ..., b, c, a
-  __ push_ptr(Lscratch, G1_scratch);                  // push b
-  __ push_ptr(G3_scratch, G4_scratch);                // push c
+  __ push_ptr(Lscratch);        // push b
+  __ push_ptr(G3_scratch);      // push c
   // stack: ..., b, c, a, b, c
 }
 
@@ -1010,18 +998,18 @@
 void TemplateTable::dup2_x2() {
   transition(vtos, vtos);
   // stack: ..., a, b, c, d
-  __ load_ptr_and_tag(1, Lscratch, G1_scratch);       // get c
-  __ load_ptr_and_tag(3, Otos_l1, Otos_l2);           // get a
-  __ store_ptr_and_tag(3, Lscratch, G1_scratch);      // put c at 3
-  __ store_ptr_and_tag(1, Otos_l1, Otos_l2);          // put a at 1
+  __ load_ptr( 1, Lscratch);    // get c
+  __ load_ptr( 3, Otos_l1);     // get a
+  __ store_ptr(3, Lscratch);    // put c at 3
+  __ store_ptr(1, Otos_l1);     // put a at 1
   // stack: ..., c, b, a, d
-  __ load_ptr_and_tag(2, G3_scratch, G4_scratch);     // get b
-  __ load_ptr_and_tag(0, Otos_l1, Otos_l2);           // get d
-  __ store_ptr_and_tag(0, G3_scratch, G4_scratch);    // put b at 0
-  __ store_ptr_and_tag(2, Otos_l1, Otos_l2);          // put d at 2
+  __ load_ptr( 2, G3_scratch);  // get b
+  __ load_ptr( 0, Otos_l1);     // get d
+  __ store_ptr(0, G3_scratch);  // put b at 0
+  __ store_ptr(2, Otos_l1);     // put d at 2
   // stack: ..., c, d, a, b
-  __ push_ptr(Lscratch, G1_scratch);                  // push c
-  __ push_ptr(Otos_l1, Otos_l2);                      // push d
+  __ push_ptr(Lscratch);        // push c
+  __ push_ptr(Otos_l1);         // push d
   // stack: ..., c, d, a, b, c, d
 }
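
With the tags gone, the dup/swap templates reduce to plain one-word loads and stores at expression-stack offsets. A worked sketch of the dup2_x2 stack effect above, on a plain array standing in for the expression stack:

    #include <cstdint>

    // Index 0 is the top of stack; pushes grow toward negative indices.
    void dup2_x2(intptr_t* s) {          // before: s[3]=a s[2]=b s[1]=c s[0]=d
      intptr_t c = s[1], a = s[3];
      s[3] = c; s[1] = a;                // ..., c, b, a, d
      intptr_t b = s[2], d = s[0];
      s[0] = b; s[2] = d;                // ..., c, d, a, b
      s[-1] = c; s[-2] = d;              // push c, push d => ..., c, d, a, b, c, d
    }
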
 
@@ -1029,10 +1017,10 @@
 void TemplateTable::swap() {
   transition(vtos, vtos);
   // stack: ..., a, b
-  __ load_ptr_and_tag(1, G3_scratch, G4_scratch);     // get a
-  __ load_ptr_and_tag(0, Otos_l1, Otos_l2);           // get b
-  __ store_ptr_and_tag(0, G3_scratch, G4_scratch);    // put b
-  __ store_ptr_and_tag(1, Otos_l1, Otos_l2);          // put a
+  __ load_ptr( 1, G3_scratch);  // get a
+  __ load_ptr( 0, Otos_l1);     // get b
+  __ store_ptr(0, G3_scratch);  // put b
+  __ store_ptr(1, Otos_l1);     // put a
   // stack: ..., b, a
 }
 
@@ -1045,9 +1033,9 @@
    case  sub:  __  sub(O1, Otos_i, Otos_i);  break;
      // %%%%% Mul may not exist: better to call .mul?
    case  mul:  __ smul(O1, Otos_i, Otos_i);  break;
-   case _and:  __  and3(O1, Otos_i, Otos_i);  break;
-   case  _or:  __   or3(O1, Otos_i, Otos_i);  break;
-   case _xor:  __  xor3(O1, Otos_i, Otos_i);  break;
+   case _and:  __ and3(O1, Otos_i, Otos_i);  break;
+   case  _or:  __  or3(O1, Otos_i, Otos_i);  break;
+   case _xor:  __ xor3(O1, Otos_i, Otos_i);  break;
    case  shl:  __  sll(O1, Otos_i, Otos_i);  break;
    case  shr:  __  sra(O1, Otos_i, Otos_i);  break;
    case ushr:  __  srl(O1, Otos_i, Otos_i);  break;
@@ -1061,17 +1049,17 @@
   __ pop_l(O2);
   switch (op) {
 #ifdef _LP64
-   case  add:  __ add(O2, Otos_l, Otos_l);  break;
-   case  sub:  __ sub(O2, Otos_l, Otos_l);  break;
-   case _and:  __ and3( O2, Otos_l, Otos_l);  break;
-   case  _or:  __  or3( O2, Otos_l, Otos_l);  break;
-   case _xor:  __ xor3( O2, Otos_l, Otos_l);  break;
+   case  add:  __  add(O2, Otos_l, Otos_l);  break;
+   case  sub:  __  sub(O2, Otos_l, Otos_l);  break;
+   case _and:  __ and3(O2, Otos_l, Otos_l);  break;
+   case  _or:  __  or3(O2, Otos_l, Otos_l);  break;
+   case _xor:  __ xor3(O2, Otos_l, Otos_l);  break;
 #else
    case  add:  __ addcc(O3, Otos_l2, Otos_l2);  __ addc(O2, Otos_l1, Otos_l1);  break;
    case  sub:  __ subcc(O3, Otos_l2, Otos_l2);  __ subc(O2, Otos_l1, Otos_l1);  break;
-   case _and:  __ and3(  O3, Otos_l2, Otos_l2);  __ and3( O2, Otos_l1, Otos_l1);  break;
-   case  _or:  __  or3(  O3, Otos_l2, Otos_l2);  __  or3( O2, Otos_l1, Otos_l1);  break;
-   case _xor:  __ xor3(  O3, Otos_l2, Otos_l2);  __ xor3( O2, Otos_l1, Otos_l1);  break;
+   case _and:  __  and3(O3, Otos_l2, Otos_l2);  __ and3(O2, Otos_l1, Otos_l1);  break;
+   case  _or:  __   or3(O3, Otos_l2, Otos_l2);  __  or3(O2, Otos_l1, Otos_l1);  break;
+   case _xor:  __  xor3(O3, Otos_l2, Otos_l2);  __ xor3(O2, Otos_l1, Otos_l1);  break;
 #endif
    default: ShouldNotReachHere();
   }
@@ -1307,7 +1295,7 @@
   __ ldsb(Lbcp, 2, O2);  // load constant
   __ access_local_int(G3_scratch, Otos_i);
   __ add(Otos_i, O2, Otos_i);
-  __ st(Otos_i, G3_scratch, Interpreter::value_offset_in_bytes());    // access_local_int puts E.A. in G3_scratch
+  __ st(Otos_i, G3_scratch, 0);    // access_local_int puts E.A. in G3_scratch
 }
 
 
@@ -1317,7 +1305,7 @@
   __ get_2_byte_integer_at_bcp( 4,  O2, O3, InterpreterMacroAssembler::Signed);
   __ access_local_int(G3_scratch, Otos_i);
   __ add(Otos_i, O3, Otos_i);
-  __ st(Otos_i, G3_scratch, Interpreter::value_offset_in_bytes());    // access_local_int puts E.A. in G3_scratch
+  __ st(Otos_i, G3_scratch, 0);    // access_local_int puts E.A. in G3_scratch
 }
 
 
@@ -1555,7 +1543,7 @@
     // Bump Lbcp to target of JSR
     __ add(Lbcp, O1_disp, Lbcp);
     // Push returnAddress for "ret" on stack
-    __ push_ptr(Otos_i, G0); // push ptr sized thing plus 0 for tag.
+    __ push_ptr(Otos_i);
     // And away we go!
     __ dispatch_next(vtos);
     return;
@@ -1963,19 +1951,30 @@
 // ----------------------------------------------------------------------------
 void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
   assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
+  bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
+
   // Depends on cpCacheOop layout!
   const int shift_count = (1 + byte_no)*BitsPerByte;
   Label resolved;
 
-  __ get_cache_and_index_at_bcp(Rcache, index, 1);
-  __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
-                    ConstantPoolCacheEntry::indices_offset(), Lbyte_code);
-
-  __ srl(  Lbyte_code, shift_count, Lbyte_code );
-  __ and3( Lbyte_code,        0xFF, Lbyte_code );
-  __ cmp(  Lbyte_code, (int)bytecode());
-  __ br(   Assembler::equal, false, Assembler::pt, resolved);
-  __ delayed()->set((int)bytecode(), O1);
+  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
+  if (is_invokedynamic) {
+    // We are resolved if the f1 field contains a non-null CallSite object.
+    __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
+              ConstantPoolCacheEntry::f1_offset(), Lbyte_code);
+    __ tst(Lbyte_code);
+    __ br(Assembler::notEqual, false, Assembler::pt, resolved);
+    __ delayed()->set((int)bytecode(), O1);
+  } else {
+    __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
+              ConstantPoolCacheEntry::indices_offset(), Lbyte_code);
+
+    __ srl(  Lbyte_code, shift_count, Lbyte_code );
+    __ and3( Lbyte_code,        0xFF, Lbyte_code );
+    __ cmp(  Lbyte_code, (int)bytecode());
+    __ br(   Assembler::equal, false, Assembler::pt, resolved);
+    __ delayed()->set((int)bytecode(), O1);
+  }
 
   address entry;
   switch (bytecode()) {
@@ -1987,12 +1986,13 @@
     case Bytecodes::_invokespecial  : // fall through
     case Bytecodes::_invokestatic   : // fall through
     case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);  break;
+    case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);  break;
     default                         : ShouldNotReachHere();                                 break;
   }
   // first time invocation - must resolve first
   __ call_VM(noreg, entry, O1);
   // Update registers with resolved info
-  __ get_cache_and_index_at_bcp(Rcache, index, 1);
+  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
   __ bind(resolved);
 }
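
The resolution test now forks on the bytecode: ordinary invokes compare the rewritten bytecode stored in the cp-cache indices word, while invokedynamic simply checks whether the f1 field already holds a non-null CallSite oop. A sketch of the two tests (field names as in the diff, struct layout simplified):

    struct ConstantPoolCacheEntry { intptr_t indices; void* f1; };

    bool is_resolved(const ConstantPoolCacheEntry& e, int bytecode, int byte_no,
                     bool is_invokedynamic) {
      if (is_invokedynamic)
        return e.f1 != 0;                         // non-null CallSite => resolved
      const int shift_count = (1 + byte_no) * 8;  // BitsPerByte == 8
      return ((e.indices >> shift_count) & 0xFF) == bytecode;
    }
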
 
@@ -2742,7 +2742,7 @@
   Register Rflags  = G4_scratch;
   Register Rreceiver = Lscratch;
 
-  __ ld_ptr(Llocals, Interpreter::value_offset_in_bytes(), Rreceiver);
+  __ ld_ptr(Llocals, 0, Rreceiver);
 
   // access constant pool cache  (is resolved)
   __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2);
@@ -3130,7 +3130,42 @@
     return;
   }
 
-  __ stop("invokedynamic NYI");//6815692//
+  // G5: CallSite object (f1)
+  // XX: unused (f2)
+  // G3: receiver address
+  // XX: flags (unused)
+
+  Register G5_callsite = G5_method;
+  Register Rscratch    = G3_scratch;
+  Register Rtemp       = G1_scratch;
+  Register Rret        = Lscratch;
+
+  load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret, false);
+  __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore
+
+  __ verify_oop(G5_callsite);
+
+  // profile this call
+  __ profile_call(O4);
+
+  // get return address
+  AddressLiteral table(Interpreter::return_5_addrs_by_index_table());
+  __ set(table, Rtemp);
+  __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);  // get return type
+  // Make sure we don't need to mask Rret for tosBits after the above shift
+  ConstantPoolCacheEntry::verify_tosBits();
+  __ sll(Rret, LogBytesPerWord, Rret);
+  __ ld_ptr(Rtemp, Rret, Rret);  // get return address
+
+  __ ld_ptr(G5_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle);
+  __ null_check(G3_method_handle);
+
+  // Adjust Rret first so Llast_SP can be the same as Rret
+  __ add(Rret, -frame::pc_return_offset, O7);
+  __ add(Lesp, BytesPerWord, Gargs);  // setup parameter pointer
+  __ jump_to_method_handle_entry(G3_method_handle, Rtemp, /* emit_delayed_nop */ false);
+  // Record SP so we can remove any stack space allocated by adapter transition
+  __ delayed()->mov(SP, Llast_SP);
 }
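
The return-address selection in the new invokedynamic template decodes the cached flags: shifting right by ConstantPoolCacheEntry::tosBits leaves the result type, which then indexes a per-TosState table of interpreter return entries. A sketch of that decoding (the table contents and the tosBits shift amount are assumptions):

    typedef unsigned char* address;

    address select_return_entry(address* return_5_addrs_by_index_table,
                                unsigned int flags, unsigned int tosBits) {
      unsigned int ret_type = flags >> tosBits;        // srl(Rret, tosBits, Rret)
      return return_5_addrs_by_index_table[ret_type];  // the sll + ld_ptr pair above
    }
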
 
 
@@ -3649,7 +3684,7 @@
   transition(vtos, atos);
      // put ndims * wordSize into Lscratch
   __ ldub( Lbcp,     3,               Lscratch);
-  __ sll(  Lscratch, Interpreter::logStackElementSize(), Lscratch);
+  __ sll(  Lscratch, Interpreter::logStackElementSize, Lscratch);
      // Lesp points past last_dim, so set to O1 to first_dim address
   __ add(  Lesp,     Lscratch,        O1);
      call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
--- a/src/cpu/sparc/vm/vm_version_sparc.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/sparc/vm/vm_version_sparc.cpp	Wed May 19 10:22:39 2010 -0700
@@ -86,14 +86,24 @@
     if (FLAG_IS_DEFAULT(InteriorEntryAlignment)) {
       FLAG_SET_DEFAULT(InteriorEntryAlignment, 4);
     }
+    if (is_niagara1_plus()) {
+      if (AllocatePrefetchStyle > 0 && FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
+        // Use BIS instruction for allocation prefetch.
+        FLAG_SET_DEFAULT(AllocatePrefetchStyle, 3);
+        if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
+          // Use smaller prefetch distance on N2 with BIS
+          FLAG_SET_DEFAULT(AllocatePrefetchDistance, 64);
+        }
+      }
+      if (AllocatePrefetchStyle != 3 && FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
+        // Use different prefetch distance without BIS
+        FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256);
+      }
+    }
+#endif
     if (FLAG_IS_DEFAULT(OptoLoopAlignment)) {
       FLAG_SET_DEFAULT(OptoLoopAlignment, 4);
     }
-    if (is_niagara1_plus() && FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
-      // Use smaller prefetch distance on N2
-      FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256);
-    }
-#endif
   }
 
   // Use hardware population count instruction if available.
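
The reshuffled ergonomics above prefer BIS-based allocation prefetch (style 3) with a short distance on Niagara-plus chips, and fall back to a longer distance when BIS is not selected. A sketch of the defaulting logic, with the FLAG_IS_DEFAULT/FLAG_SET_DEFAULT plumbing simplified to plain variables:

    void set_prefetch_defaults(bool is_niagara1_plus,
                               int& AllocatePrefetchStyle, bool style_is_default,
                               int& AllocatePrefetchDistance, bool distance_is_default) {
      if (!is_niagara1_plus) return;
      if (AllocatePrefetchStyle > 0 && style_is_default) {
        AllocatePrefetchStyle = 3;            // BIS-based allocation prefetch
        if (distance_is_default)
          AllocatePrefetchDistance = 64;      // shorter distance with BIS
      }
      if (AllocatePrefetchStyle != 3 && distance_is_default)
        AllocatePrefetchDistance = 256;       // longer distance without BIS
    }
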
--- a/src/cpu/x86/vm/assembler_x86.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3365,6 +3365,13 @@
 
 #else // LP64
 
+void Assembler::set_byte_if_not_zero(Register dst) {
+  int enc = prefix_and_encode(dst->encoding(), true);
+  emit_byte(0x0F);
+  emit_byte(0x95);
+  emit_byte(0xE0 | enc);
+}
+
 // 64bit only pieces of the assembler
  // This should only be used by 64bit instructions that can use rip-relative addressing;
  // it cannot be used by instructions that want an immediate value.
@@ -6485,24 +6492,19 @@
 }
 
 void MacroAssembler::load_sized_value(Register dst, Address src,
-                                      int size_in_bytes, bool is_signed) {
-  switch (size_in_bytes ^ (is_signed ? -1 : 0)) {
+                                      size_t size_in_bytes, bool is_signed) {
+  switch (size_in_bytes) {
 #ifndef _LP64
   // For case 8, caller is responsible for manually loading
   // the second word into another register.
-  case ~8:  // fall through:
-  case  8:  movl(                dst, src ); break;
+  case  8: movl(dst, src); break;
 #else
-  case ~8:  // fall through:
-  case  8:  movq(                dst, src ); break;
+  case  8: movq(dst, src); break;
 #endif
-  case ~4:  // fall through:
-  case  4:  movl(                dst, src ); break;
-  case ~2:  load_signed_short(   dst, src ); break;
-  case  2:  load_unsigned_short( dst, src ); break;
-  case ~1:  load_signed_byte(    dst, src ); break;
-  case  1:  load_unsigned_byte(  dst, src ); break;
-  default:  ShouldNotReachHere();
+  case  4: movl(dst, src); break;
+  case  2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
+  case  1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
+  default: ShouldNotReachHere();
   }
 }
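
The old switch packed signedness into the key by XOR with -1, i.e. bitwise complement, so case ~2 meant "2 bytes, signed"; the rewrite keys on size alone and branches on is_signed, which says the same thing legibly. A tiny worked example of the old encoding:

    #include <cassert>

    int old_key(int size, bool is_signed) { return size ^ (is_signed ? -1 : 0); }

    int main() {
      assert(old_key(2, true)  == ~2);   // was: case ~2: load_signed_short
      assert(old_key(2, false) ==  2);   // was: case  2: load_unsigned_short
      return 0;
    }
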
 
@@ -7699,6 +7701,7 @@
 // method handle's MethodType.  This macro hides the distinction.
 void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
                                                 Register temp_reg) {
+  assert_different_registers(vmslots_reg, mh_reg, temp_reg);
   if (UseCompressedOops)  unimplemented();  // field accesses must decode
   // load mh.type.form.vmslots
   if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) {
@@ -7737,7 +7740,7 @@
 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
                                          int extra_slot_offset) {
   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
-  int stackElementSize = Interpreter::stackElementSize();
+  int stackElementSize = Interpreter::stackElementSize;
   int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
 #ifdef ASSERT
   int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
@@ -7968,7 +7971,7 @@
       case 2: return "special";
       case 3: return "empty";
     }
-    ShouldNotReachHere()
+    ShouldNotReachHere();
     return NULL;
   }
 
--- a/src/cpu/x86/vm/assembler_x86.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/assembler_x86.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1511,7 +1511,7 @@
   void extend_sign(Register hi, Register lo);
 
   // Loading values by size and signed-ness
-  void load_sized_value(Register dst, Address src, int size_in_bytes, bool is_signed);
+  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed);
 
   // Support for inc/dec with optimal instruction selection depending on value
 
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed May 19 10:22:39 2010 -0700
@@ -455,6 +455,60 @@
 }
 
 
+// Emit the code to remove the frame from the stack in the exception
+// unwind path.
+int LIR_Assembler::emit_unwind_handler() {
+#ifndef PRODUCT
+  if (CommentedAssembly) {
+    _masm->block_comment("Unwind handler");
+  }
+#endif
+
+  int offset = code_offset();
+
+  // Fetch the exception from TLS and clear out exception related thread state
+  __ get_thread(rsi);
+  __ movptr(rax, Address(rsi, JavaThread::exception_oop_offset()));
+  __ movptr(Address(rsi, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
+  __ movptr(Address(rsi, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
+
+  __ bind(_unwind_handler_entry);
+  __ verify_not_null_oop(rax);
+  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
+    __ mov(rsi, rax);  // Preserve the exception
+  }
+
+  // Perform needed unlocking
+  MonitorExitStub* stub = NULL;
+  if (method()->is_synchronized()) {
+    monitor_address(0, FrameMap::rax_opr);
+    stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
+    __ unlock_object(rdi, rbx, rax, *stub->entry());
+    __ bind(*stub->continuation());
+  }
+
+  if (compilation()->env()->dtrace_method_probes()) {
+    __ movoop(Address(rsp, 0), method()->constant_encoding());
+    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
+  }
+
+  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
+    __ mov(rax, rsi);  // Restore the exception
+  }
+
+  // remove the activation and dispatch to the unwind handler
+  __ remove_frame(initial_frame_size_in_bytes());
+  __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
+
+  // Emit the slow path assembly
+  if (stub != NULL) {
+    stub->emit_code(this);
+  }
+
+  return offset;
+}
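
The new unwind handler centralizes what every unwind site used to do for itself: fetch and clear the pending exception from thread-local state, unlock if the method is synchronized, fire the dtrace exit probe, then remove the frame and jump to the runtime. A C++-ish sketch of that sequence (the ThreadState struct is a stand-in for the JavaThread TLS fields):

    struct ThreadState { void* exception_oop; void* exception_pc; };

    void* unwind_handler(ThreadState* t, bool synchronized, bool dtrace_probes) {
      void* exception = t->exception_oop;   // fetch the pending exception from TLS
      t->exception_oop = nullptr;           // clear exception-related thread state
      t->exception_pc  = nullptr;
      if (synchronized)  { /* unlock_object via the MonitorExitStub slow path */ }
      if (dtrace_probes) { /* call SharedRuntime::dtrace_method_exit */ }
      // remove_frame(initial_frame_size_in_bytes()), then jump to
      // Runtime1::unwind_exception_id with the exception in rax
      return exception;
    }
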
+
+
 int LIR_Assembler::emit_deopt_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
@@ -1190,8 +1244,7 @@
       break;
 #endif // _LP64
     case T_INT:
-      // %%% could this be a movl? this is safer but longer instruction
-      __ movl2ptr(dest->as_register(), from_addr);
+      __ movl(dest->as_register(), from_addr);
       break;
 
     case T_LONG: {
@@ -1249,7 +1302,6 @@
         __ shll(dest_reg, 24);
         __ sarl(dest_reg, 24);
       }
-      // These are unsigned so the zero extension on 64bit is just what we need
       break;
     }
 
@@ -1261,8 +1313,6 @@
       } else {
         __ movw(dest_reg, from_addr);
       }
-      // This is unsigned so the zero extension on 64bit is just what we need
-      // __ movl2ptr(dest_reg, dest_reg);
       break;
     }
 
@@ -1275,8 +1325,6 @@
         __ shll(dest_reg, 16);
         __ sarl(dest_reg, 16);
       }
-      // Might not be needed in 64bit but certainly doesn't hurt (except for code size)
-      __ movl2ptr(dest_reg, dest_reg);
       break;
     }
 
@@ -2690,19 +2738,14 @@
   } else {
     assert(code == lir_cmp_l2i, "check");
 #ifdef _LP64
-      Register dest = dst->as_register();
-      __ xorptr(dest, dest);
-      Label high, done;
-      __ cmpptr(left->as_register_lo(), right->as_register_lo());
-      __ jcc(Assembler::equal, done);
-      __ jcc(Assembler::greater, high);
-      __ decrement(dest);
-      __ jmp(done);
-      __ bind(high);
-      __ increment(dest);
-
-      __ bind(done);
-
+    Label done;
+    Register dest = dst->as_register();
+    __ cmpptr(left->as_register_lo(), right->as_register_lo());
+    __ movl(dest, -1);
+    __ jccb(Assembler::less, done);
+    __ set_byte_if_not_zero(dest);
+    __ movzbl(dest, dest);
+    __ bind(done);
 #else
     __ lcmp2int(left->as_register_hi(),
                 left->as_register_lo(),
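
The rewritten 64-bit lir_cmp_l2i drops the two-branch increment/decrement dance in favor of one short branch plus a setne: after the compare, dest starts at -1, the less-than case exits early, and otherwise set_byte_if_not_zero followed by movzbl yields 0 for equal and 1 for greater. The same computation in plain C++:

    int cmp_l2i(long left, long right) {
      if (left < right) return -1;       // movl(dest, -1); jccb(less, done)
      int not_zero = (left != right);    // setne (set_byte_if_not_zero)
      return not_zero;                   // movzbl zero-extends: 0 if equal, 1 if greater
    }
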
@@ -2795,47 +2838,48 @@
   // On 64bit this will die since it will take a movq & jmp, must be only a jmp
   __ jump(RuntimeAddress(__ pc()));
 
-  assert(__ offset() - start <= call_stub_size, "stub too big")
+  assert(__ offset() - start <= call_stub_size, "stub too big");
   __ end_a_stub();
 }
 
 
-void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind) {
+void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
   assert(exceptionOop->as_register() == rax, "must match");
-  assert(unwind || exceptionPC->as_register() == rdx, "must match");
+  assert(exceptionPC->as_register() == rdx, "must match");
 
   // exception object is not added to oop map by LinearScan
   // (LinearScan assumes that no oops are in fixed registers)
   info->add_register_oop(exceptionOop);
   Runtime1::StubID unwind_id;
 
-  if (!unwind) {
-    // get current pc information
-    // pc is only needed if the method has an exception handler, the unwind code does not need it.
-    int pc_for_athrow_offset = __ offset();
-    InternalAddress pc_for_athrow(__ pc());
-    __ lea(exceptionPC->as_register(), pc_for_athrow);
-    add_call_info(pc_for_athrow_offset, info); // for exception handler
-
-    __ verify_not_null_oop(rax);
-    // search an exception handler (rax: exception oop, rdx: throwing pc)
-    if (compilation()->has_fpu_code()) {
-      unwind_id = Runtime1::handle_exception_id;
-    } else {
-      unwind_id = Runtime1::handle_exception_nofpu_id;
-    }
-    __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
+  // get current pc information
+  // pc is only needed if the method has an exception handler; the unwind code does not need it.
+  int pc_for_athrow_offset = __ offset();
+  InternalAddress pc_for_athrow(__ pc());
+  __ lea(exceptionPC->as_register(), pc_for_athrow);
+  add_call_info(pc_for_athrow_offset, info); // for exception handler
+
+  __ verify_not_null_oop(rax);
+  // search an exception handler (rax: exception oop, rdx: throwing pc)
+  if (compilation()->has_fpu_code()) {
+    unwind_id = Runtime1::handle_exception_id;
   } else {
-    // remove the activation
-    __ remove_frame(initial_frame_size_in_bytes());
-    __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
+    unwind_id = Runtime1::handle_exception_nofpu_id;
   }
+  __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
 
   // enough room for two byte trap
   __ nop();
 }
 
 
+void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
+  assert(exceptionOop->as_register() == rax, "must match");
+
+  __ jmp(_unwind_handler_entry);
+}
+
+
 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
 
   // optimized version for linear scan:
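
throw_op loses its unwind flag: athrow sites now always record the pc and call the handle_exception stub, while unwinding becomes the one-line unwind_op that jumps to the shared per-method _unwind_handler_entry emitted earlier, instead of duplicating frame removal at every unwind site. A sketch of the stub selection that remains in throw_op:

    enum StubID { handle_exception_id, handle_exception_nofpu_id };

    StubID pick_throw_stub(bool method_has_fpu_code) {
      // The nofpu variant can skip FPU state handling when the method uses none.
      return method_has_fpu_code ? handle_exception_id : handle_exception_nofpu_id;
    }
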
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Wed May 19 10:22:39 2010 -0700
@@ -781,7 +781,7 @@
 
   // Restore SP from BP if the exception PC is a MethodHandle call site.
   NOT_LP64(__ get_thread(thread);)
-  __ cmpl(Address(thread, JavaThread::is_method_handle_exception_offset()), 0);
+  __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
   __ cmovptr(Assembler::notEqual, rsp, rbp);
 
   // continue at exception handler (return address removed)
--- a/src/cpu/x86/vm/c2_globals_x86.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/c2_globals_x86.hpp	Wed May 19 10:22:39 2010 -0700
@@ -80,7 +80,6 @@
 // Ergonomics related flags
 define_pd_global(uint64_t,MaxRAM,                    4ULL*G);
 #endif // AMD64
-define_pd_global(intx, OptoLoopAlignment,            16);
 define_pd_global(intx, RegisterCostAreaRatio,        16000);
 
 // Peephole and CISC spilling both break the graph, and so makes the
--- a/src/cpu/x86/vm/cppInterpreter_x86.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/cppInterpreter_x86.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,6 @@
   // Size of interpreter code.  Increase if too small.  Interpreter will
  // fail with a guarantee ("not enough space for interpreter generation")
   // if too small.
-  // Run with +PrintInterpreterSize to get the VM to print out the size.
-  // Max size with JVMTI and TaggedStackInterpreter
+  // Run with +PrintInterpreter to get the VM to print out the size.
+  // Max size with JVMTI
   const static int InterpreterCodeSize = 168 * 1024;
--- a/src/cpu/x86/vm/frame_x86.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/frame_x86.cpp	Wed May 19 10:22:39 2010 -0700
@@ -291,8 +291,8 @@
 BasicObjectLock* frame::interpreter_frame_monitor_end() const {
   BasicObjectLock* result = (BasicObjectLock*) *addr_at(interpreter_frame_monitor_block_top_offset);
   // make sure the pointer points inside the frame
-  assert((intptr_t) fp() >  (intptr_t) result, "result must <  than frame pointer");
-  assert((intptr_t) sp() <= (intptr_t) result, "result must >= than stack pointer");
+  assert(sp() <= (intptr_t*) result, "monitor end should be above the stack pointer");
+  assert((intptr_t*) result < fp(),  "monitor end should be strictly below the frame pointer");
   return result;
 }
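
The rewritten asserts state the invariant directly as pointer comparisons: the monitor block must lie inside the frame, at or above sp and strictly below fp. A minimal restatement of that invariant:

    #include <cassert>
    #include <cstdint>

    void check_monitor_end(intptr_t* sp, intptr_t* fp, intptr_t* monitor_end) {
      assert(sp <= monitor_end && "monitor end should be above the stack pointer");
      assert(monitor_end < fp  && "monitor end should be strictly below the frame pointer");
    }
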
 
@@ -502,7 +502,7 @@
   // When unpacking an optimized frame the frame pointer is
   // adjusted with:
   int diff = (method->max_locals() - method->size_of_parameters()) *
-             Interpreter::stackElementWords();
+             Interpreter::stackElementWords;
   return _fp == (fp - diff);
 }
 
@@ -542,7 +542,7 @@
 
   // stack frames shouldn't be much larger than max_stack elements
 
-  if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize()) {
+  if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
     return false;
   }
 
@@ -594,7 +594,7 @@
 #ifdef AMD64
       // This is times two because we do a push(ltos) after pushing XMM0
       // and that takes two interpreter stack slots.
-      tos_addr += 2 * Interpreter::stackElementWords();
+      tos_addr += 2 * Interpreter::stackElementWords;
 #else
       tos_addr += 2;
 #endif // AMD64
--- a/src/cpu/x86/vm/globals_x86.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/globals_x86.hpp	Wed May 19 10:22:39 2010 -0700
@@ -45,6 +45,7 @@
 #else
 define_pd_global(intx, CodeEntryAlignment,       16);
 #endif // COMPILER2
+define_pd_global(intx, OptoLoopAlignment,        16);
 define_pd_global(intx, InlineFrequencyCount,     100);
 define_pd_global(intx, InlineSmallCode,          1000);
 
--- a/src/cpu/x86/vm/interp_masm_x86_32.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp	Wed May 19 10:22:39 2010 -0700
@@ -265,89 +265,30 @@
 
 // Java Expression Stack
 
-#ifdef ASSERT
-void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t) {
-  if (TaggedStackInterpreter) {
-    Label okay;
-    cmpptr(Address(rsp, wordSize), (int32_t)t);
-    jcc(Assembler::equal, okay);
-    // Also compare if the stack value is zero, then the tag might
-    // not have been set coming from deopt.
-    cmpptr(Address(rsp, 0), 0);
-    jcc(Assembler::equal, okay);
-    stop("Java Expression stack tag value is bad");
-    bind(okay);
-  }
-}
-#endif // ASSERT
-
 void InterpreterMacroAssembler::pop_ptr(Register r) {
-  debug_only(verify_stack_tag(frame::TagReference));
   pop(r);
-  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
-}
-
-void InterpreterMacroAssembler::pop_ptr(Register r, Register tag) {
-  pop(r);
-  // Tag may not be reference for jsr, can be returnAddress
-  if (TaggedStackInterpreter) pop(tag);
 }
 
 void InterpreterMacroAssembler::pop_i(Register r) {
-  debug_only(verify_stack_tag(frame::TagValue));
   pop(r);
-  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
 }
 
 void InterpreterMacroAssembler::pop_l(Register lo, Register hi) {
-  debug_only(verify_stack_tag(frame::TagValue));
   pop(lo);
-  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
-  debug_only(verify_stack_tag(frame::TagValue));
   pop(hi);
-  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
 }
 
 void InterpreterMacroAssembler::pop_f() {
-  debug_only(verify_stack_tag(frame::TagValue));
   fld_s(Address(rsp, 0));
   addptr(rsp, 1 * wordSize);
-  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
 }
 
 void InterpreterMacroAssembler::pop_d() {
-  // Write double to stack contiguously and load into ST0
-  pop_dtos_to_rsp();
   fld_d(Address(rsp, 0));
   addptr(rsp, 2 * wordSize);
 }
 
 
-// Pop the top of the java expression stack to execution stack (which
-// happens to be the same place).
-void InterpreterMacroAssembler::pop_dtos_to_rsp() {
-  if (TaggedStackInterpreter) {
-    // Pop double value into scratch registers
-    debug_only(verify_stack_tag(frame::TagValue));
-    pop(rax);
-    addptr(rsp, 1* wordSize);
-    debug_only(verify_stack_tag(frame::TagValue));
-    pop(rdx);
-    addptr(rsp, 1* wordSize);
-    push(rdx);
-    push(rax);
-  }
-}
-
-void InterpreterMacroAssembler::pop_ftos_to_rsp() {
-  if (TaggedStackInterpreter) {
-    debug_only(verify_stack_tag(frame::TagValue));
-    pop(rax);
-    addptr(rsp, 1 * wordSize);
-    push(rax);  // ftos is at rsp
-  }
-}
-
 void InterpreterMacroAssembler::pop(TosState state) {
   switch (state) {
     case atos: pop_ptr(rax);                                 break;
@@ -365,54 +306,28 @@
 }
 
 void InterpreterMacroAssembler::push_ptr(Register r) {
-  if (TaggedStackInterpreter) push(frame::TagReference);
-  push(r);
-}
-
-void InterpreterMacroAssembler::push_ptr(Register r, Register tag) {
-  if (TaggedStackInterpreter) push(tag);  // tag first
   push(r);
 }
 
 void InterpreterMacroAssembler::push_i(Register r) {
-  if (TaggedStackInterpreter) push(frame::TagValue);
   push(r);
 }
 
 void InterpreterMacroAssembler::push_l(Register lo, Register hi) {
-  if (TaggedStackInterpreter) push(frame::TagValue);
   push(hi);
-  if (TaggedStackInterpreter) push(frame::TagValue);
   push(lo);
 }
 
 void InterpreterMacroAssembler::push_f() {
-  if (TaggedStackInterpreter) push(frame::TagValue);
   // Do not schedule for no AGI! Never write beyond rsp!
   subptr(rsp, 1 * wordSize);
   fstp_s(Address(rsp, 0));
 }
 
 void InterpreterMacroAssembler::push_d(Register r) {
-  if (TaggedStackInterpreter) {
-    // Double values are stored as:
-    //   tag
-    //   high
-    //   tag
-    //   low
-    push(frame::TagValue);
-    subptr(rsp, 3 * wordSize);
-    fstp_d(Address(rsp, 0));
-    // move high word up to slot n-1
-    movl(r, Address(rsp, 1*wordSize));
-    movl(Address(rsp, 2*wordSize), r);
-    // move tag
-    movl(Address(rsp, 1*wordSize), frame::TagValue);
-  } else {
-    // Do not schedule for no AGI! Never write beyond rsp!
-    subptr(rsp, 2 * wordSize);
-    fstp_d(Address(rsp, 0));
-  }
+  // Do not schedule for no AGI! Never write beyond rsp!
+  subptr(rsp, 2 * wordSize);
+  fstp_d(Address(rsp, 0));
 }
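
With TaggedStackInterpreter removed, every push and pop here moves only the value: a double now occupies two plain adjacent words, where the old tagged layout interleaved tag slots (tag, high, tag, low) and doubled the footprint. A sketch of the surviving fast path, assuming a downward-growing word stack:

    #include <cstring>
    #include <cstdint>

    void push_double(intptr_t*& sp, double v) {
      sp -= 2;                          // subptr(rsp, 2 * wordSize)
      std::memcpy(sp, &v, sizeof v);    // fstp_d(Address(rsp, 0))
    }
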
 
 
@@ -433,118 +348,15 @@
 }
 
 
-// Tagged stack helpers for swap and dup
-void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val,
-                                                 Register tag) {
+// Helpers for swap and dup
+void InterpreterMacroAssembler::load_ptr(int n, Register val) {
   movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
-  if (TaggedStackInterpreter) {
-    movptr(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)));
-  }
-}
-
-void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
-                                                  Register tag) {
-  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
-  if (TaggedStackInterpreter) {
-    movptr(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag);
-  }
-}
-
-
-// Tagged local support
-void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) {
-  if (TaggedStackInterpreter) {
-    if (tag == frame::TagCategory2) {
-      movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)frame::TagValue);
-      movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)frame::TagValue);
-    } else {
-      movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)tag);
-    }
-  }
-}
-
-void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) {
-  if (TaggedStackInterpreter) {
-    if (tag == frame::TagCategory2) {
-      movptr(Address(rdi, idx, Interpreter::stackElementScale(),
-                  Interpreter::local_tag_offset_in_bytes(1)), (int32_t)frame::TagValue);
-      movptr(Address(rdi, idx, Interpreter::stackElementScale(),
-                    Interpreter::local_tag_offset_in_bytes(0)), (int32_t)frame::TagValue);
-    } else {
-      movptr(Address(rdi, idx, Interpreter::stackElementScale(),
-                               Interpreter::local_tag_offset_in_bytes(0)), (int32_t)tag);
-    }
-  }
-}
-
-void InterpreterMacroAssembler::tag_local(Register tag, Register idx) {
-  if (TaggedStackInterpreter) {
-    // can only be TagValue or TagReference
-    movptr(Address(rdi, idx, Interpreter::stackElementScale(),
-                           Interpreter::local_tag_offset_in_bytes(0)), tag);
-  }
 }
 
-
-void InterpreterMacroAssembler::tag_local(Register tag, int n) {
-  if (TaggedStackInterpreter) {
-    // can only be TagValue or TagReference
-    movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), tag);
-  }
+void InterpreterMacroAssembler::store_ptr(int n, Register val) {
+  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
 }
 
-#ifdef ASSERT
-void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, int n) {
-  if (TaggedStackInterpreter) {
-     frame::Tag t = tag;
-    if (tag == frame::TagCategory2) {
-      Label nbl;
-      t = frame::TagValue;  // change to what is stored in locals
-      cmpptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)t);
-      jcc(Assembler::equal, nbl);
-      stop("Local tag is bad for long/double");
-      bind(nbl);
-    }
-    Label notBad;
-    cmpptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)t);
-    jcc(Assembler::equal, notBad);
-    // Also compare if the local value is zero, then the tag might
-    // not have been set coming from deopt.
-    cmpptr(Address(rdi, Interpreter::local_offset_in_bytes(n)), 0);
-    jcc(Assembler::equal, notBad);
-    stop("Local tag is bad");
-    bind(notBad);
-  }
-}
-
-void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, Register idx) {
-  if (TaggedStackInterpreter) {
-    frame::Tag t = tag;
-    if (tag == frame::TagCategory2) {
-      Label nbl;
-      t = frame::TagValue;  // change to what is stored in locals
-      cmpptr(Address(rdi, idx, Interpreter::stackElementScale(),
-                  Interpreter::local_tag_offset_in_bytes(1)), (int32_t)t);
-      jcc(Assembler::equal, nbl);
-      stop("Local tag is bad for long/double");
-      bind(nbl);
-    }
-    Label notBad;
-    cmpl(Address(rdi, idx, Interpreter::stackElementScale(),
-                  Interpreter::local_tag_offset_in_bytes(0)), (int32_t)t);
-    jcc(Assembler::equal, notBad);
-    // Also compare if the local value is zero, then the tag might
-    // not have been set coming from deopt.
-    cmpptr(Address(rdi, idx, Interpreter::stackElementScale(),
-                  Interpreter::local_offset_in_bytes(0)), 0);
-    jcc(Assembler::equal, notBad);
-    stop("Local tag is bad");
-    bind(notBad);
-
-  }
-}
-#endif // ASSERT
-
 void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point) {
   MacroAssembler::call_VM_leaf_base(entry_point, 0);
 }
--- a/src/cpu/x86/vm/interp_masm_x86_32.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/interp_masm_x86_32.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -85,16 +85,12 @@
   void d2ieee();                                           // truncate dtos to 64bits
 
   void pop_ptr(Register r = rax);
-  void pop_ptr(Register r, Register tag);
   void pop_i(Register r = rax);
   void pop_l(Register lo = rax, Register hi = rdx);
   void pop_f();
   void pop_d();
-  void pop_ftos_to_rsp();
-  void pop_dtos_to_rsp();
 
   void push_ptr(Register r = rax);
-  void push_ptr(Register r, Register tag);
   void push_i(Register r = rax);
   void push_l(Register lo = rax, Register hi = rdx);
   void push_d(Register r = rax);
@@ -112,33 +108,15 @@
   void pop(void* v ); // Add unimplemented ambiguous method
   void push(void* v );   // Add unimplemented ambiguous method
 
-  DEBUG_ONLY(void verify_stack_tag(frame::Tag t);)
-
-#endif // CC_INTERP
-
-#ifndef CC_INTERP
-
-  void empty_expression_stack()                            {
-       movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
-      // NULL last_sp until next java call
-      movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
+  void empty_expression_stack() {
+    movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
+    // NULL last_sp until next java call
+    movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
   }
 
-  // Tagged stack helpers for swap and dup
-  void load_ptr_and_tag(int n, Register val, Register tag);
-  void store_ptr_and_tag(int n, Register val, Register tag);
-
-  // Tagged Local support
-
-  void tag_local(frame::Tag tag, int n);
-  void tag_local(Register tag, int n);
-  void tag_local(frame::Tag tag, Register idx);
-  void tag_local(Register tag, Register idx);
-
-#ifdef ASSERT
-  void verify_local_tag(frame::Tag tag, int n);
-  void verify_local_tag(frame::Tag tag, Register idx);
-#endif // ASSERT
+  // Helpers for swap and dup
+  void load_ptr(int n, Register val);
+  void store_ptr(int n, Register val);
 
   // Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls
   void super_call_VM_leaf(address entry_point);
--- a/src/cpu/x86/vm/interp_masm_x86_64.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp	Wed May 19 10:22:39 2010 -0700
@@ -264,113 +264,51 @@
 
 // Java Expression Stack
 
-#ifdef ASSERT
-// Verifies that the stack tag matches.  Must be called before the stack
-// value is popped off the stack.
-void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t) {
-  if (TaggedStackInterpreter) {
-    frame::Tag tag = t;
-    if (t == frame::TagCategory2) {
-      tag = frame::TagValue;
-      Label hokay;
-      cmpptr(Address(rsp, 3*wordSize), (int32_t)tag);
-      jcc(Assembler::equal, hokay);
-      stop("Java Expression stack tag high value is bad");
-      bind(hokay);
-    }
-    Label okay;
-    cmpptr(Address(rsp, wordSize), (int32_t)tag);
-    jcc(Assembler::equal, okay);
-    // Also compare if the stack value is zero, then the tag might
-    // not have been set coming from deopt.
-    cmpptr(Address(rsp, 0), 0);
-    jcc(Assembler::equal, okay);
-    stop("Java Expression stack tag value is bad");
-    bind(okay);
-  }
-}
-#endif // ASSERT
-
 void InterpreterMacroAssembler::pop_ptr(Register r) {
-  debug_only(verify_stack_tag(frame::TagReference));
   pop(r);
-  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
-}
-
-void InterpreterMacroAssembler::pop_ptr(Register r, Register tag) {
-  pop(r);
-  if (TaggedStackInterpreter) pop(tag);
 }
 
 void InterpreterMacroAssembler::pop_i(Register r) {
   // XXX can't use pop currently, upper half non clean
-  debug_only(verify_stack_tag(frame::TagValue));
   movl(r, Address(rsp, 0));
   addptr(rsp, wordSize);
-  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
 }
 
 void InterpreterMacroAssembler::pop_l(Register r) {
-  debug_only(verify_stack_tag(frame::TagCategory2));
   movq(r, Address(rsp, 0));
-  addptr(rsp, 2 * Interpreter::stackElementSize());
+  addptr(rsp, 2 * Interpreter::stackElementSize);
 }
 
 void InterpreterMacroAssembler::pop_f(XMMRegister r) {
-  debug_only(verify_stack_tag(frame::TagValue));
   movflt(r, Address(rsp, 0));
   addptr(rsp, wordSize);
-  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
 }
 
 void InterpreterMacroAssembler::pop_d(XMMRegister r) {
-  debug_only(verify_stack_tag(frame::TagCategory2));
   movdbl(r, Address(rsp, 0));
-  addptr(rsp, 2 * Interpreter::stackElementSize());
+  addptr(rsp, 2 * Interpreter::stackElementSize);
 }
 
 void InterpreterMacroAssembler::push_ptr(Register r) {
-  if (TaggedStackInterpreter) push(frame::TagReference);
-  push(r);
-}
-
-void InterpreterMacroAssembler::push_ptr(Register r, Register tag) {
-  if (TaggedStackInterpreter) push(tag);
   push(r);
 }
 
 void InterpreterMacroAssembler::push_i(Register r) {
-  if (TaggedStackInterpreter) push(frame::TagValue);
   push(r);
 }
 
 void InterpreterMacroAssembler::push_l(Register r) {
-  if (TaggedStackInterpreter) {
-    push(frame::TagValue);
-    subptr(rsp, 1 * wordSize);
-    push(frame::TagValue);
-    subptr(rsp, 1 * wordSize);
-  } else {
-    subptr(rsp, 2 * wordSize);
-  }
+  subptr(rsp, 2 * wordSize);
   movq(Address(rsp, 0), r);
 }
 
 void InterpreterMacroAssembler::push_f(XMMRegister r) {
-  if (TaggedStackInterpreter) push(frame::TagValue);
   subptr(rsp, wordSize);
   movflt(Address(rsp, 0), r);
 }
 
 void InterpreterMacroAssembler::push_d(XMMRegister r) {
-  if (TaggedStackInterpreter) {
-    push(frame::TagValue);
-    subptr(rsp, 1 * wordSize);
-    push(frame::TagValue);
-    subptr(rsp, 1 * wordSize);
-  } else {
-    subptr(rsp, 2 * wordSize);
-  }
+  subptr(rsp, 2 * wordSize);
   movdbl(Address(rsp, 0), r);
 }
 
@@ -407,117 +345,15 @@
 }
 
 
-
-
-// Tagged stack helpers for swap and dup
-void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val,
-                                                 Register tag) {
+// Helpers for swap and dup
+void InterpreterMacroAssembler::load_ptr(int n, Register val) {
   movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
-  if (TaggedStackInterpreter) {
-    movptr(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)));
-  }
-}
-
-void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
-                                                  Register tag) {
-  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
-  if (TaggedStackInterpreter) {
-    movptr(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag);
-  }
-}
-
-
-// Tagged local support
-void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) {
-  if (TaggedStackInterpreter) {
-    if (tag == frame::TagCategory2) {
-      movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)),
-           (int32_t)frame::TagValue);
-      movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)),
-           (int32_t)frame::TagValue);
-    } else {
-      movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)tag);
-    }
-  }
-}
-
-void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) {
-  if (TaggedStackInterpreter) {
-    if (tag == frame::TagCategory2) {
-      movptr(Address(r14, idx, Address::times_8,
-                  Interpreter::local_tag_offset_in_bytes(1)), (int32_t)frame::TagValue);
-      movptr(Address(r14, idx, Address::times_8,
-                  Interpreter::local_tag_offset_in_bytes(0)), (int32_t)frame::TagValue);
-    } else {
-      movptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)),
-           (int32_t)tag);
-    }
-  }
-}
-
-void InterpreterMacroAssembler::tag_local(Register tag, Register idx) {
-  if (TaggedStackInterpreter) {
-    // can only be TagValue or TagReference
-    movptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), tag);
-  }
 }
 
-
-void InterpreterMacroAssembler::tag_local(Register tag, int n) {
-  if (TaggedStackInterpreter) {
-    // can only be TagValue or TagReference
-    movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), tag);
-  }
+void InterpreterMacroAssembler::store_ptr(int n, Register val) {
+  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
 }
 
-#ifdef ASSERT
-void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, int n) {
-  if (TaggedStackInterpreter) {
-     frame::Tag t = tag;
-    if (tag == frame::TagCategory2) {
-      Label nbl;
-      t = frame::TagValue;  // change to what is stored in locals
-      cmpptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)t);
-      jcc(Assembler::equal, nbl);
-      stop("Local tag is bad for long/double");
-      bind(nbl);
-    }
-    Label notBad;
-    cmpq(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)t);
-    jcc(Assembler::equal, notBad);
-    // Also compare if the local value is zero, then the tag might
-    // not have been set coming from deopt.
-    cmpptr(Address(r14, Interpreter::local_offset_in_bytes(n)), 0);
-    jcc(Assembler::equal, notBad);
-    stop("Local tag is bad");
-    bind(notBad);
-  }
-}
-
-void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, Register idx) {
-  if (TaggedStackInterpreter) {
-    frame::Tag t = tag;
-    if (tag == frame::TagCategory2) {
-      Label nbl;
-      t = frame::TagValue;  // change to what is stored in locals
-      cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(1)), (int32_t)t);
-      jcc(Assembler::equal, nbl);
-      stop("Local tag is bad for long/double");
-      bind(nbl);
-    }
-    Label notBad;
-    cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), (int32_t)t);
-    jcc(Assembler::equal, notBad);
-    // Also compare if the local value is zero, then the tag might
-    // not have been set coming from deopt.
-    cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_offset_in_bytes(0)), 0);
-    jcc(Assembler::equal, notBad);
-    stop("Local tag is bad");
-    bind(notBad);
-  }
-}
-#endif // ASSERT
-
 
 void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point) {
   MacroAssembler::call_VM_leaf_base(entry_point, 0);
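Note on the hunk above: the category-2 push/pop paths collapse to a single reserve-and-store once no tag words interleave the two value words. A minimal C++ model of the new layout (kWordSize is a stand-in constant, not a VM identifier):

    #include <cstdint>
    #include <cstring>

    const int kWordSize = 8;  // 64-bit word, assumed for this sketch

    // Models push_d: reserve two contiguous word slots, store the value once.
    void push_d(uint8_t*& sp, double v) {
      sp -= 2 * kWordSize;            // subptr(rsp, 2 * wordSize)
      std::memcpy(sp, &v, sizeof v);  // movdbl(Address(rsp, 0), r)
    }

    // Models pop_d: load the value, then release both slots in one add.
    double pop_d(uint8_t*& sp) {
      double v;
      std::memcpy(&v, sp, sizeof v);  // movdbl(r, Address(rsp, 0))
      sp += 2 * kWordSize;            // addptr(rsp, 2 * stackElementSize)
      return v;
    }

Under the old tagged layout each value word carried a companion tag slot, which is why the removed code pushed TagValue twice and walked rsp in word-sized steps.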
--- a/src/cpu/x86/vm/interp_masm_x86_64.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/interp_masm_x86_64.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -120,38 +120,16 @@
   void pop(TosState state); // transition vtos -> state
   void push(TosState state); // transition state -> vtos
 
-  // Tagged stack support, pop and push both tag and value.
-  void pop_ptr(Register r, Register tag);
-  void push_ptr(Register r, Register tag);
-#endif // CC_INTERP
-
-  DEBUG_ONLY(void verify_stack_tag(frame::Tag t);)
-
-#ifndef CC_INTERP
-
-  // Tagged stack helpers for swap and dup
-  void load_ptr_and_tag(int n, Register val, Register tag);
-  void store_ptr_and_tag(int n, Register val, Register tag);
-
-  // Tagged Local support
-  void tag_local(frame::Tag tag, int n);
-  void tag_local(Register tag, int n);
-  void tag_local(frame::Tag tag, Register idx);
-  void tag_local(Register tag, Register idx);
-
-#ifdef ASSERT
-  void verify_local_tag(frame::Tag tag, int n);
-  void verify_local_tag(frame::Tag tag, Register idx);
-#endif // ASSERT
-
-
-  void empty_expression_stack()
-  {
+  void empty_expression_stack() {
     movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
     // NULL last_sp until next java call
     movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
   }
 
+  // Helpers for swap and dup
+  void load_ptr(int n, Register val);
+  void store_ptr(int n, Register val);
+
   // Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls
   void super_call_VM_leaf(address entry_point);
   void super_call_VM_leaf(address entry_point, Register arg_1);
--- a/src/cpu/x86/vm/interpreterRT_x86_32.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/interpreterRT_x86_32.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -86,33 +86,23 @@
   address   _from;
   intptr_t* _to;
 
-#ifdef ASSERT
-  void verify_tag(frame::Tag t) {
-    assert(!TaggedStackInterpreter ||
-           *(intptr_t*)(_from+Interpreter::local_tag_offset_in_bytes(0)) == t, "wrong tag");
-  }
-#endif // ASSERT
-
   virtual void pass_int() {
     *_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
-    debug_only(verify_tag(frame::TagValue));
-    _from -= Interpreter::stackElementSize();
+    _from -= Interpreter::stackElementSize;
   }
 
   virtual void pass_long() {
     _to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
     _to[1] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
-    debug_only(verify_tag(frame::TagValue));
     _to += 2;
-    _from -= 2*Interpreter::stackElementSize();
+    _from -= 2*Interpreter::stackElementSize;
   }
 
   virtual void pass_object() {
     // pass address of from
     intptr_t from_addr = (intptr_t)(_from + Interpreter::local_offset_in_bytes(0));
     *_to++ = (*(intptr_t*)from_addr == 0) ? NULL_WORD : from_addr;
-    debug_only(verify_tag(frame::TagReference));
-    _from -= Interpreter::stackElementSize();
+    _from -= Interpreter::stackElementSize;
    }
 
  public:
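Note on the signature-handler hunk above: the handlers now walk the caller's locals in fixed word-sized steps, and the per-type verify_tag assertions disappear with the tags themselves. A sketch of the cursor arithmetic (kSlot stands in for Interpreter::stackElementSize; the real handlers also copy each value into the native argument area):

    const int kSlot = 4;  // assumed 32-bit stackElementSize for this file

    // Category-1 values (int, float, object) occupy one slot.
    void skip_category1(const char*& from) { from -= kSlot; }

    // Category-2 values (long, double) occupy two contiguous slots, read via
    // local_offset_in_bytes(1) and local_offset_in_bytes(0) before the step.
    void skip_category2(const char*& from) { from -= 2 * kSlot; }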
--- a/src/cpu/x86/vm/interpreterRT_x86_64.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/interpreterRT_x86_64.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -293,18 +293,10 @@
   intptr_t* _fp_identifiers;
   unsigned int _num_args;
 
-#ifdef ASSERT
-  void verify_tag(frame::Tag t) {
-    assert(!TaggedStackInterpreter ||
-           *(intptr_t*)(_from+Interpreter::local_tag_offset_in_bytes(0)) == t, "wrong tag");
-  }
-#endif // ASSERT
-
   virtual void pass_int()
   {
     jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
-    debug_only(verify_tag(frame::TagValue));
-    _from -= Interpreter::stackElementSize();
+    _from -= Interpreter::stackElementSize;
 
     if (_num_args < Argument::n_int_register_parameters_c-1) {
       *_reg_args++ = from_obj;
@@ -317,8 +309,7 @@
   virtual void pass_long()
   {
     intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
-    debug_only(verify_tag(frame::TagValue));
-    _from -= 2*Interpreter::stackElementSize();
+    _from -= 2*Interpreter::stackElementSize;
 
     if (_num_args < Argument::n_int_register_parameters_c-1) {
       *_reg_args++ = from_obj;
@@ -331,8 +322,7 @@
   virtual void pass_object()
   {
     intptr_t *from_addr = (intptr_t*)(_from + Interpreter::local_offset_in_bytes(0));
-    debug_only(verify_tag(frame::TagReference));
-    _from -= Interpreter::stackElementSize();
+    _from -= Interpreter::stackElementSize;
     if (_num_args < Argument::n_int_register_parameters_c-1) {
       *_reg_args++ = (*from_addr == 0) ? NULL : (intptr_t) from_addr;
       _num_args++;
@@ -344,8 +334,7 @@
   virtual void pass_float()
   {
     jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
-    debug_only(verify_tag(frame::TagValue));
-    _from -= Interpreter::stackElementSize();
+    _from -= Interpreter::stackElementSize;
 
     if (_num_args < Argument::n_float_register_parameters_c-1) {
       *_reg_args++ = from_obj;
@@ -359,8 +348,7 @@
   virtual void pass_double()
   {
     intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
-    debug_only(verify_tag(frame::TagValue));
-    _from -= 2*Interpreter::stackElementSize();
+    _from -= 2*Interpreter::stackElementSize;
 
     if (_num_args < Argument::n_float_register_parameters_c-1) {
       *_reg_args++ = from_obj;
@@ -397,18 +385,10 @@
   unsigned int _num_int_args;
   unsigned int _num_fp_args;
 
-#ifdef ASSERT
-  void verify_tag(frame::Tag t) {
-    assert(!TaggedStackInterpreter ||
-           *(intptr_t*)(_from+Interpreter::local_tag_offset_in_bytes(0)) == t, "wrong tag");
-  }
-#endif // ASSERT
-
   virtual void pass_int()
   {
     jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
-    debug_only(verify_tag(frame::TagValue));
-    _from -= Interpreter::stackElementSize();
+    _from -= Interpreter::stackElementSize;
 
     if (_num_int_args < Argument::n_int_register_parameters_c-1) {
       *_int_args++ = from_obj;
@@ -421,8 +401,7 @@
   virtual void pass_long()
   {
     intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
-    debug_only(verify_tag(frame::TagValue));
-    _from -= 2*Interpreter::stackElementSize();
+    _from -= 2*Interpreter::stackElementSize;
 
     if (_num_int_args < Argument::n_int_register_parameters_c-1) {
       *_int_args++ = from_obj;
@@ -435,8 +414,7 @@
   virtual void pass_object()
   {
     intptr_t *from_addr = (intptr_t*)(_from + Interpreter::local_offset_in_bytes(0));
-    debug_only(verify_tag(frame::TagReference));
-    _from -= Interpreter::stackElementSize();
+    _from -= Interpreter::stackElementSize;
 
     if (_num_int_args < Argument::n_int_register_parameters_c-1) {
       *_int_args++ = (*from_addr == 0) ? NULL : (intptr_t)from_addr;
@@ -449,8 +427,7 @@
   virtual void pass_float()
   {
     jint from_obj = *(jint*)(_from+Interpreter::local_offset_in_bytes(0));
-    debug_only(verify_tag(frame::TagValue));
-    _from -= Interpreter::stackElementSize();
+    _from -= Interpreter::stackElementSize;
 
     if (_num_fp_args < Argument::n_float_register_parameters_c) {
       *_fp_args++ = from_obj;
@@ -463,7 +440,7 @@
   virtual void pass_double()
   {
     intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
-    _from -= 2*Interpreter::stackElementSize();
+    _from -= 2*Interpreter::stackElementSize;
 
     if (_num_fp_args < Argument::n_float_register_parameters_c) {
       *_fp_args++ = from_obj;
--- a/src/cpu/x86/vm/interpreter_x86.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/interpreter_x86.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,39 +31,16 @@
   // the fpu stack.
   static const int return_sentinel;
 
-
-  static Address::ScaleFactor stackElementScale() {
-    return TaggedStackInterpreter? Address::times_8 : Address::times_4;
-  }
+  static Address::ScaleFactor stackElementScale() { return Address::times_4; }
 
   // Offset from rsp (which points to the last stack element)
-  static int expr_offset_in_bytes(int i) { return stackElementSize()*i ; }
-  static int expr_tag_offset_in_bytes(int i) {
-    assert(TaggedStackInterpreter, "should not call this");
-    return expr_offset_in_bytes(i) + wordSize;
-  }
-
-  // Support for Tagged Stacks
+  static int expr_offset_in_bytes(int i) { return stackElementSize * i; }
 
   // Stack index relative to tos (which points at value)
-  static int expr_index_at(int i)     {
-    return stackElementWords() * i;
-  }
-
-  static int expr_tag_index_at(int i) {
-    assert(TaggedStackInterpreter, "should not call this");
-    // tag is one word above java stack element
-    return stackElementWords() * i + 1;
-  }
+  static int expr_index_at(int i)        { return stackElementWords * i; }
 
  // Already negated by the C++ interpreter
-  static int local_index_at(int i)     {
-    assert(i<=0, "local direction already negated");
-    return stackElementWords() * i + (value_offset_in_bytes()/wordSize);
+  static int local_index_at(int i) {
+    assert(i <= 0, "local direction already negated");
+    return stackElementWords * i;
   }
-
-  static int local_tag_index_at(int i) {
-    assert(i<=0, "local direction already negated");
-    assert(TaggedStackInterpreter, "should not call this");
-    return stackElementWords() * i + (tag_offset_in_bytes()/wordSize);
-  }
--- a/src/cpu/x86/vm/interpreter_x86_32.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/interpreter_x86_32.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -131,14 +131,7 @@
   //       java methods.  Interpreter::method_kind(...) will select
   //       this entry point for the corresponding methods in JDK 1.3.
   // get argument
-  if (TaggedStackInterpreter) {
-    __ pushl(Address(rsp, 3*wordSize));  // push hi (and note rsp -= wordSize)
-    __ pushl(Address(rsp, 2*wordSize));  // push lo
-    __ fld_d(Address(rsp, 0));           // get double in ST0
-    __ addptr(rsp, 2*wordSize);
-  } else {
-    __ fld_d(Address(rsp, 1*wordSize));
-  }
+  __ fld_d(Address(rsp, 1*wordSize));
   switch (kind) {
     case Interpreter::java_lang_math_sin :
         __ trigfunc('s');
--- a/src/cpu/x86/vm/methodHandles_x86.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/methodHandles_x86.cpp	Wed May 19 10:22:39 2010 -0700
@@ -127,7 +127,8 @@
                                      RegisterOrConstant arg_slots,
                                      int arg_mask,
                                      Register rax_argslot,
-                                     Register rbx_temp, Register rdx_temp) {
+                                     Register rbx_temp, Register rdx_temp, Register temp3_reg) {
+  assert(temp3_reg == noreg, "temp3 not required");
   assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
                              (!arg_slots.is_register() ? rsp : arg_slots.as_register()));
 
@@ -185,7 +186,8 @@
 void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     Register rax_argslot,
-                                    Register rbx_temp, Register rdx_temp) {
+                                     Register rbx_temp, Register rdx_temp, Register temp3_reg) {
+  assert(temp3_reg == noreg, "temp3 not required");
   assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
                              (!arg_slots.is_register() ? rsp : arg_slots.as_register()));
 
@@ -260,6 +262,22 @@
 }
 #endif //PRODUCT
 
+// which conversion op types are implemented here?
+int MethodHandles::adapter_conversion_ops_supported_mask() {
+  return ((1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_CHECK_CAST)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_ROT_ARGS)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_DUP_ARGS)
+         |(1<<sun_dyn_AdapterMethodHandle::OP_DROP_ARGS)
+         //|(1<<sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
+         );
+  // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
+}
+
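A caller checks the returned mask by shifting the op number down to bit 0. A hedged sketch (op is one of the sun_dyn_AdapterMethodHandle OP_* constants):

    // True iff the adapter op is compiled into this platform's stubs.
    static bool conversion_op_supported(int mask, int op) {
      return ((mask >> op) & 1) != 0;
    }

With the OP_SPREAD_ARGS bit commented out above, such a query reports it unsupported until the noted crash is resolved.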
 // Generate an "entry" field for a method handle.
 // This determines how the method handle will respond to calls.
 void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
@@ -498,7 +516,7 @@
 #ifndef _LP64
         if (arg_slots == 2) {
           __ movl(rdx_temp, prim_value_addr.plus_disp(wordSize));
-          __ movl(Address(rax_argslot, Interpreter::stackElementSize()), rdx_temp);
+          __ movl(Address(rax_argslot, Interpreter::stackElementSize), rdx_temp);
         }
 #endif //_LP64
       }
@@ -594,7 +612,7 @@
           __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
           remove_arg_slots(_masm, -stack_move_unit(),
                            rax_argslot, rbx_temp, rdx_temp);
-          vmarg = Address(rax_argslot, -Interpreter::stackElementSize());
+          vmarg = Address(rax_argslot, -Interpreter::stackElementSize);
           __ movl(rdx_temp, vmarg);
         }
         break;
@@ -663,8 +681,8 @@
       __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
       insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
                        rax_argslot, rbx_temp, rdx_temp);
-      Address vmarg1(rax_argslot, -Interpreter::stackElementSize());
-      Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize());
+      Address vmarg1(rax_argslot, -Interpreter::stackElementSize);
+      Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize);
 
       switch (ek) {
       case _adapter_opt_i2l:
@@ -716,7 +734,7 @@
         insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
                          rax_argslot, rbx_temp, rdx_temp);
       }
-      Address vmarg(rax_argslot, -Interpreter::stackElementSize());
+      Address vmarg(rax_argslot, -Interpreter::stackElementSize);
 
 #ifdef _LP64
       if (ek == _adapter_opt_f2d) {
@@ -1014,7 +1032,7 @@
       // Array length checks out.  Now insert any required stack slots.
       if (length_constant == -1) {
         // Form a pointer to the end of the affected region.
-        __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize()));
+        __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize));
         // 'stack_move' is negative number of words to insert
         Register rdi_stack_move = rdi;
         __ movl2ptr(rdi_stack_move, rcx_amh_conversion);
@@ -1052,7 +1070,7 @@
         __ movptr(rbx_temp, Address(rsi_source, 0));
         __ movptr(Address(rax_argslot, 0), rbx_temp);
         __ addptr(rsi_source, type2aelembytes(elem_type));
-        __ addptr(rax_argslot, Interpreter::stackElementSize());
+        __ addptr(rax_argslot, Interpreter::stackElementSize);
         __ cmpptr(rax_argslot, rdx_argslot_limit);
         __ jccb(Assembler::less, loop);
       } else if (length_constant == 0) {
@@ -1065,7 +1083,7 @@
           __ movptr(rbx_temp, Address(rsi_array, elem_offset));
           __ movptr(Address(rax_argslot, slot_offset), rbx_temp);
           elem_offset += type2aelembytes(elem_type);
-           slot_offset += Interpreter::stackElementSize();
+           slot_offset += Interpreter::stackElementSize;
         }
       }
 
--- a/src/cpu/x86/vm/runtime_x86_32.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/runtime_x86_32.cpp	Wed May 19 10:22:39 2010 -0700
@@ -115,8 +115,8 @@
 
   // rax: exception handler for given <exception oop/exception pc>
 
-  // Restore SP from BP if the exception PC is a MethodHandle call.
-  __ cmpl(Address(rcx, JavaThread::is_method_handle_exception_offset()), 0);
+  // Restore SP from BP if the exception PC is a MethodHandle call site.
+  __ cmpl(Address(rcx, JavaThread::is_method_handle_return_offset()), 0);
   __ cmovptr(Assembler::notEqual, rsp, rbp);
 
   // We have a handler in rax, (could be deopt blob)
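The renamed flag drives a branch-free SP fix-up: compare the per-thread field against zero, then conditionally move rbp into rsp. The same two-instruction pattern, modeled in plain C++ (names are illustrative):

    #include <cstdint>

    // Models: cmpl(Address(thread, is_method_handle_return_offset()), 0)
    //         cmovptr(notEqual, rsp, rbp)
    uintptr_t unwind_sp(uintptr_t rsp, uintptr_t rbp, int is_mh_return) {
      return (is_mh_return != 0) ? rbp : rsp;
    }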
--- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Wed May 19 10:22:39 2010 -0700
@@ -503,34 +503,9 @@
 }
 
 
-// Helper function to put tags in interpreter stack.
-static void  tag_stack(MacroAssembler *masm, const BasicType sig, int st_off) {
-  if (TaggedStackInterpreter) {
-    int tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(0);
-    if (sig == T_OBJECT || sig == T_ARRAY) {
-      __ movptr(Address(rsp, tag_offset), frame::TagReference);
-    } else if (sig == T_LONG || sig == T_DOUBLE) {
-      int next_tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(1);
-      __ movptr(Address(rsp, next_tag_offset), frame::TagValue);
-      __ movptr(Address(rsp, tag_offset), frame::TagValue);
-    } else {
-      __ movptr(Address(rsp, tag_offset), frame::TagValue);
-    }
-  }
-}
-
-// Double and long values with Tagged stacks are not contiguous.
 static void move_c2i_double(MacroAssembler *masm, XMMRegister r, int st_off) {
-  int next_off = st_off - Interpreter::stackElementSize();
-  if (TaggedStackInterpreter) {
-   __ movdbl(Address(rsp, next_off), r);
-   // Move top half up and put tag in the middle.
-   __ movl(rdi, Address(rsp, next_off+wordSize));
-   __ movl(Address(rsp, st_off), rdi);
-   tag_stack(masm, T_DOUBLE, next_off);
-  } else {
-   __ movdbl(Address(rsp, next_off), r);
-  }
+  int next_off = st_off - Interpreter::stackElementSize;
+  __ movdbl(Address(rsp, next_off), r);
 }
 
 static void gen_c2i_adapter(MacroAssembler *masm,
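The c2i slot addressing is plain counting now. A worked model of st_off/next_off (kSlot assumed 4 bytes for the 32-bit VM), matching the "Say 4 args" table in the hunk below:

    #include <cassert>

    const int kSlot = 4;  // assumed 32-bit Interpreter::stackElementSize

    int st_off(int i, int total_args_passed) {
      return ((total_args_passed - 1) - i) * kSlot;
    }
    int next_off(int st) { return st - kSlot; }  // second slot of a long/double

    int main() {
      // With 4 args, arg 0 sits at the highest offset and arg 3 at offset 0.
      assert(st_off(0, 4) == 12);
      assert(st_off(3, 4) == 0);
      assert(next_off(st_off(0, 4)) == 8);
      return 0;
    }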
@@ -560,7 +535,7 @@
  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need.
-  int extraspace = total_args_passed * Interpreter::stackElementSize();
+  int extraspace = total_args_passed * Interpreter::stackElementSize;
 
   // Get return address
   __ pop(rax);
@@ -578,8 +553,8 @@
     }
 
     // st_off points to lowest address on stack.
-    int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize();
-    int next_off = st_off - Interpreter::stackElementSize();
+    int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize;
+    int next_off = st_off - Interpreter::stackElementSize;
 
     // Say 4 args:
     // i   st_off
@@ -601,7 +576,6 @@
       if (!r_2->is_valid()) {
         __ movl(rdi, Address(rsp, ld_off));
         __ movptr(Address(rsp, st_off), rdi);
-        tag_stack(masm, sig_bt[i], st_off);
       } else {
 
         // ld_off == LSW, ld_off+VMRegImpl::stack_slot_size == MSW
@@ -619,13 +593,11 @@
         __ movptr(Address(rsp, st_off), rax);
 #endif /* ASSERT */
 #endif // _LP64
-        tag_stack(masm, sig_bt[i], next_off);
       }
     } else if (r_1->is_Register()) {
       Register r = r_1->as_Register();
       if (!r_2->is_valid()) {
         __ movl(Address(rsp, st_off), r);
-        tag_stack(masm, sig_bt[i], st_off);
       } else {
         // long/double in gpr
         NOT_LP64(ShouldNotReachHere());
@@ -639,17 +611,14 @@
           __ movptr(Address(rsp, st_off), rax);
 #endif /* ASSERT */
           __ movptr(Address(rsp, next_off), r);
-          tag_stack(masm, sig_bt[i], next_off);
         } else {
           __ movptr(Address(rsp, st_off), r);
-          tag_stack(masm, sig_bt[i], st_off);
         }
       }
     } else {
       assert(r_1->is_XMMRegister(), "");
       if (!r_2->is_valid()) {
         __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
-        tag_stack(masm, sig_bt[i], st_off);
       } else {
         assert(sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG, "wrong type");
         move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
@@ -665,20 +634,9 @@
 }
 
 
-// For tagged stacks, double or long value aren't contiguous on the stack
-// so get them contiguous for the xmm load
 static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_sp, int ld_off) {
-  int next_val_off = ld_off - Interpreter::stackElementSize();
-  if (TaggedStackInterpreter) {
-    // use tag slot temporarily for MSW
-    __ movptr(rsi, Address(saved_sp, ld_off));
-    __ movptr(Address(saved_sp, next_val_off+wordSize), rsi);
-    __ movdbl(r, Address(saved_sp, next_val_off));
-    // restore tag
-    __ movptr(Address(saved_sp, next_val_off+wordSize), frame::TagValue);
-  } else {
-    __ movdbl(r, Address(saved_sp, next_val_off));
-  }
+  int next_val_off = ld_off - Interpreter::stackElementSize;
+  __ movdbl(r, Address(saved_sp, next_val_off));
 }
 
 static void gen_i2c_adapter(MacroAssembler *masm,
@@ -797,9 +755,9 @@
     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
             "scrambled load targets?");
     // Load in argument order going down.
-    int ld_off = (total_args_passed - i)*Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
+    int ld_off = (total_args_passed - i) * Interpreter::stackElementSize;
    // next_off points one slot below ld_off (second slot of a long/double)
-    int next_off = ld_off - Interpreter::stackElementSize();
+    int next_off = ld_off - Interpreter::stackElementSize;
     //
     //
     //
@@ -2322,7 +2280,7 @@
 // this function returns the size adjustment (in number of words) to a c2i adapter
 // activation for use during deoptimization
 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
-  return (callee_locals - callee_parameters) * Interpreter::stackElementWords();
+  return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
 }
 
 
--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Wed May 19 10:22:39 2010 -0700
@@ -452,22 +452,6 @@
   __ bind(L);
 }
 
-// Helper function to put tags in interpreter stack.
-static void  tag_stack(MacroAssembler *masm, const BasicType sig, int st_off) {
-  if (TaggedStackInterpreter) {
-    int tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(0);
-    if (sig == T_OBJECT || sig == T_ARRAY) {
-      __ movptr(Address(rsp, tag_offset), (int32_t) frame::TagReference);
-    } else if (sig == T_LONG || sig == T_DOUBLE) {
-      int next_tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(1);
-      __ movptr(Address(rsp, next_tag_offset), (int32_t) frame::TagValue);
-      __ movptr(Address(rsp, tag_offset), (int32_t) frame::TagValue);
-    } else {
-      __ movptr(Address(rsp, tag_offset), (int32_t) frame::TagValue);
-    }
-  }
-}
-
 
 static void gen_c2i_adapter(MacroAssembler *masm,
                             int total_args_passed,
@@ -489,7 +473,7 @@
   // we also account for the return address location since
   // we store it first rather than hold it in rax across all the shuffling
 
-  int extraspace = (total_args_passed * Interpreter::stackElementSize()) + wordSize;
+  int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize;
 
   // stack is aligned, keep it that way
   extraspace = round_to(extraspace, 2*wordSize);
@@ -513,9 +497,8 @@
     }
 
     // offset to start parameters
-    int st_off   = (total_args_passed - i) * Interpreter::stackElementSize() +
-                   Interpreter::value_offset_in_bytes();
-    int next_off = st_off - Interpreter::stackElementSize();
+    int st_off   = (total_args_passed - i) * Interpreter::stackElementSize;
+    int next_off = st_off - Interpreter::stackElementSize;
 
     // Say 4 args:
     // i   st_off
@@ -543,7 +526,6 @@
         // sign extend??
         __ movl(rax, Address(rsp, ld_off));
         __ movptr(Address(rsp, st_off), rax);
-        tag_stack(masm, sig_bt[i], st_off);
 
       } else {
 
@@ -560,10 +542,8 @@
           __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
           __ movptr(Address(rsp, st_off), rax);
 #endif /* ASSERT */
-          tag_stack(masm, sig_bt[i], next_off);
         } else {
           __ movq(Address(rsp, st_off), rax);
-          tag_stack(masm, sig_bt[i], st_off);
         }
       }
     } else if (r_1->is_Register()) {
@@ -572,7 +552,6 @@
         // must be only an int (or less ) so move only 32bits to slot
         // why not sign extend??
         __ movl(Address(rsp, st_off), r);
-        tag_stack(masm, sig_bt[i], st_off);
       } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
         // T_DOUBLE and T_LONG use two slots in the interpreter
@@ -584,10 +563,8 @@
           __ movptr(Address(rsp, st_off), rax);
 #endif /* ASSERT */
           __ movq(Address(rsp, next_off), r);
-          tag_stack(masm, sig_bt[i], next_off);
         } else {
           __ movptr(Address(rsp, st_off), r);
-          tag_stack(masm, sig_bt[i], st_off);
         }
       }
     } else {
@@ -595,7 +572,6 @@
       if (!r_2->is_valid()) {
         // only a float use just part of the slot
         __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
-        tag_stack(masm, sig_bt[i], st_off);
       } else {
 #ifdef ASSERT
         // Overwrite the unused slot with known junk
@@ -603,7 +579,6 @@
         __ movptr(Address(rsp, st_off), rax);
 #endif /* ASSERT */
         __ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
-        tag_stack(masm, sig_bt[i], next_off);
       }
     }
   }
@@ -688,9 +663,9 @@
     assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
             "scrambled load targets?");
     // Load in argument order going down.
-    int ld_off = (total_args_passed - i)*Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
+    int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
    // next_off points one slot below ld_off (second slot of a long/double)
-    int next_off = ld_off - Interpreter::stackElementSize();
+    int next_off = ld_off - Interpreter::stackElementSize;
     //
     //
     //
@@ -2535,7 +2510,7 @@
 // this function returns the size adjustment (in number of words) to a c2i adapter
 // activation for use during deoptimization
 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
-  return (callee_locals - callee_parameters) * Interpreter::stackElementWords();
+  return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
 }
 
 
@@ -3328,8 +3303,8 @@
 
   // rax: exception handler
 
-  // Restore SP from BP if the exception PC is a MethodHandle call.
-  __ cmpl(Address(r15_thread, JavaThread::is_method_handle_exception_offset()), 0);
+  // Restore SP from BP if the exception PC is a MethodHandle call site.
+  __ cmpl(Address(r15_thread, JavaThread::is_method_handle_return_offset()), 0);
   __ cmovptr(Assembler::notEqual, rsp, rbp);
 
   // We have a handler in rax (could be deopt blob).
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Wed May 19 10:22:39 2010 -0700
@@ -139,7 +139,7 @@
     // stub code
     __ enter();
     __ movptr(rcx, parameter_size);              // parameter counter
-    __ shlptr(rcx, Interpreter::logStackElementSize()); // convert parameter count to bytes
+    __ shlptr(rcx, Interpreter::logStackElementSize); // convert parameter count to bytes
     __ addptr(rcx, locals_count_in_bytes);       // reserve space for register saves
     __ subptr(rsp, rcx);
     __ andptr(rsp, -(StackAlignmentInBytes));    // Align stack
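The entry stub converts the parameter count to bytes with a shift, then aligns the stack downward. Modeled with illustrative constants (kLog stands in for Interpreter::logStackElementSize):

    #include <cassert>
    #include <cstdint>

    const int kLog = 2;                  // log2 of a 4-byte slot, assumed
    const uintptr_t kStackAlignment = 16;

    int main() {
      int count = 5;
      int bytes = count << kLog;         // models shlptr(rcx, logStackElementSize)
      assert(bytes == 20);

      uintptr_t rsp = 0x1000 - bytes;
      rsp &= ~(kStackAlignment - 1);     // models andptr(rsp, -StackAlignmentInBytes)
      assert(rsp % kStackAlignment == 0);
      return 0;
    }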
@@ -194,12 +194,6 @@
     __ xorptr(rbx, rbx);
 
     __ BIND(loop);
-    if (TaggedStackInterpreter) {
-      __ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(),
-                      -2*wordSize));                          // get tag
-      __ movptr(Address(rsp, rbx, Interpreter::stackElementScale(),
-                      Interpreter::expr_tag_offset_in_bytes(0)), rax);     // store tag
-    }
 
     // get parameter
     __ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(), -wordSize));
@@ -430,7 +424,7 @@
     __ verify_oop(exception_oop);
 
     // Restore SP from BP if the exception PC is a MethodHandle call site.
-    __ cmpl(Address(thread, JavaThread::is_method_handle_exception_offset()), 0);
+    __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
     __ cmovptr(Assembler::notEqual, rsp, rbp);
 
     // continue at exception handler (return address removed)
@@ -812,7 +806,7 @@
     Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
     // Copy 64-byte chunks
     __ jmpb(L_copy_64_bytes);
-    __ align(16);
+    __ align(OptoLoopAlignment);
   __ BIND(L_copy_64_bytes_loop);
 
     if(UseUnalignedLoadStores) {
@@ -874,7 +868,7 @@
     Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
     // Copy 64-byte chunks
     __ jmpb(L_copy_64_bytes);
-    __ align(16);
+    __ align(OptoLoopAlignment);
   __ BIND(L_copy_64_bytes_loop);
     __ movq(mmx0, Address(from, 0));
     __ movq(mmx1, Address(from, 8));
@@ -1144,7 +1138,7 @@
       __ movl(Address(to, count, sf, 0), rdx);
       __ jmpb(L_copy_8_bytes);
 
-      __ align(16);
+      __ align(OptoLoopAlignment);
       // Move 8 bytes
     __ BIND(L_copy_8_bytes_loop);
       if (UseXMMForArrayCopy) {
@@ -1235,7 +1229,7 @@
       }
     } else {
       __ jmpb(L_copy_8_bytes);
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_copy_8_bytes_loop);
       __ fild_d(Address(from, 0));
       __ fistp_d(Address(from, to_from, Address::times_1));
@@ -1282,7 +1276,7 @@
 
     __ jmpb(L_copy_8_bytes);
 
-    __ align(16);
+    __ align(OptoLoopAlignment);
   __ BIND(L_copy_8_bytes_loop);
     if (VM_Version::supports_mmx()) {
       if (UseXMMForArrayCopy) {
@@ -1454,7 +1448,7 @@
     // Loop control:
     //   for (count = -count; count != 0; count++)
     // Base pointers src, dst are biased by 8*count,to last element.
-    __ align(16);
+    __ align(OptoLoopAlignment);
 
     __ BIND(L_store_element);
     __ movptr(to_element_addr, elem);     // store the oop
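Each __ align(OptoLoopAlignment) in this file pads the instruction stream so the loop head starts on the (power-of-two) boundary chosen by the OptoLoopAlignment flag instead of a hard-coded 16. The rounding itself is the usual trick:

    #include <cstdint>

    // Round pc up to the next multiple of alignment (a power of two).
    uintptr_t align_up(uintptr_t pc, uintptr_t alignment) {
      return (pc + alignment - 1) & ~(alignment - 1);
    }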
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Wed May 19 10:22:39 2010 -0700
@@ -278,11 +278,6 @@
     __ movptr(c_rarg2, parameters);       // parameter pointer
     __ movl(c_rarg1, c_rarg3);            // parameter counter is in c_rarg1
     __ BIND(loop);
-    if (TaggedStackInterpreter) {
-      __ movl(rax, Address(c_rarg2, 0)); // get tag
-      __ addptr(c_rarg2, wordSize);      // advance to next tag
-      __ push(rax);                      // pass tag
-    }
     __ movptr(rax, Address(c_rarg2, 0));// get parameter
     __ addptr(c_rarg2, wordSize);       // advance to next parameter
     __ decrementl(c_rarg1);             // decrement counter
@@ -871,9 +866,8 @@
   }
 
   address generate_fp_mask(const char *stub_name, int64_t mask) {
+    __ align(CodeEntryAlignment);
     StubCodeMark mark(this, "StubRoutines", stub_name);
-
-    __ align(16);
     address start = __ pc();
 
     __ emit_data64( mask, relocInfo::none );
@@ -1268,7 +1262,7 @@
                              Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
     DEBUG_ONLY(__ stop("enter at entry label, not here"));
     Label L_loop;
-    __ align(16);
+    __ align(OptoLoopAlignment);
   __ BIND(L_loop);
     if(UseUnalignedLoadStores) {
       __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
@@ -1309,7 +1303,7 @@
                               Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
     DEBUG_ONLY(__ stop("enter at entry label, not here"));
     Label L_loop;
-    __ align(16);
+    __ align(OptoLoopAlignment);
   __ BIND(L_loop);
     if(UseUnalignedLoadStores) {
       __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
@@ -2229,7 +2223,7 @@
     // Loop control:
     //   for (count = -count; count != 0; count++)
     // Base pointers src, dst are biased by 8*(count-1),to last element.
-    __ align(16);
+    __ align(OptoLoopAlignment);
 
     __ BIND(L_store_element);
     __ store_heap_oop(to_element_addr, rax_oop);  // store the oop
--- a/src/cpu/x86/vm/templateInterpreter_x86.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/templateInterpreter_x86.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,8 +28,8 @@
   // Size of interpreter code.  Increase if too small.  Interpreter will
  // fail with a guarantee ("not enough space for interpreter generation")
  // if too small.
-  // Run with +PrintInterpreterSize to get the VM to print out the size.
-  // Max size with JVMTI and TaggedStackInterpreter
+  // Run with +PrintInterpreter to get the VM to print out the size.
+  // Max size with JVMTI
 #ifdef AMD64
   const static int InterpreterCodeSize = 200 * 1024;
 #else
--- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Wed May 19 10:22:39 2010 -0700
@@ -305,7 +305,6 @@
     case T_FLOAT  :
       { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
         __ pop(t);                            // remove return address first
-        __ pop_dtos_to_rsp();
         // Must return a result for interpreter or compiler. In SSE
         // mode, results are returned in xmm0 and the FPU stack must
         // be empty.
@@ -468,7 +467,7 @@
   // see if the frame is greater than one page in size. If so,
   // then we need to verify there is enough stack space remaining
   // for the additional locals.
-  __ cmpl(rdx, (page_size - overhead_size)/Interpreter::stackElementSize());
+  __ cmpl(rdx, (page_size - overhead_size)/Interpreter::stackElementSize);
   __ jcc(Assembler::belowEqual, after_frame_check);
 
   // compute rsp as if this were going to be the last frame on
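The guard above compares the extra-locals count against a compile-time constant; only frames past it take the slow path that probes remaining stack. Illustrative numbers only (kOverhead is a made-up placeholder for overhead_size):

    const int kPageSize = 4096;
    const int kOverhead = 80;   // hypothetical overhead_size
    const int kSlot     = 4;    // untagged 32-bit stackElementSize

    // Largest extra-locals count that still takes the fast path: 1004 here.
    const int kMaxFastLocals = (kPageSize - kOverhead) / kSlot;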
@@ -882,7 +881,7 @@
   __ get_method(method);
   __ verify_oop(method);
   __ load_unsigned_short(t, Address(method, methodOopDesc::size_of_parameters_offset()));
-  __ shlptr(t, Interpreter::logStackElementSize());
+  __ shlptr(t, Interpreter::logStackElementSize);
   __ addptr(t, 2*wordSize);     // allocate two more slots for JNIEnv and possible mirror
   __ subptr(rsp, t);
   __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
@@ -1225,9 +1224,6 @@
     __ testl(rdx, rdx);
     __ jcc(Assembler::lessEqual, exit);               // do nothing if rdx <= 0
     __ bind(loop);
-    if (TaggedStackInterpreter) {
-      __ push((int32_t)NULL_WORD);                    // push tag
-    }
     __ push((int32_t)NULL_WORD);                      // initialize local variables
     __ decrement(rdx);                                // until everything initialized
     __ jcc(Assembler::greater, loop);
@@ -1463,7 +1459,7 @@
 
   const int extra_stack = methodOopDesc::extra_stack_entries();
   const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
-                           Interpreter::stackElementWords();
+                           Interpreter::stackElementWords;
   return overhead_size + method_stack + stub_code;
 }
 
@@ -1487,9 +1483,9 @@
   // NOTE: return size is in words not bytes
 
   // fixed size of an interpreter frame:
-  int max_locals = method->max_locals() * Interpreter::stackElementWords();
+  int max_locals = method->max_locals() * Interpreter::stackElementWords;
   int extra_locals = (method->max_locals() - method->size_of_parameters()) *
-                     Interpreter::stackElementWords();
+                     Interpreter::stackElementWords;
 
   int overhead = frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset;
 
@@ -1499,9 +1495,9 @@
 
 
   int size = overhead +
-         ((callee_locals - callee_param_count)*Interpreter::stackElementWords()) +
+         ((callee_locals - callee_param_count)*Interpreter::stackElementWords) +
          (moncount*frame::interpreter_frame_monitor_size()) +
-         tempcount*Interpreter::stackElementWords() + popframe_extra_args;
+         tempcount*Interpreter::stackElementWords + popframe_extra_args;
 
   if (interpreter_frame != NULL) {
 #ifdef ASSERT
@@ -1525,7 +1521,7 @@
 
     // Set last_sp
     intptr_t*  rsp = (intptr_t*) monbot  -
-                     tempcount*Interpreter::stackElementWords() -
+                     tempcount*Interpreter::stackElementWords -
                      popframe_extra_args;
     interpreter_frame->interpreter_frame_set_last_sp(rsp);
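The activation-size computation above, reduced to its arithmetic. A sketch in words (monitor size and friends left as parameters; only stackElementWords is pinned to the new constant):

    int size_activation_words(int overhead, int callee_locals,
                              int callee_param_count, int moncount,
                              int monitor_size, int tempcount,
                              int popframe_extra_args) {
      const int kStackElementWords = 1;  // untagged slot width in words
      return overhead
           + (callee_locals - callee_param_count) * kStackElementWords
           + moncount * monitor_size
           + tempcount * kStackElementWords
           + popframe_extra_args;
    }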
 
@@ -1625,7 +1621,7 @@
     __ get_method(rax);
     __ verify_oop(rax);
     __ load_unsigned_short(rax, Address(rax, in_bytes(methodOopDesc::size_of_parameters_offset())));
-    __ shlptr(rax, Interpreter::logStackElementSize());
+    __ shlptr(rax, Interpreter::logStackElementSize);
     __ restore_locals();
     __ subptr(rdi, rax);
     __ addptr(rdi, wordSize);
--- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Wed May 19 10:22:39 2010 -0700
@@ -199,7 +199,6 @@
                        in_bytes(constantPoolCacheOopDesc::base_offset()) +
                        3 * wordSize));
   __ andl(rbx, 0xFF);
-  if (TaggedStackInterpreter) __ shll(rbx, 1); // 2 slots per parameter.
   __ lea(rsp, Address(rsp, rbx, Address::times_8));
   __ dispatch_next(state, step);
 
@@ -417,7 +416,7 @@
   // see if the frame is greater than one page in size. If so,
   // then we need to verify there is enough stack space remaining
   // for the additional locals.
-  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize());
+  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
   __ jcc(Assembler::belowEqual, after_frame_check);
 
   // compute rsp as if this were going to be the last frame on
@@ -428,7 +427,7 @@
 
   // locals + overhead, in bytes
   __ mov(rax, rdx);
-  __ shlptr(rax, Interpreter::logStackElementSize()); // 2 slots per parameter.
+  __ shlptr(rax, Interpreter::logStackElementSize);  // 2 slots per parameter.
   __ addptr(rax, overhead_size);
 
 #ifdef ASSERT
@@ -759,7 +758,6 @@
   // for natives the size of locals is zero
 
   // compute beginning of parameters (r14)
-  if (TaggedStackInterpreter) __ shll(rcx, 1); // 2 slots per parameter.
   __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));
 
   // add 2 zero-initialized slots for native calls
@@ -865,7 +863,7 @@
   __ load_unsigned_short(t,
                          Address(method,
                                  methodOopDesc::size_of_parameters_offset()));
-  __ shll(t, Interpreter::logStackElementSize());
+  __ shll(t, Interpreter::logStackElementSize);
 
   __ subptr(rsp, t);
   __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
@@ -1228,7 +1226,6 @@
   __ pop(rax);
 
   // compute beginning of parameters (r14)
-  if (TaggedStackInterpreter) __ shll(rcx, 1); // 2 slots per parameter.
   __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));
 
   // rdx - # of additional locals
@@ -1239,7 +1236,6 @@
     __ testl(rdx, rdx);
     __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
     __ bind(loop);
-    if (TaggedStackInterpreter) __ push((int) NULL_WORD);  // push tag
     __ push((int) NULL_WORD); // initialize local variables
     __ decrementl(rdx); // until everything initialized
     __ jcc(Assembler::greater, loop);
@@ -1486,7 +1482,7 @@
   const int stub_code = frame::entry_frame_after_call_words;
   const int extra_stack = methodOopDesc::extra_stack_entries();
   const int method_stack = (method->max_locals() + method->max_stack() + extra_stack) *
-                           Interpreter::stackElementWords();
+                           Interpreter::stackElementWords;
   return (overhead_size + method_stack + stub_code);
 }
 
@@ -1507,9 +1503,9 @@
   // It is also guaranteed to be walkable even though it is in a skeletal state
 
   // fixed size of an interpreter frame:
-  int max_locals = method->max_locals() * Interpreter::stackElementWords();
+  int max_locals = method->max_locals() * Interpreter::stackElementWords;
   int extra_locals = (method->max_locals() - method->size_of_parameters()) *
-                     Interpreter::stackElementWords();
+                     Interpreter::stackElementWords;
 
   int overhead = frame::sender_sp_offset -
                  frame::interpreter_frame_initial_sp_offset;
@@ -1518,9 +1514,9 @@
   // for the callee's params we only need to account for the extra
   // locals.
   int size = overhead +
-         (callee_locals - callee_param_count)*Interpreter::stackElementWords() +
+         (callee_locals - callee_param_count)*Interpreter::stackElementWords +
          moncount * frame::interpreter_frame_monitor_size() +
+         tempcount * Interpreter::stackElementWords + popframe_extra_args;
+         tempcount* Interpreter::stackElementWords + popframe_extra_args;
   if (interpreter_frame != NULL) {
 #ifdef ASSERT
     if (!EnableMethodHandles)
@@ -1544,7 +1540,7 @@
 
     // Set last_sp
     intptr_t*  esp = (intptr_t*) monbot -
-                     tempcount*Interpreter::stackElementWords() -
+                     tempcount*Interpreter::stackElementWords -
                      popframe_extra_args;
     interpreter_frame->interpreter_frame_set_last_sp(esp);
 
@@ -1650,7 +1646,7 @@
     __ get_method(rax);
     __ load_unsigned_short(rax, Address(rax, in_bytes(methodOopDesc::
                                                 size_of_parameters_offset())));
-    __ shll(rax, Interpreter::logStackElementSize());
+    __ shll(rax, Interpreter::logStackElementSize);
     __ restore_locals(); // XXX do we need this?
     __ subptr(r14, rax);
     __ addptr(r14, wordSize);
--- a/src/cpu/x86/vm/templateTable_x86_32.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/templateTable_x86_32.cpp	Wed May 19 10:22:39 2010 -0700
@@ -50,7 +50,7 @@
 static inline Address aaddress(int n)            { return iaddress(n); }
 
 static inline Address iaddress(Register r)       {
-  return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::value_offset_in_bytes());
+  return Address(rdi, r, Interpreter::stackElementScale());
 }
 static inline Address laddress(Register r)       {
   return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(1));
@@ -59,12 +59,9 @@
   return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
 }
 
-static inline Address faddress(Register r)       { return iaddress(r); };
-static inline Address daddress(Register r)       {
-  assert(!TaggedStackInterpreter, "This doesn't work");
-  return laddress(r);
-};
-static inline Address aaddress(Register r)       { return iaddress(r); };
+static inline Address faddress(Register r)       { return iaddress(r); }
+static inline Address daddress(Register r)       { return laddress(r); }
+static inline Address aaddress(Register r)       { return iaddress(r); }
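With times_4 scaling and no value_offset bias, a slot is exactly one machine word. The two layouts side by side, as a sketch (field order for the tagged case follows the "tag is one word above the value" convention the tagged interpreter used):

    #include <cstdint>

    struct TaggedSlot {             // old layout: two words per stack element
      intptr_t value;
      intptr_t tag;                 // sat one word above the value
    };
    typedef intptr_t UntaggedSlot;  // new layout: the value alone
    // sizeof(TaggedSlot) == 2 * sizeof(UntaggedSlot), hence times_8 vs times_4.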
 
 // expression stack
 // (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
@@ -448,7 +445,6 @@
   // Get the local value into tos
   locals_index(rbx);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 
@@ -456,18 +452,15 @@
   transition(vtos, itos);
   locals_index(rbx);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
   __ push(itos);
   locals_index(rbx, 3);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 void TemplateTable::fast_iload() {
   transition(vtos, itos);
   locals_index(rbx);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 
@@ -476,7 +469,6 @@
   locals_index(rbx);
   __ movptr(rax, laddress(rbx));
   NOT_LP64(__ movl(rdx, haddress(rbx)));
-  debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
 }
 
 
@@ -484,26 +476,13 @@
   transition(vtos, ftos);
   locals_index(rbx);
   __ fld_s(faddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 
 void TemplateTable::dload() {
   transition(vtos, dtos);
   locals_index(rbx);
-  if (TaggedStackInterpreter) {
-    // Get double out of locals array, onto temp stack and load with
-    // float instruction into ST0
-    __ movl(rax, laddress(rbx));
-    __ movl(rdx, haddress(rbx));
-    __ push(rdx);  // push hi first
-    __ push(rax);
-    __ fld_d(Address(rsp, 0));
-    __ addptr(rsp, 2*wordSize);
-    debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
-  } else {
-    __ fld_d(daddress(rbx));
-  }
+  __ fld_d(daddress(rbx));
 }
 
 
@@ -511,7 +490,6 @@
   transition(vtos, atos);
   locals_index(rbx);
   __ movptr(rax, aaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagReference, rbx));
 }
 
 
@@ -527,7 +505,6 @@
   transition(vtos, itos);
   locals_index_wide(rbx);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 
@@ -536,7 +513,6 @@
   locals_index_wide(rbx);
   __ movptr(rax, laddress(rbx));
   NOT_LP64(__ movl(rdx, haddress(rbx)));
-  debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
 }
 
 
@@ -544,26 +520,13 @@
   transition(vtos, ftos);
   locals_index_wide(rbx);
   __ fld_s(faddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 
 void TemplateTable::wide_dload() {
   transition(vtos, dtos);
   locals_index_wide(rbx);
-  if (TaggedStackInterpreter) {
-    // Get double out of locals array, onto temp stack and load with
-    // float instruction into ST0
-    __ movl(rax, laddress(rbx));
-    __ movl(rdx, haddress(rbx));
-    __ push(rdx);  // push hi first
-    __ push(rax);
-    __ fld_d(Address(rsp, 0));
-    __ addl(rsp, 2*wordSize);
-    debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
-  } else {
-    __ fld_d(daddress(rbx));
-  }
+  __ fld_d(daddress(rbx));
 }
 
 
@@ -571,7 +534,6 @@
   transition(vtos, atos);
   locals_index_wide(rbx);
   __ movptr(rax, aaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagReference, rbx));
 }
 
 void TemplateTable::index_check(Register array, Register index) {
@@ -672,7 +634,6 @@
   // load index out of locals
   locals_index(rbx);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 
   // rdx: array
   index_check(rdx, rax);
@@ -695,7 +656,6 @@
 void TemplateTable::iload(int n) {
   transition(vtos, itos);
   __ movl(rax, iaddress(n));
-  debug_only(__ verify_local_tag(frame::TagValue, n));
 }
 
 
@@ -703,39 +663,24 @@
   transition(vtos, ltos);
   __ movptr(rax, laddress(n));
   NOT_LP64(__ movptr(rdx, haddress(n)));
-  debug_only(__ verify_local_tag(frame::TagCategory2, n));
 }
 
 
 void TemplateTable::fload(int n) {
   transition(vtos, ftos);
   __ fld_s(faddress(n));
-  debug_only(__ verify_local_tag(frame::TagValue, n));
 }
 
 
 void TemplateTable::dload(int n) {
   transition(vtos, dtos);
-  if (TaggedStackInterpreter) {
-    // Get double out of locals array, onto temp stack and load with
-    // float instruction into ST0
-    __ movl(rax, laddress(n));
-    __ movl(rdx, haddress(n));
-    __ push(rdx);  // push hi first
-    __ push(rax);
-    __ fld_d(Address(rsp, 0));
-    __ addptr(rsp, 2*wordSize);  // reset rsp
-    debug_only(__ verify_local_tag(frame::TagCategory2, n));
-  } else {
-    __ fld_d(daddress(n));
-  }
+  __ fld_d(daddress(n));
 }
 
 
 void TemplateTable::aload(int n) {
   transition(vtos, atos);
   __ movptr(rax, aaddress(n));
-  debug_only(__ verify_local_tag(frame::TagReference, n));
 }
 
 
@@ -809,7 +754,6 @@
   transition(itos, vtos);
   locals_index(rbx);
   __ movl(iaddress(rbx), rax);
-  __ tag_local(frame::TagValue, rbx);
 }
 
 
@@ -818,7 +762,6 @@
   locals_index(rbx);
   __ movptr(laddress(rbx), rax);
   NOT_LP64(__ movptr(haddress(rbx), rdx));
-  __ tag_local(frame::TagCategory2, rbx);
 }
 
 
@@ -826,34 +769,21 @@
   transition(ftos, vtos);
   locals_index(rbx);
   __ fstp_s(faddress(rbx));
-  __ tag_local(frame::TagValue, rbx);
 }
 
 
 void TemplateTable::dstore() {
   transition(dtos, vtos);
   locals_index(rbx);
-  if (TaggedStackInterpreter) {
-    // Store double on stack and reload into locals nonadjacently
-    __ subptr(rsp, 2 * wordSize);
-    __ fstp_d(Address(rsp, 0));
-    __ pop(rax);
-    __ pop(rdx);
-    __ movptr(laddress(rbx), rax);
-    __ movptr(haddress(rbx), rdx);
-    __ tag_local(frame::TagCategory2, rbx);
-  } else {
-    __ fstp_d(daddress(rbx));
-  }
+  __ fstp_d(daddress(rbx));
 }
 
 
 void TemplateTable::astore() {
   transition(vtos, vtos);
-  __ pop_ptr(rax, rdx);   // will need to pop tag too
+  __ pop_ptr(rax);
   locals_index(rbx);
   __ movptr(aaddress(rbx), rax);
-  __ tag_local(rdx, rbx);    // need to store same tag in local may be returnAddr
 }
 
 
@@ -862,7 +792,6 @@
   __ pop_i(rax);
   locals_index_wide(rbx);
   __ movl(iaddress(rbx), rax);
-  __ tag_local(frame::TagValue, rbx);
 }
 
 
@@ -872,7 +801,6 @@
   locals_index_wide(rbx);
   __ movptr(laddress(rbx), rax);
   NOT_LP64(__ movl(haddress(rbx), rdx));
-  __ tag_local(frame::TagCategory2, rbx);
 }
 
 
@@ -888,10 +816,9 @@
 
 void TemplateTable::wide_astore() {
   transition(vtos, vtos);
-  __ pop_ptr(rax, rdx);
+  __ pop_ptr(rax);
   locals_index_wide(rbx);
   __ movptr(aaddress(rbx), rax);
-  __ tag_local(rdx, rbx);
 }
 
 
@@ -990,7 +917,7 @@
 
   // Pop stack arguments
   __ bind(done);
-  __ addptr(rsp, 3 * Interpreter::stackElementSize());
+  __ addptr(rsp, 3 * Interpreter::stackElementSize);
 }
 
 
@@ -1024,7 +951,6 @@
 void TemplateTable::istore(int n) {
   transition(itos, vtos);
   __ movl(iaddress(n), rax);
-  __ tag_local(frame::TagValue, n);
 }
 
 
@@ -1032,58 +958,45 @@
   transition(ltos, vtos);
   __ movptr(laddress(n), rax);
   NOT_LP64(__ movptr(haddress(n), rdx));
-  __ tag_local(frame::TagCategory2, n);
 }
 
 
 void TemplateTable::fstore(int n) {
   transition(ftos, vtos);
   __ fstp_s(faddress(n));
-  __ tag_local(frame::TagValue, n);
 }
 
 
 void TemplateTable::dstore(int n) {
   transition(dtos, vtos);
-  if (TaggedStackInterpreter) {
-    __ subptr(rsp, 2 * wordSize);
-    __ fstp_d(Address(rsp, 0));
-    __ pop(rax);
-    __ pop(rdx);
-    __ movl(laddress(n), rax);
-    __ movl(haddress(n), rdx);
-    __ tag_local(frame::TagCategory2, n);
-  } else {
-    __ fstp_d(daddress(n));
-  }
+  __ fstp_d(daddress(n));
 }
 
 
 void TemplateTable::astore(int n) {
   transition(vtos, vtos);
-  __ pop_ptr(rax, rdx);
+  __ pop_ptr(rax);
   __ movptr(aaddress(n), rax);
-  __ tag_local(rdx, n);
 }
 
 
 void TemplateTable::pop() {
   transition(vtos, vtos);
-  __ addptr(rsp, Interpreter::stackElementSize());
+  __ addptr(rsp, Interpreter::stackElementSize);
 }
 
 
 void TemplateTable::pop2() {
   transition(vtos, vtos);
-  __ addptr(rsp, 2*Interpreter::stackElementSize());
+  __ addptr(rsp, 2*Interpreter::stackElementSize);
 }
 
 
 void TemplateTable::dup() {
   transition(vtos, vtos);
   // stack: ..., a
-  __ load_ptr_and_tag(0, rax, rdx);
-  __ push_ptr(rax, rdx);
+  __ load_ptr(0, rax);
+  __ push_ptr(rax);
   // stack: ..., a, a
 }
 
@@ -1091,11 +1004,11 @@
 void TemplateTable::dup_x1() {
   transition(vtos, vtos);
   // stack: ..., a, b
-  __ load_ptr_and_tag(0, rax, rdx);  // load b
-  __ load_ptr_and_tag(1, rcx, rbx);  // load a
-  __ store_ptr_and_tag(1, rax, rdx); // store b
-  __ store_ptr_and_tag(0, rcx, rbx); // store a
-  __ push_ptr(rax, rdx);             // push b
+  __ load_ptr( 0, rax);  // load b
+  __ load_ptr( 1, rcx);  // load a
+  __ store_ptr(1, rax);  // store b
+  __ store_ptr(0, rcx);  // store a
+  __ push_ptr(rax);      // push b
   // stack: ..., b, a, b
 }
 
@@ -1103,15 +1016,15 @@
 void TemplateTable::dup_x2() {
   transition(vtos, vtos);
   // stack: ..., a, b, c
-  __ load_ptr_and_tag(0, rax, rdx);  // load c
-  __ load_ptr_and_tag(2, rcx, rbx);  // load a
-  __ store_ptr_and_tag(2, rax, rdx); // store c in a
-  __ push_ptr(rax, rdx);             // push c
+  __ load_ptr( 0, rax);  // load c
+  __ load_ptr( 2, rcx);  // load a
+  __ store_ptr(2, rax);  // store c in a
+  __ push_ptr(rax);      // push c
   // stack: ..., c, b, c, c
-  __ load_ptr_and_tag(2, rax, rdx);  // load b
-  __ store_ptr_and_tag(2, rcx, rbx); // store a in b
+  __ load_ptr( 2, rax);  // load b
+  __ store_ptr(2, rcx);  // store a in b
   // stack: ..., c, a, c, c
-  __ store_ptr_and_tag(1, rax, rdx); // store b in c
+  __ store_ptr(1, rax);  // store b in c
   // stack: ..., c, a, b, c
 }
 
@@ -1119,10 +1032,10 @@
 void TemplateTable::dup2() {
   transition(vtos, vtos);
   // stack: ..., a, b
-  __ load_ptr_and_tag(1, rax, rdx);  // load a
-  __ push_ptr(rax, rdx);             // push a
-  __ load_ptr_and_tag(1, rax, rdx);  // load b
-  __ push_ptr(rax, rdx);             // push b
+  __ load_ptr(1, rax);  // load a
+  __ push_ptr(rax);     // push a
+  __ load_ptr(1, rax);  // load b
+  __ push_ptr(rax);     // push b
   // stack: ..., a, b, a, b
 }
 
@@ -1130,17 +1043,17 @@
 void TemplateTable::dup2_x1() {
   transition(vtos, vtos);
   // stack: ..., a, b, c
-  __ load_ptr_and_tag(0, rcx, rbx);  // load c
-  __ load_ptr_and_tag(1, rax, rdx);  // load b
-  __ push_ptr(rax, rdx);             // push b
-  __ push_ptr(rcx, rbx);             // push c
+  __ load_ptr( 0, rcx);  // load c
+  __ load_ptr( 1, rax);  // load b
+  __ push_ptr(rax);      // push b
+  __ push_ptr(rcx);      // push c
   // stack: ..., a, b, c, b, c
-  __ store_ptr_and_tag(3, rcx, rbx); // store c in b
+  __ store_ptr(3, rcx);  // store c in b
   // stack: ..., a, c, c, b, c
-  __ load_ptr_and_tag(4, rcx, rbx);  // load a
-  __ store_ptr_and_tag(2, rcx, rbx); // store a in 2nd c
+  __ load_ptr( 4, rcx);  // load a
+  __ store_ptr(2, rcx);  // store a in 2nd c
   // stack: ..., a, c, a, b, c
-  __ store_ptr_and_tag(4, rax, rdx); // store b in a
+  __ store_ptr(4, rax);  // store b in a
   // stack: ..., b, c, a, b, c
 }
@@ -1149,19 +1062,19 @@
 void TemplateTable::dup2_x2() {
   transition(vtos, vtos);
   // stack: ..., a, b, c, d
-  __ load_ptr_and_tag(0, rcx, rbx);  // load d
-  __ load_ptr_and_tag(1, rax, rdx);  // load c
-  __ push_ptr(rax, rdx);             // push c
-  __ push_ptr(rcx, rbx);             // push d
+  __ load_ptr( 0, rcx);  // load d
+  __ load_ptr( 1, rax);  // load c
+  __ push_ptr(rax);      // push c
+  __ push_ptr(rcx);      // push d
   // stack: ..., a, b, c, d, c, d
-  __ load_ptr_and_tag(4, rax, rdx);  // load b
-  __ store_ptr_and_tag(2, rax, rdx); // store b in d
-  __ store_ptr_and_tag(4, rcx, rbx); // store d in b
+  __ load_ptr( 4, rax);  // load b
+  __ store_ptr(2, rax);  // store b in d
+  __ store_ptr(4, rcx);  // store d in b
   // stack: ..., a, d, c, b, c, d
-  __ load_ptr_and_tag(5, rcx, rbx);  // load a
-  __ load_ptr_and_tag(3, rax, rdx);  // load c
-  __ store_ptr_and_tag(3, rcx, rbx); // store a in c
-  __ store_ptr_and_tag(5, rax, rdx); // store c in a
+  __ load_ptr( 5, rcx);  // load a
+  __ load_ptr( 3, rax);  // load c
+  __ store_ptr(3, rcx);  // store a in c
+  __ store_ptr(5, rax);  // store c in a
   // stack: ..., c, d, a, b, c, d
 }
@@ -1170,10 +1083,10 @@
 void TemplateTable::swap() {
   transition(vtos, vtos);
   // stack: ..., a, b
-  __ load_ptr_and_tag(1, rcx, rbx);  // load a
-  __ load_ptr_and_tag(0, rax, rdx);  // load b
-  __ store_ptr_and_tag(0, rcx, rbx); // store a in b
-  __ store_ptr_and_tag(1, rax, rdx); // store b in a
+  __ load_ptr( 1, rcx);  // load a
+  __ load_ptr( 0, rax);  // load b
+  __ store_ptr(0, rcx);  // store a in b
+  __ store_ptr(1, rax);  // store b in a
   // stack: ..., b, a
 }
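
All of the dup/swap templates above now use single-register helpers in place of the old *_and_tag pairs, because each stack slot is one word with no companion tag slot. The helpers are assumed to reduce to plain word moves against the expression stack, roughly:

    // Assumed shape of the untagged helpers (InterpreterMacroAssembler):
    void InterpreterMacroAssembler::load_ptr(int n, Register val) {
      movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
    }
    void InterpreterMacroAssembler::store_ptr(int n, Register val) {
      movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
    }
    void InterpreterMacroAssembler::push_ptr(Register r) {
      push(r);   // one word per slot, so a native push suffices
    }
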
 
@@ -1181,12 +1094,12 @@
 void TemplateTable::iop2(Operation op) {
   transition(itos, itos);
   switch (op) {
-    case add  :                    __ pop_i(rdx); __ addl (rax, rdx); break;
+    case add  :                   __ pop_i(rdx); __ addl (rax, rdx); break;
     case sub  : __ mov(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
-    case mul  :                    __ pop_i(rdx); __ imull(rax, rdx); break;
-    case _and :                    __ pop_i(rdx); __ andl (rax, rdx); break;
-    case _or  :                    __ pop_i(rdx); __ orl  (rax, rdx); break;
-    case _xor :                    __ pop_i(rdx); __ xorl (rax, rdx); break;
+    case mul  :                   __ pop_i(rdx); __ imull(rax, rdx); break;
+    case _and :                   __ pop_i(rdx); __ andl (rax, rdx); break;
+    case _or  :                   __ pop_i(rdx); __ orl  (rax, rdx); break;
+    case _xor :                   __ pop_i(rdx); __ xorl (rax, rdx); break;
     case shl  : __ mov(rcx, rax); __ pop_i(rax); __ shll (rax);      break; // implicit masking of lower 5 bits by Intel shift instr.
     case shr  : __ mov(rcx, rax); __ pop_i(rax); __ sarl (rax);      break; // implicit masking of lower 5 bits by Intel shift instr.
     case ushr : __ mov(rcx, rax); __ pop_i(rax); __ shrl (rax);      break; // implicit masking of lower 5 bits by Intel shift instr.
@@ -1199,13 +1112,13 @@
   transition(ltos, ltos);
   __ pop_l(rbx, rcx);
   switch (op) {
-    case add : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
-    case sub : __ subl(rbx, rax); __ sbbl(rcx, rdx);
-               __ mov(rax, rbx); __ mov(rdx, rcx); break;
-    case _and: __ andl(rax, rbx); __ andl(rdx, rcx); break;
-    case _or : __ orl (rax, rbx); __ orl (rdx, rcx); break;
-    case _xor: __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
-    default : ShouldNotReachHere();
+    case add  : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
+    case sub  : __ subl(rbx, rax); __ sbbl(rcx, rdx);
+                __ mov (rax, rbx); __ mov (rdx, rcx); break;
+    case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
+    case _or  : __ orl (rax, rbx); __ orl (rdx, rcx); break;
+    case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
+    default   : ShouldNotReachHere();
   }
 }
 
@@ -1299,7 +1212,6 @@
 
 void TemplateTable::fop2(Operation op) {
   transition(ftos, ftos);
-  __ pop_ftos_to_rsp();  // pop ftos into rsp
   switch (op) {
     case add: __ fadd_s (at_rsp());                break;
     case sub: __ fsubr_s(at_rsp());                break;
@@ -1315,7 +1227,6 @@
 
 void TemplateTable::dop2(Operation op) {
   transition(dtos, dtos);
-  __ pop_dtos_to_rsp();  // pop dtos into rsp
 
   switch (op) {
     case add: __ fadd_d (at_rsp());                break;
@@ -1557,10 +1468,8 @@
 
 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
   if (is_float) {
-    __ pop_ftos_to_rsp();
     __ fld_s(at_rsp());
   } else {
-    __ pop_dtos_to_rsp();
     __ fld_d(at_rsp());
     __ pop(rdx);
   }
@@ -2854,7 +2763,6 @@
   transition(vtos, state);
   // get receiver
   __ movptr(rax, aaddress(0));
-  debug_only(__ verify_local_tag(frame::TagReference, 0));
   // access constant pool cache
   __ get_cache_and_index_at_bcp(rcx, rdx, 2);
   __ movptr(rbx, Address(rcx,
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Wed May 19 10:22:39 2010 -0700
@@ -58,7 +58,7 @@
 }
 
 static inline Address iaddress(Register r) {
-  return Address(r14, r, Address::times_8, Interpreter::value_offset_in_bytes());
+  return Address(r14, r, Address::times_8);
 }
 
 static inline Address laddress(Register r) {
@@ -418,7 +418,6 @@
 void TemplateTable::locals_index(Register reg, int offset) {
   __ load_unsigned_byte(reg, at_bcp(offset));
   __ negptr(reg);
-  if (TaggedStackInterpreter) __ shlptr(reg, 1);  // index = index*2
 }
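
With one word per local, the negated bytecode index addresses the slot directly; the removed shift existed only because tagged locals occupied two words each. Illustrative arithmetic (not VM code), assuming the x86_64 layout where r14 holds the locals base and locals grow downward:

    #include <cstdint>

    // Address of local k: r14 + (-k)*8, i.e. one 8-byte word per local.
    static inline intptr_t* local_addr(intptr_t* locals_base, int k) {
      return locals_base - k;   // matches Address(r14, reg, times_8) with reg == -k
    }
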
 
 void TemplateTable::iload() {
@@ -460,53 +459,45 @@
   // Get the local value into tos
   locals_index(rbx);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 void TemplateTable::fast_iload2() {
   transition(vtos, itos);
   locals_index(rbx);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
   __ push(itos);
   locals_index(rbx, 3);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 void TemplateTable::fast_iload() {
   transition(vtos, itos);
   locals_index(rbx);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 void TemplateTable::lload() {
   transition(vtos, ltos);
   locals_index(rbx);
   __ movq(rax, laddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
 }
 
 void TemplateTable::fload() {
   transition(vtos, ftos);
   locals_index(rbx);
   __ movflt(xmm0, faddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 void TemplateTable::dload() {
   transition(vtos, dtos);
   locals_index(rbx);
   __ movdbl(xmm0, daddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
 }
 
 void TemplateTable::aload() {
   transition(vtos, atos);
   locals_index(rbx);
   __ movptr(rax, aaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagReference, rbx));
 }
 
 void TemplateTable::locals_index_wide(Register reg) {
@@ -514,42 +505,36 @@
   __ bswapl(reg);
   __ shrl(reg, 16);
   __ negptr(reg);
-  if (TaggedStackInterpreter) __ shlptr(reg, 1);  // index = index*2
 }
 
 void TemplateTable::wide_iload() {
   transition(vtos, itos);
   locals_index_wide(rbx);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 void TemplateTable::wide_lload() {
   transition(vtos, ltos);
   locals_index_wide(rbx);
   __ movq(rax, laddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
 }
 
 void TemplateTable::wide_fload() {
   transition(vtos, ftos);
   locals_index_wide(rbx);
   __ movflt(xmm0, faddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 }
 
 void TemplateTable::wide_dload() {
   transition(vtos, dtos);
   locals_index_wide(rbx);
   __ movdbl(xmm0, daddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
 }
 
 void TemplateTable::wide_aload() {
   transition(vtos, atos);
   locals_index_wide(rbx);
   __ movptr(rax, aaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagReference, rbx));
 }
 
 void TemplateTable::index_check(Register array, Register index) {
@@ -654,7 +639,6 @@
   // load index out of locals
   locals_index(rbx);
   __ movl(rax, iaddress(rbx));
-  debug_only(__ verify_local_tag(frame::TagValue, rbx));
 
   // eax: index
   // rdx: array
@@ -681,31 +665,26 @@
 void TemplateTable::iload(int n) {
   transition(vtos, itos);
   __ movl(rax, iaddress(n));
-  debug_only(__ verify_local_tag(frame::TagValue, n));
 }
 
 void TemplateTable::lload(int n) {
   transition(vtos, ltos);
   __ movq(rax, laddress(n));
-  debug_only(__ verify_local_tag(frame::TagCategory2, n));
 }
 
 void TemplateTable::fload(int n) {
   transition(vtos, ftos);
   __ movflt(xmm0, faddress(n));
-  debug_only(__ verify_local_tag(frame::TagValue, n));
 }
 
 void TemplateTable::dload(int n) {
   transition(vtos, dtos);
   __ movdbl(xmm0, daddress(n));
-  debug_only(__ verify_local_tag(frame::TagCategory2, n));
 }
 
 void TemplateTable::aload(int n) {
   transition(vtos, atos);
   __ movptr(rax, aaddress(n));
-  debug_only(__ verify_local_tag(frame::TagReference, n));
 }
 
 void TemplateTable::aload_0() {
@@ -791,36 +770,31 @@
   transition(itos, vtos);
   locals_index(rbx);
   __ movl(iaddress(rbx), rax);
-  __ tag_local(frame::TagValue, rbx);
 }
 
 void TemplateTable::lstore() {
   transition(ltos, vtos);
   locals_index(rbx);
   __ movq(laddress(rbx), rax);
-  __ tag_local(frame::TagCategory2, rbx);
 }
 
 void TemplateTable::fstore() {
   transition(ftos, vtos);
   locals_index(rbx);
   __ movflt(faddress(rbx), xmm0);
-  __ tag_local(frame::TagValue, rbx);
 }
 
 void TemplateTable::dstore() {
   transition(dtos, vtos);
   locals_index(rbx);
   __ movdbl(daddress(rbx), xmm0);
-  __ tag_local(frame::TagCategory2, rbx);
 }
 
 void TemplateTable::astore() {
   transition(vtos, vtos);
-  __ pop_ptr(rax, rdx);    // will need to pop tag too
+  __ pop_ptr(rax);
   locals_index(rbx);
   __ movptr(aaddress(rbx), rax);
-  __ tag_local(rdx, rbx);  // store tag from stack, might be returnAddr
 }
 
 void TemplateTable::wide_istore() {
@@ -828,7 +802,6 @@
   __ pop_i();
   locals_index_wide(rbx);
   __ movl(iaddress(rbx), rax);
-  __ tag_local(frame::TagValue, rbx);
 }
 
 void TemplateTable::wide_lstore() {
@@ -836,7 +809,6 @@
   __ pop_l();
   locals_index_wide(rbx);
   __ movq(laddress(rbx), rax);
-  __ tag_local(frame::TagCategory2, rbx);
 }
 
 void TemplateTable::wide_fstore() {
@@ -844,7 +816,6 @@
   __ pop_f();
   locals_index_wide(rbx);
   __ movflt(faddress(rbx), xmm0);
-  __ tag_local(frame::TagValue, rbx);
 }
 
 void TemplateTable::wide_dstore() {
@@ -852,15 +823,13 @@
   __ pop_d();
   locals_index_wide(rbx);
   __ movdbl(daddress(rbx), xmm0);
-  __ tag_local(frame::TagCategory2, rbx);
 }
 
 void TemplateTable::wide_astore() {
   transition(vtos, vtos);
-  __ pop_ptr(rax, rdx);    // will need to pop tag too
+  __ pop_ptr(rax);
   locals_index_wide(rbx);
   __ movptr(aaddress(rbx), rax);
-  __ tag_local(rdx, rbx);  // store tag from stack, might be returnAddr
 }
 
 void TemplateTable::iastore() {
@@ -972,7 +941,7 @@
 
   // Pop stack arguments
   __ bind(done);
-  __ addptr(rsp, 3 * Interpreter::stackElementSize());
+  __ addptr(rsp, 3 * Interpreter::stackElementSize);
 }
 
 void TemplateTable::bastore() {
@@ -1010,130 +979,125 @@
 void TemplateTable::istore(int n) {
   transition(itos, vtos);
   __ movl(iaddress(n), rax);
-  __ tag_local(frame::TagValue, n);
 }
 
 void TemplateTable::lstore(int n) {
   transition(ltos, vtos);
   __ movq(laddress(n), rax);
-  __ tag_local(frame::TagCategory2, n);
 }
 
 void TemplateTable::fstore(int n) {
   transition(ftos, vtos);
   __ movflt(faddress(n), xmm0);
-  __ tag_local(frame::TagValue, n);
 }
 
 void TemplateTable::dstore(int n) {
   transition(dtos, vtos);
   __ movdbl(daddress(n), xmm0);
-  __ tag_local(frame::TagCategory2, n);
 }
 
 void TemplateTable::astore(int n) {
   transition(vtos, vtos);
-  __ pop_ptr(rax, rdx);
+  __ pop_ptr(rax);
   __ movptr(aaddress(n), rax);
-  __ tag_local(rdx, n);
 }
 
 void TemplateTable::pop() {
   transition(vtos, vtos);
-  __ addptr(rsp, Interpreter::stackElementSize());
+  __ addptr(rsp, Interpreter::stackElementSize);
 }
 
 void TemplateTable::pop2() {
   transition(vtos, vtos);
-  __ addptr(rsp, 2 * Interpreter::stackElementSize());
+  __ addptr(rsp, 2 * Interpreter::stackElementSize);
 }
 
 void TemplateTable::dup() {
   transition(vtos, vtos);
-  __ load_ptr_and_tag(0, rax, rdx);
-  __ push_ptr(rax, rdx);
+  __ load_ptr(0, rax);
+  __ push_ptr(rax);
   // stack: ..., a, a
 }
 
 void TemplateTable::dup_x1() {
   transition(vtos, vtos);
   // stack: ..., a, b
-  __ load_ptr_and_tag(0, rax, rdx);  // load b
-  __ load_ptr_and_tag(1, rcx, rbx);  // load a
-  __ store_ptr_and_tag(1, rax, rdx); // store b
-  __ store_ptr_and_tag(0, rcx, rbx); // store a
-  __ push_ptr(rax, rdx);             // push b
+  __ load_ptr( 0, rax);  // load b
+  __ load_ptr( 1, rcx);  // load a
+  __ store_ptr(1, rax);  // store b
+  __ store_ptr(0, rcx);  // store a
+  __ push_ptr(rax);      // push b
   // stack: ..., b, a, b
 }
 
 void TemplateTable::dup_x2() {
   transition(vtos, vtos);
   // stack: ..., a, b, c
-  __ load_ptr_and_tag(0, rax, rdx);  // load c
-  __ load_ptr_and_tag(2, rcx, rbx);  // load a
-  __ store_ptr_and_tag(2, rax, rdx); // store c in a
-  __ push_ptr(rax, rdx);             // push c
+  __ load_ptr( 0, rax);  // load c
+  __ load_ptr( 2, rcx);  // load a
+  __ store_ptr(2, rax);  // store c in a
+  __ push_ptr(rax);      // push c
   // stack: ..., c, b, c, c
-  __ load_ptr_and_tag(2, rax, rdx);  // load b
-  __ store_ptr_and_tag(2, rcx, rbx); // store a in b
+  __ load_ptr( 2, rax);  // load b
+  __ store_ptr(2, rcx);  // store a in b
   // stack: ..., c, a, c, c
-  __ store_ptr_and_tag(1, rax, rdx); // store b in c
+  __ store_ptr(1, rax);  // store b in c
   // stack: ..., c, a, b, c
 }
 
 void TemplateTable::dup2() {
   transition(vtos, vtos);
   // stack: ..., a, b
-  __ load_ptr_and_tag(1, rax, rdx);  // load a
-  __ push_ptr(rax, rdx);             // push a
-  __ load_ptr_and_tag(1, rax, rdx);  // load b
-  __ push_ptr(rax, rdx);             // push b
+  __ load_ptr(1, rax);  // load a
+  __ push_ptr(rax);     // push a
+  __ load_ptr(1, rax);  // load b
+  __ push_ptr(rax);     // push b
   // stack: ..., a, b, a, b
 }
 
 void TemplateTable::dup2_x1() {
   transition(vtos, vtos);
   // stack: ..., a, b, c
-  __ load_ptr_and_tag(0, rcx, rbx);  // load c
-  __ load_ptr_and_tag(1, rax, rdx);  // load b
-  __ push_ptr(rax, rdx);             // push b
-  __ push_ptr(rcx, rbx);             // push c
+  __ load_ptr( 0, rcx);  // load c
+  __ load_ptr( 1, rax);  // load b
+  __ push_ptr(rax);      // push b
+  __ push_ptr(rcx);      // push c
   // stack: ..., a, b, c, b, c
-  __ store_ptr_and_tag(3, rcx, rbx); // store c in b
+  __ store_ptr(3, rcx);  // store c in b
   // stack: ..., a, c, c, b, c
-  __ load_ptr_and_tag(4, rcx, rbx);  // load a
-  __ store_ptr_and_tag(2, rcx, rbx); // store a in 2nd c
+  __ load_ptr( 4, rcx);  // load a
+  __ store_ptr(2, rcx);  // store a in 2nd c
   // stack: ..., a, c, a, b, c
-  __ store_ptr_and_tag(4, rax, rdx); // store b in a
+  __ store_ptr(4, rax);  // store b in a
   // stack: ..., b, c, a, b, c
 }
 
 void TemplateTable::dup2_x2() {
   transition(vtos, vtos);
   // stack: ..., a, b, c, d
-  __ load_ptr_and_tag(0, rcx, rbx);  // load d
-  __ load_ptr_and_tag(1, rax, rdx);  // load c
-  __ push_ptr(rax, rdx);             // push c
-  __ push_ptr(rcx, rbx);             // push d
+  __ load_ptr( 0, rcx);  // load d
+  __ load_ptr( 1, rax);  // load c
+  __ push_ptr(rax);      // push c
+  __ push_ptr(rcx);      // push d
   // stack: ..., a, b, c, d, c, d
-  __ load_ptr_and_tag(4, rax, rdx);  // load b
-  __ store_ptr_and_tag(2, rax, rdx); // store b in d
-  __ store_ptr_and_tag(4, rcx, rbx); // store d in b
+  __ load_ptr( 4, rax);  // load b
+  __ store_ptr(2, rax);  // store b in d
+  __ store_ptr(4, rcx);  // store d in b
   // stack: ..., a, d, c, b, c, d
-  __ load_ptr_and_tag(5, rcx, rbx);  // load a
-  __ load_ptr_and_tag(3, rax, rdx);  // load c
-  __ store_ptr_and_tag(3, rcx, rbx); // store a in c
-  __ store_ptr_and_tag(5, rax, rdx); // store c in a
+  __ load_ptr( 5, rcx);  // load a
+  __ load_ptr( 3, rax);  // load c
+  __ store_ptr(3, rcx);  // store a in c
+  __ store_ptr(5, rax);  // store c in a
   // stack: ..., c, d, a, b, c, d
 }
 
 void TemplateTable::swap() {
   transition(vtos, vtos);
   // stack: ..., a, b
-  __ load_ptr_and_tag(1, rcx, rbx);  // load a
-  __ load_ptr_and_tag(0, rax, rdx);  // load b
-  __ store_ptr_and_tag(0, rcx, rbx); // store a in b
-  __ store_ptr_and_tag(1, rax, rdx); // store b in a
+  __ load_ptr( 1, rcx);  // load a
+  __ load_ptr( 0, rax);  // load b
+  __ store_ptr(0, rcx);  // store a in b
+  __ store_ptr(1, rax);  // store b in a
   // stack: ..., b, a
 }
 
@@ -1156,12 +1120,12 @@
 void TemplateTable::lop2(Operation op) {
   transition(ltos, ltos);
   switch (op) {
-  case add  :                    __ pop_l(rdx); __ addptr (rax, rdx); break;
-  case sub  : __ mov(rdx, rax);  __ pop_l(rax); __ subptr (rax, rdx); break;
-  case _and :                    __ pop_l(rdx); __ andptr (rax, rdx); break;
-  case _or  :                    __ pop_l(rdx); __ orptr  (rax, rdx); break;
-  case _xor :                    __ pop_l(rdx); __ xorptr (rax, rdx); break;
-  default : ShouldNotReachHere();
+  case add  :                    __ pop_l(rdx); __ addptr(rax, rdx); break;
+  case sub  : __ mov(rdx, rax);  __ pop_l(rax); __ subptr(rax, rdx); break;
+  case _and :                    __ pop_l(rdx); __ andptr(rax, rdx); break;
+  case _or  :                    __ pop_l(rdx); __ orptr (rax, rdx); break;
+  case _xor :                    __ pop_l(rdx); __ xorptr(rax, rdx); break;
+  default   : ShouldNotReachHere();
   }
 }
 
@@ -1250,7 +1214,7 @@
   switch (op) {
   case add:
     __ addss(xmm0, at_rsp());
-    __ addptr(rsp, Interpreter::stackElementSize());
+    __ addptr(rsp, Interpreter::stackElementSize);
     break;
   case sub:
     __ movflt(xmm1, xmm0);
@@ -1259,7 +1223,7 @@
     break;
   case mul:
     __ mulss(xmm0, at_rsp());
-    __ addptr(rsp, Interpreter::stackElementSize());
+    __ addptr(rsp, Interpreter::stackElementSize);
     break;
   case div:
     __ movflt(xmm1, xmm0);
@@ -1282,7 +1246,7 @@
   switch (op) {
   case add:
     __ addsd(xmm0, at_rsp());
-    __ addptr(rsp, 2 * Interpreter::stackElementSize());
+    __ addptr(rsp, 2 * Interpreter::stackElementSize);
     break;
   case sub:
     __ movdbl(xmm1, xmm0);
@@ -1291,7 +1255,7 @@
     break;
   case mul:
     __ mulsd(xmm0, at_rsp());
-    __ addptr(rsp, 2 * Interpreter::stackElementSize());
+    __ addptr(rsp, 2 * Interpreter::stackElementSize);
     break;
   case div:
     __ movdbl(xmm1, xmm0);
@@ -2782,7 +2746,6 @@
 
   // get receiver
   __ movptr(rax, aaddress(0));
-  debug_only(__ verify_local_tag(frame::TagReference, 0));
   // access constant pool cache
   __ get_cache_and_index_at_bcp(rcx, rdx, 2);
   __ movptr(rbx,
@@ -2858,7 +2821,6 @@
   if (load_receiver) {
     __ movl(recv, flags);
     __ andl(recv, 0xFF);
-    if (TaggedStackInterpreter) __ shll(recv, 1);  // index*2
     Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
     __ movptr(recv, recv_addr);
     __ verify_oop(recv);
@@ -3610,13 +3572,11 @@
   __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
   // last dim is on top of stack; we want address of first one:
   // first_addr = last_addr + (ndims - 1) * wordSize
-  if (TaggedStackInterpreter) __ shll(rax, 1);  // index*2
   __ lea(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize));
   call_VM(rax,
           CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
           c_rarg1);
   __ load_unsigned_byte(rbx, at_bcp(3));
-  if (TaggedStackInterpreter) __ shll(rbx, 1);  // index*2
   __ lea(rsp, Address(rsp, rbx, Address::times_8));
 }
 #endif // !CC_INTERP
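
The multianewarray hunk drops the same tag-word scaling: dimension words now sit one per slot, so the first dimension is a fixed (ndims - 1) words above the last one. A worked sketch of the address the lea computes:

    #include <cstdint>

    // Illustrative only: rsp points at the last (topmost) dimension and
    // rax holds ndims, so Address(rsp, rax, times_8, -wordSize) is
    // rsp + ndims*8 - 8, i.e. the first dimension.
    static intptr_t* first_dim_addr(intptr_t* sp, int ndims) {
      return sp + (ndims - 1);
    }
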
--- a/src/cpu/x86/vm/x86_32.ad	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/x86_32.ad	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 //
-// Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+// Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -6272,6 +6272,30 @@
   ins_pipe( ialu_reg_reg);
 %}
 
+instruct bytes_reverse_unsigned_short(eRegI dst) %{
+  match(Set dst (ReverseBytesUS dst));
+
+  format %{ "BSWAP  $dst\n\t" 
+            "SHR    $dst,16\n\t" %}
+  ins_encode %{
+    __ bswapl($dst$$Register);
+    __ shrl($dst$$Register, 16); 
+  %}
+  ins_pipe( ialu_reg );
+%}
+
+instruct bytes_reverse_short(eRegI dst) %{
+  match(Set dst (ReverseBytesS dst));
+
+  format %{ "BSWAP  $dst\n\t" 
+            "SAR    $dst,16\n\t" %}
+  ins_encode %{
+    __ bswapl($dst$$Register);
+    __ sarl($dst$$Register, 16); 
+  %}
+  ins_pipe( ialu_reg );
+%}
+
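
The two new matcher rules implement the ReverseBytesUS/ReverseBytesS ideal nodes as a 32-bit byte swap followed by a 16-bit shift: logical for the unsigned (char) case, arithmetic for the signed (short) case so the result stays sign-extended. A plain C++ sketch of the computation (GCC/Clang __builtin_bswap32 assumed):

    #include <cstdint>

    static inline uint16_t reverse_bytes_us(uint16_t x) {   // BSWAP; SHR 16
      return (uint16_t)(__builtin_bswap32((uint32_t)x) >> 16);
    }
    static inline int16_t reverse_bytes_s(int16_t x) {      // BSWAP; SAR 16
      // signed >> is an arithmetic shift on the targets this models
      return (int16_t)((int32_t)__builtin_bswap32((uint16_t)x) >> 16);
    }
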
 
 //---------- Zeros Count Instructions ------------------------------------------
 
--- a/src/cpu/x86/vm/x86_64.ad	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/x86/vm/x86_64.ad	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 //
-// Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+// Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -7371,6 +7371,30 @@
   ins_pipe( ialu_reg);
 %}
 
+instruct bytes_reverse_unsigned_short(rRegI dst) %{
+  match(Set dst (ReverseBytesUS dst));
+
+  format %{ "bswapl  $dst\n\t" 
+            "shrl    $dst,16\n\t" %}
+  ins_encode %{
+    __ bswapl($dst$$Register);
+    __ shrl($dst$$Register, 16); 
+  %}
+  ins_pipe( ialu_reg );
+%}
+
+instruct bytes_reverse_short(rRegI dst) %{
+  match(Set dst (ReverseBytesS dst));
+
+  format %{ "bswapl  $dst\n\t" 
+            "sar     $dst,16\n\t" %}
+  ins_encode %{
+    __ bswapl($dst$$Register);
+    __ sarl($dst$$Register, 16); 
+  %}
+  ins_pipe( ialu_reg );
+%}
+
 instruct loadI_reversed(rRegI dst, memory src) %{
   match(Set dst (ReverseBytesI (LoadI src)));
 
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp	Wed May 19 10:22:39 2010 -0700
@@ -37,27 +37,18 @@
   thread->reset_last_Java_frame();              \
   fixup_after_potential_safepoint()
 
-void CppInterpreter::normal_entry(methodOop method, intptr_t UNUSED, TRAPS) {
+int CppInterpreter::normal_entry(methodOop method, intptr_t UNUSED, TRAPS) {
   JavaThread *thread = (JavaThread *) THREAD;
-  ZeroStack *stack = thread->zero_stack();
-
-  // Adjust the caller's stack frame to accomodate any additional
-  // local variables we have contiguously with our parameters.
-  int extra_locals = method->max_locals() - method->size_of_parameters();
-  if (extra_locals > 0) {
-    if (extra_locals > stack->available_words()) {
-      Unimplemented();
-    }
-    for (int i = 0; i < extra_locals; i++)
-      stack->push(0);
-  }
 
   // Allocate and initialize our frame.
-  InterpreterFrame *frame = InterpreterFrame::build(stack, method, thread);
+  InterpreterFrame *frame = InterpreterFrame::build(method, CHECK_0);
   thread->push_zero_frame(frame);
 
   // Execute those bytecodes!
   main_loop(0, THREAD);
+
+  // No deoptimized frames on the stack
+  return 0;
 }
 
 void CppInterpreter::main_loop(int recurse, TRAPS) {
@@ -76,12 +67,6 @@
   intptr_t *result = NULL;
   int result_slots = 0;
 
-  // Check we're not about to run out of stack
-  if (stack_overflow_imminent(thread)) {
-    CALL_VM_NOCHECK(InterpreterRuntime::throw_StackOverflowError(thread));
-    goto unwind_and_return;
-  }
-
   while (true) {
     // We can set up the frame anchor with everything we want at
     // this point as we are thread_in_Java and no safepoints can
@@ -123,9 +108,9 @@
       int monitor_words = frame::interpreter_frame_monitor_size();
 
       // Allocate the space
-      if (monitor_words > stack->available_words()) {
-        Unimplemented();
-      }
+      stack->overflow_check(monitor_words, THREAD);
+      if (HAS_PENDING_EXCEPTION)
+        break;
       stack->alloc(monitor_words * wordSize);
 
       // Move the expression stack contents
@@ -172,8 +157,6 @@
     }
   }
 
- unwind_and_return:
-
   // Unwind the current frame
   thread->pop_zero_frame();
 
@@ -185,7 +168,7 @@
     stack->push(result[-i]);
 }
 
-void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
+int CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
   // Make sure method is native and not abstract
   assert(method->is_native() && !method->is_abstract(), "should be");
 
@@ -193,17 +176,11 @@
   ZeroStack *stack = thread->zero_stack();
 
   // Allocate and initialize our frame
-  InterpreterFrame *frame = InterpreterFrame::build(stack, method, thread);
+  InterpreterFrame *frame = InterpreterFrame::build(method, CHECK_0);
   thread->push_zero_frame(frame);
   interpreterState istate = frame->interpreter_state();
   intptr_t *locals = istate->locals();
 
-  // Check we're not about to run out of stack
-  if (stack_overflow_imminent(thread)) {
-    CALL_VM_NOCHECK(InterpreterRuntime::throw_StackOverflowError(thread));
-    goto unwind_and_return;
-  }
-
   // Update the invocation counter
   if ((UseCompiler || CountCompiledCalls) && !method->is_synchronized()) {
     InvocationCounter *counter = method->invocation_counter();
@@ -264,9 +241,10 @@
   assert(function != NULL, "should be set if signature handler is");
 
   // Build the argument list
-  if (handler->argument_count() * 2 > stack->available_words()) {
-    Unimplemented();
-  }
+  stack->overflow_check(handler->argument_count() * 2, THREAD);
+  if (HAS_PENDING_EXCEPTION)
+    goto unlock_unwind_and_return;
+
   void **arguments;
   void *mirror; {
     arguments =
@@ -455,25 +433,26 @@
       ShouldNotReachHere();
     }
   }
+
+  // No deoptimized frames on the stack
+  return 0;
 }
 
-void CppInterpreter::accessor_entry(methodOop method, intptr_t UNUSED, TRAPS) {
+int CppInterpreter::accessor_entry(methodOop method, intptr_t UNUSED, TRAPS) {
   JavaThread *thread = (JavaThread *) THREAD;
   ZeroStack *stack = thread->zero_stack();
   intptr_t *locals = stack->sp();
 
   // Drop into the slow path if we need a safepoint check
   if (SafepointSynchronize::do_call_back()) {
-    normal_entry(method, 0, THREAD);
-    return;
+    return normal_entry(method, 0, THREAD);
   }
 
   // Load the object pointer and drop into the slow path
   // if we have a NullPointerException
   oop object = LOCALS_OBJECT(0);
   if (object == NULL) {
-    normal_entry(method, 0, THREAD);
-    return;
+    return normal_entry(method, 0, THREAD);
   }
 
   // Read the field index from the bytecode, which looks like this:
@@ -495,17 +474,14 @@
   constantPoolCacheOop cache = method->constants()->cache();
   ConstantPoolCacheEntry* entry = cache->entry_at(index);
   if (!entry->is_resolved(Bytecodes::_getfield)) {
-    normal_entry(method, 0, THREAD);
-    return;
+    return normal_entry(method, 0, THREAD);
   }
 
   // Get the result and push it onto the stack
   switch (entry->flag_state()) {
   case ltos:
   case dtos:
-    if (stack->available_words() < 1) {
-      Unimplemented();
-    }
+    stack->overflow_check(1, CHECK_0);
     stack->alloc(wordSize);
     break;
   }
@@ -585,55 +561,51 @@
       ShouldNotReachHere();
     }
   }
+
+  // No deoptimized frames on the stack
+  return 0;
 }
 
-void CppInterpreter::empty_entry(methodOop method, intptr_t UNUSED, TRAPS) {
+int CppInterpreter::empty_entry(methodOop method, intptr_t UNUSED, TRAPS) {
   JavaThread *thread = (JavaThread *) THREAD;
   ZeroStack *stack = thread->zero_stack();
 
   // Drop into the slow path if we need a safepoint check
   if (SafepointSynchronize::do_call_back()) {
-    normal_entry(method, 0, THREAD);
-    return;
+    return normal_entry(method, 0, THREAD);
   }
 
   // Pop our parameters
   stack->set_sp(stack->sp() + method->size_of_parameters());
+
+  // No deoptimized frames on the stack
+  return 0;
 }
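
Each entry now reports, via its int return value, whether frames were deoptimized (zero means none; see maybe_deoptimize in entry_zero.hpp below). The CHECK_0/CHECK_NULL arguments are HotSpot's standard exception-check macros; a call like the build() above expands, in effect, to:

    // Sketch of what CHECK_0 adds at the call site:
    InterpreterFrame *frame = InterpreterFrame::build(method, THREAD);
    if (HAS_PENDING_EXCEPTION) return 0;   // CHECK_NULL would return NULL instead
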
 
-bool CppInterpreter::stack_overflow_imminent(JavaThread *thread) {
-  // How is the ABI stack?
-  address stack_top = thread->stack_base() - thread->stack_size();
-  int free_stack = os::current_stack_pointer() - stack_top;
-  if (free_stack < StackShadowPages * os::vm_page_size()) {
-    return true;
-  }
+InterpreterFrame *InterpreterFrame::build(const methodOop method, TRAPS) {
+  JavaThread *thread = (JavaThread *) THREAD;
+  ZeroStack *stack = thread->zero_stack();
+
+  // Calculate the size of the frame we'll build, including
+  // any adjustments to the caller's frame that we'll make.
+  int extra_locals  = 0;
+  int monitor_words = 0;
+  int stack_words   = 0;
 
-  // How is the Zero stack?
-  // Throwing a StackOverflowError involves a VM call, which means
-  // we need a frame on the stack.  We should be checking here to
-  // ensure that methods we call have enough room to install the
-  // largest possible frame, but that's more than twice the size
-  // of the entire Zero stack we get by default, so we just check
-  // we have *some* space instead...
-  free_stack = thread->zero_stack()->available_words() * wordSize;
-  if (free_stack < StackShadowPages * os::vm_page_size()) {
-    return true;
+  if (!method->is_native()) {
+    extra_locals = method->max_locals() - method->size_of_parameters();
+    stack_words  = method->max_stack();
   }
+  if (method->is_synchronized()) {
+    monitor_words = frame::interpreter_frame_monitor_size();
+  }
+  stack->overflow_check(
+    extra_locals + header_words + monitor_words + stack_words, CHECK_NULL);
 
-  return false;
-}
-
-InterpreterFrame *InterpreterFrame::build(ZeroStack*       stack,
-                                          const methodOop  method,
-                                          JavaThread*      thread) {
-  int monitor_words =
-    method->is_synchronized() ? frame::interpreter_frame_monitor_size() : 0;
-  int stack_words = method->is_native() ? 0 : method->max_stack();
-
-  if (header_words + monitor_words + stack_words > stack->available_words()) {
-    Unimplemented();
-  }
+  // Adjust the caller's stack frame to accommodate any additional
+  // local variables we have contiguously with our parameters.
+  for (int i = 0; i < extra_locals; i++)
+    stack->push(0);
 
   intptr_t *locals;
   if (method->is_native())
@@ -812,14 +784,13 @@
 
 // Deoptimization helpers
 
-InterpreterFrame *InterpreterFrame::build(ZeroStack* stack, int size) {
+InterpreterFrame *InterpreterFrame::build(int size, TRAPS) {
+  ZeroStack *stack = ((JavaThread *) THREAD)->zero_stack();
+
   int size_in_words = size >> LogBytesPerWord;
   assert(size_in_words * wordSize == size, "unaligned");
   assert(size_in_words >= header_words, "too small");
-
-  if (size_in_words > stack->available_words()) {
-    Unimplemented();
-  }
+  stack->overflow_check(size_in_words, CHECK_NULL);
 
   stack->push(0); // next_frame, filled in later
   intptr_t *fp = stack->sp();
@@ -870,7 +841,7 @@
   int callee_extra_locals = callee_locals - callee_param_count;
 
   if (interpreter_frame) {
-    intptr_t *locals        = interpreter_frame->sp() + method->max_locals();
+    intptr_t *locals        = interpreter_frame->fp() + method->max_locals();
     interpreterState istate = interpreter_frame->get_interpreterState();
     intptr_t *monitor_base  = (intptr_t*) istate;
     intptr_t *stack_base    = monitor_base - monitor_words;
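
Taken together, the changes in this file do two things: InterpreterFrame::build() now sizes the entire frame up front behind a single overflow_check (replacing the scattered Unimplemented() checks), and the deoptimization helper computes locals from fp(), since sp no longer identifies the frame. The up-front sizing, gathered into one expression for clarity (names as in the patch):

    // extra_locals and stack_words are zero for native methods,
    // monitor_words is zero for unsynchronized ones:
    int frame_words = extra_locals + header_words + monitor_words + stack_words;
    stack->overflow_check(frame_words, CHECK_NULL);
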
--- a/src/cpu/zero/vm/cppInterpreter_zero.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/zero/vm/cppInterpreter_zero.hpp	Wed May 19 10:22:39 2010 -0700
@@ -29,19 +29,15 @@
 
  public:
   // Method entries
-  static void normal_entry(methodOop method, intptr_t UNUSED, TRAPS);
-  static void native_entry(methodOop method, intptr_t UNUSED, TRAPS);
-  static void accessor_entry(methodOop method, intptr_t UNUSED, TRAPS);
-  static void empty_entry(methodOop method, intptr_t UNUSED, TRAPS);
+  static int normal_entry(methodOop method, intptr_t UNUSED, TRAPS);
+  static int native_entry(methodOop method, intptr_t UNUSED, TRAPS);
+  static int accessor_entry(methodOop method, intptr_t UNUSED, TRAPS);
+  static int empty_entry(methodOop method, intptr_t UNUSED, TRAPS);
 
  public:
   // Main loop of normal_entry
   static void main_loop(int recurse, TRAPS);
 
  private:
-  // Stack overflow checks
-  static bool stack_overflow_imminent(JavaThread *thread);
-
- private:
   // Fast result type determination
   static BasicType result_type_of(methodOop method);
--- a/src/cpu/zero/vm/entryFrame_zero.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/zero/vm/entryFrame_zero.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2008 Red Hat, Inc.
+ * Copyright 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,10 +47,10 @@
   };
 
  public:
-  static EntryFrame *build(ZeroStack*       stack,
-                           const intptr_t*  parameters,
+  static EntryFrame *build(const intptr_t*  parameters,
                            int              parameter_words,
-                           JavaCallWrapper* call_wrapper);
+                           JavaCallWrapper* call_wrapper,
+                           TRAPS);
  public:
   JavaCallWrapper *call_wrapper() const {
     return (JavaCallWrapper *) value_of_word(call_wrapper_off);
--- a/src/cpu/zero/vm/entry_zero.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/zero/vm/entry_zero.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2008, 2009 Red Hat, Inc.
+ * Copyright 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,20 +41,30 @@
   }
 
  private:
-  typedef void (*NormalEntryFunc)(methodOop method,
-                                  intptr_t  base_pc,
-                                  TRAPS);
-  typedef void (*OSREntryFunc)(methodOop method,
-                               address   osr_buf,
-                               intptr_t  base_pc,
-                               TRAPS);
+  typedef int (*NormalEntryFunc)(methodOop method,
+                                 intptr_t  base_pc,
+                                 TRAPS);
+  typedef int (*OSREntryFunc)(methodOop method,
+                              address   osr_buf,
+                              intptr_t  base_pc,
+                              TRAPS);
 
  public:
   void invoke(methodOop method, TRAPS) const {
-    ((NormalEntryFunc) entry_point())(method, (intptr_t) this, THREAD);
+    maybe_deoptimize(
+      ((NormalEntryFunc) entry_point())(method, (intptr_t) this, THREAD),
+      THREAD);
   }
   void invoke_osr(methodOop method, address osr_buf, TRAPS) const {
-    ((OSREntryFunc) entry_point())(method, osr_buf, (intptr_t) this, THREAD);
+    maybe_deoptimize(
+      ((OSREntryFunc) entry_point())(method, osr_buf, (intptr_t) this, THREAD),
+      THREAD);
+  }
+
+ private:
+  static void maybe_deoptimize(int deoptimized_frames, TRAPS) {
+    if (deoptimized_frames)
+      CppInterpreter::main_loop(deoptimized_frames - 1, THREAD);
   }
 
  public:
--- a/src/cpu/zero/vm/fakeStubFrame_zero.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/zero/vm/fakeStubFrame_zero.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2008 Red Hat, Inc.
+ * Copyright 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@
   };
 
  public:
-  static FakeStubFrame *build(ZeroStack* stack);
+  static FakeStubFrame *build(TRAPS);
 
  public:
   void identify_word(int   frame_index,
--- a/src/cpu/zero/vm/frame_zero.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/zero/vm/frame_zero.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008, 2009 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,10 @@
   return zeroframe()->is_interpreter_frame();
 }
 
+bool frame::is_fake_stub_frame() const {
+  return zeroframe()->is_fake_stub_frame();
+}
+
 frame frame::sender_for_entry_frame(RegisterMap *map) const {
   assert(zeroframe()->is_entry_frame(), "wrong type of frame");
   assert(map != NULL, "map must be set");
@@ -44,14 +48,14 @@
          "sender should be next Java frame");
   map->clear();
   assert(map->include_argument_oops(), "should be set by clear");
-  return frame(sender_sp(), sp() + 1);
+  return frame(zeroframe()->next(), sender_sp());
 }
 
 frame frame::sender_for_nonentry_frame(RegisterMap *map) const {
   assert(zeroframe()->is_interpreter_frame() ||
          zeroframe()->is_shark_frame() ||
          zeroframe()->is_fake_stub_frame(), "wrong type of frame");
-  return frame(sender_sp(), sp() + 1);
+  return frame(zeroframe()->next(), sender_sp());
 }
 
 frame frame::sender(RegisterMap* map) const {
@@ -172,8 +176,8 @@
   char *valuebuf = buf + buflen;
 
   // Print each word of the frame
-  for (intptr_t *addr = fp(); addr <= sp(); addr++) {
-    int offset = sp() - addr;
+  for (intptr_t *addr = sp(); addr <= fp(); addr++) {
+    int offset = fp() - addr;
 
     // Fill in default values, then try and improve them
     snprintf(fieldbuf, buflen, "word[%d]", offset);
--- a/src/cpu/zero/vm/frame_zero.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/zero/vm/frame_zero.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008, 2009 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,17 +32,18 @@
 
   // Constructor
  public:
-  frame(intptr_t* sp, intptr_t* fp);
+  frame(ZeroFrame* zeroframe, intptr_t* sp);
 
-  // The sp of a Zero frame is the address of the highest word in
-  // that frame.  We keep track of the lowest address too, so the
-  // boundaries of the frame are available for debug printing.
  private:
-  intptr_t* _fp;
+  ZeroFrame* _zeroframe;
 
  public:
+  const ZeroFrame *zeroframe() const {
+    return _zeroframe;
+  }
+
   intptr_t* fp() const {
-    return _fp;
+    return (intptr_t *) zeroframe();
   }
 
 #ifdef CC_INTERP
@@ -50,10 +51,6 @@
 #endif // CC_INTERP
 
  public:
-  const ZeroFrame *zeroframe() const {
-    return (ZeroFrame *) sp();
-  }
-
   const EntryFrame *zero_entryframe() const {
     return zeroframe()->as_entry_frame();
   }
@@ -65,6 +62,9 @@
   }
 
  public:
+  bool is_fake_stub_frame() const;
+
+ public:
   frame sender_for_nonentry_frame(RegisterMap* map) const;
 
  public:
--- a/src/cpu/zero/vm/frame_zero.inline.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/zero/vm/frame_zero.inline.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008, 2009 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,16 +26,16 @@
 // Constructors
 
 inline frame::frame() {
+  _zeroframe = NULL;
   _sp = NULL;
-  _fp = NULL;
   _pc = NULL;
   _cb = NULL;
   _deopt_state = unknown;
 }
 
-inline frame::frame(intptr_t* sp, intptr_t* fp) {
+inline frame::frame(ZeroFrame* zf, intptr_t* sp) {
+  _zeroframe = zf;
   _sp = sp;
-  _fp = fp;
   switch (zeroframe()->type()) {
   case ZeroFrame::ENTRY_FRAME:
     _pc = StubRoutines::call_stub_return_pc();
@@ -66,7 +66,7 @@
 // Accessors
 
 inline intptr_t* frame::sender_sp() const {
-  return (intptr_t *) zeroframe()->next();
+  return fp() + 1;
 }
 
 inline intptr_t* frame::link() const {
@@ -120,7 +120,7 @@
 // we can distinguish identity and younger/older relationship. NULL
 // represents an invalid (incomparable) frame.
 inline intptr_t* frame::id() const {
-  return sp();
+  return fp();
 }
 
 inline JavaCallWrapper* frame::entry_frame_call_wrapper() const {
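
Under the new scheme a frame is identified by its ZeroFrame pointer rather than by sp: fp() is the ZeroFrame address, id() follows fp(), and sender_sp() is the word just above the frame. A miniature, self-contained model of those relationships (not VM code):

    #include <cassert>
    #include <cstdint>

    struct MiniFrame {                      // models the patched frame class
      intptr_t* zeroframe;                  // _zeroframe
      intptr_t* sp;                         // _sp, unchanged
      intptr_t* fp() const        { return zeroframe; }
      intptr_t* id() const        { return fp(); }
      intptr_t* sender_sp() const { return fp() + 1; }
    };

    int main() {
      intptr_t words[4] = {0, 0, 0, 0};     // a tiny pretend stack
      MiniFrame f = { &words[2], &words[0] };
      assert(f.id() == &words[2]);
      assert(f.sender_sp() == &words[3]);
      return 0;
    }
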
--- a/src/cpu/zero/vm/globals_zero.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/zero/vm/globals_zero.hpp	Wed May 19 10:22:39 2010 -0700
@@ -35,6 +35,7 @@
 define_pd_global(bool,  UncommonNullCast,     true);
 
 define_pd_global(intx,  CodeEntryAlignment,   32);
+define_pd_global(intx,  OptoLoopAlignment,    16);
 define_pd_global(intx,  InlineFrequencyCount, 100);
 define_pd_global(intx,  PreInflateSpin,       10);
 
--- a/src/cpu/zero/vm/interpreterFrame_zero.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/zero/vm/interpreterFrame_zero.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2008 Red Hat, Inc.
+ * Copyright 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,10 +55,8 @@
   };
 
  public:
-  static InterpreterFrame *build(ZeroStack*      stack,
-                                 const methodOop method,
-                                 JavaThread*     thread);
-  static InterpreterFrame *build(ZeroStack* stack, int size);
+  static InterpreterFrame *build(const methodOop method, TRAPS);
+  static InterpreterFrame *build(int size, TRAPS);
 
  public:
   interpreterState interpreter_state() const {
--- a/src/cpu/zero/vm/interpreterRT_zero.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/zero/vm/interpreterRT_zero.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright 2007, 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -140,9 +140,8 @@
   int required_words =
     (align_size_up(sizeof(ffi_cif), wordSize) >> LogBytesPerWord) +
     (method->is_static() ? 2 : 1) + method->size_of_parameters() + 1;
-  if (required_words > stack->available_words()) {
-    Unimplemented();
-  }
+
+  stack->overflow_check(required_words, CHECK_NULL);
 
   intptr_t *buf = (intptr_t *) stack->alloc(required_words * wordSize);
   SlowSignatureHandlerGenerator sshg(methodHandle(thread, method), buf);
--- a/src/cpu/zero/vm/interpreter_zero.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/zero/vm/interpreter_zero.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * Copyright 2007, 2008 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -36,26 +36,14 @@
 
  public:
   static int expr_index_at(int i) {
-    return stackElementWords() * i;
-  }
-  static int expr_tag_index_at(int i) {
-    assert(TaggedStackInterpreter, "should not call this");
-    Unimplemented();
+    return stackElementWords * i;
   }
 
   static int expr_offset_in_bytes(int i) {
-    return stackElementSize() * i;
-  }
-  static int expr_tag_offset_in_bytes(int i) {
-    assert(TaggedStackInterpreter, "should not call this");
-    Unimplemented();
+    return stackElementSize * i;
   }
 
   static int local_index_at(int i) {
     assert(i <= 0, "local direction already negated");
-    return stackElementWords() * i + (value_offset_in_bytes() / wordSize);
+    return stackElementWords * i;
   }
-  static int local_tag_index_at(int i) {
-    assert(TaggedStackInterpreter, "should not call this");
-    Unimplemented();
-  }
--- a/src/cpu/zero/vm/javaFrameAnchor_zero.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/zero/vm/javaFrameAnchor_zero.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright 2007, 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,21 +23,31 @@
  *
  */
 
+ private:
+  ZeroFrame* volatile _last_Java_fp;
+
  public:
   // Each arch must define reset, save, restore
   // These are used by objects that only care about:
   //  1 - initializing a new state (thread creation, javaCalls)
   //  2 - saving a current state (javaCalls)
   //  3 - restoring an old state (javaCalls)
+  // Note that whenever _last_Java_sp != NULL other anchor fields
+  // must be valid.  The profiler apparently depends on this.
 
   void clear() {
     // clearing _last_Java_sp must be first
     _last_Java_sp = NULL;
     // fence?
+    _last_Java_fp = NULL;
     _last_Java_pc = NULL;
   }
 
   void copy(JavaFrameAnchor* src) {
+    set(src->_last_Java_sp, src->_last_Java_pc, src->_last_Java_fp);
+  }
+
+  void set(intptr_t* sp, address pc, ZeroFrame* fp) {
     // In order to make sure the transition state is valid for "this"
     // We must clear _last_Java_sp before copying the rest of the new
     // data
@@ -46,13 +56,14 @@
     // previous version (pd_cache_state) don't NULL _last_Java_sp
     // unless the value is changing
     //
-    if (_last_Java_sp != src->_last_Java_sp)
+    if (_last_Java_sp != sp)
       _last_Java_sp = NULL;
 
-    _last_Java_pc = src->_last_Java_pc;
+    _last_Java_fp = fp;
+    _last_Java_pc = pc;
     // Must be last so profiler will always see valid frame if
     // has_last_frame() is true
-    _last_Java_sp = src->_last_Java_sp;
+    _last_Java_sp = sp;
   }
 
   bool walkable() {
@@ -67,6 +78,10 @@
     return _last_Java_sp;
   }
 
-  void set_last_Java_sp(intptr_t* sp) {
-    _last_Java_sp = sp;
+  ZeroFrame* last_Java_fp() const {
+    return _last_Java_fp;
   }
+
+  static ByteSize last_Java_fp_offset() {
+    return byte_offset_of(JavaFrameAnchor, _last_Java_fp);
+  }
--- a/src/cpu/zero/vm/methodHandles_zero.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/zero/vm/methodHandles_zero.cpp	Wed May 19 10:22:39 2010 -0700
@@ -26,6 +26,10 @@
 #include "incls/_precompiled.incl"
 #include "incls/_methodHandles_zero.cpp.incl"
 
+int MethodHandles::adapter_conversion_ops_supported_mask() {
+  ShouldNotCallThis();
+}
+
 void MethodHandles::generate_method_handle_stub(MacroAssembler*          masm,
                                                 MethodHandles::EntryKind ek) {
   ShouldNotCallThis();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/zero/vm/stack_zero.cpp	Wed May 19 10:22:39 2010 -0700
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2010 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_stack_zero.cpp.incl"
+
+int ZeroStack::suggest_size(Thread *thread) const {
+  assert(needs_setup(), "already set up");
+  return align_size_down(abi_stack_available(thread) / 2, wordSize);
+}
+
+void ZeroStack::handle_overflow(TRAPS) {
+  JavaThread *thread = (JavaThread *) THREAD;
+
+  // Set up the frame anchor if it isn't already
+  bool has_last_Java_frame = thread->has_last_Java_frame();
+  if (!has_last_Java_frame) {
+    intptr_t *sp = thread->zero_stack()->sp();
+    ZeroFrame *frame = thread->top_zero_frame();
+    while (frame) {
+      if (frame->is_shark_frame())
+        break;
+
+      if (frame->is_interpreter_frame()) {
+        interpreterState istate =
+          frame->as_interpreter_frame()->interpreter_state();
+        if (istate->self_link() == istate)
+          break;
+      }
+
+      sp = ((intptr_t *) frame) + 1;
+      frame = frame->next();
+    }
+
+    if (frame == NULL)
+      fatal("unrecoverable stack overflow");
+
+    thread->set_last_Java_frame(frame, sp);
+  }
+
+  // Throw the exception
+  switch (thread->thread_state()) {
+  case _thread_in_Java:
+    InterpreterRuntime::throw_StackOverflowError(thread);
+    break;
+
+  case _thread_in_vm:
+    Exceptions::throw_stack_overflow_exception(thread, __FILE__, __LINE__);
+    break;
+
+  default:
+    ShouldNotReachHere();
+  }
+
+  // Reset the frame anchor if necessary
+  if (!has_last_Java_frame)
+    thread->reset_last_Java_frame();
+}
+
+#ifndef PRODUCT
+void ZeroStack::zap(int c) {
+  memset(_base, c, available_words() * wordSize);
+}
+#endif // PRODUCT
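
handle_overflow's frame walk above relies on an assumed invariant: an interpreter frame links istate->self_link() back to itself only once the frame is fully initialized, so the comparison is a cheap "safe to anchor here" test. Expressed as a predicate:

    // Assumed invariant, sketched as a predicate (not VM code):
    static bool is_walkable(interpreterState istate) {
      return istate->self_link() == istate;   // set only after full initialization
    }
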
--- a/src/cpu/zero/vm/stack_zero.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/zero/vm/stack_zero.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2008, 2009 Red Hat, Inc.
+ * Copyright 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,14 +29,21 @@
   intptr_t *_top;  // the word past the end of the stack
   intptr_t *_sp;   // the top word on the stack
 
+ private:
+  int _shadow_pages_size; // how much ABI stack must we keep free?
+
  public:
   ZeroStack()
-    : _base(NULL), _top(NULL), _sp(NULL) {}
+    : _base(NULL), _top(NULL), _sp(NULL) {
+    _shadow_pages_size = StackShadowPages * os::vm_page_size();
+  }
 
   bool needs_setup() const {
     return _base == NULL;
   }
 
+  int suggest_size(Thread *thread) const;
+
   void setup(void *mem, size_t size) {
     assert(needs_setup(), "already set up");
     assert(!(size & WordAlignmentMask), "unaligned");
@@ -62,6 +69,9 @@
     _sp = new_sp;
   }
 
+  int total_words() const {
+    return _top - _base;
+  }
   int available_words() const {
     return _sp - _base;
   }
@@ -81,6 +91,18 @@
     return _sp -= count;
   }
 
+  int shadow_pages_size() const {
+    return _shadow_pages_size;
+  }
+  int abi_stack_available(Thread *thread) const;
+
+ public:
+  void overflow_check(int required_words, TRAPS);
+  static void handle_overflow(TRAPS);
+
+ public:
+  void zap(int c) PRODUCT_RETURN;
+
  public:
   static ByteSize base_offset() {
     return byte_offset_of(ZeroStack, _base);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/zero/vm/stack_zero.inline.hpp	Wed May 19 10:22:39 2010 -0700
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2010 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// This function should match SharkStack::CreateStackOverflowCheck
+inline void ZeroStack::overflow_check(int required_words, TRAPS) {
+  // Check the Zero stack
+  if (available_words() < required_words) {
+    handle_overflow(THREAD);
+    return;
+  }
+
+  // Check the ABI stack
+  if (abi_stack_available(THREAD) < 0) {
+    handle_overflow(THREAD);
+    return;
+  }
+}
+
+// This method returns the amount of ABI stack available for us
+// to use under normal circumstances.  Note that the returned
+// value can be negative.
+inline int ZeroStack::abi_stack_available(Thread *thread) const {
+  int stack_used = thread->stack_base() - (address) &stack_used;
+  int stack_free = thread->stack_size() - stack_used;
+  return stack_free - shadow_pages_size();
+}
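
The &stack_used trick works because the C stack grows downward: the address of a local variable approximates the current stack pointer, so subtracting it from the thread's stack base yields the bytes in use. A standalone illustration of the same technique:

    #include <cstddef>

    // Illustrative only: how far below stack_base are we right now?
    static ptrdiff_t approx_stack_used(const char* stack_base) {
      char probe;                       // its address ~= current stack pointer
      return stack_base - &probe;
    }
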
--- a/src/cpu/zero/vm/stubGenerator_zero.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/cpu/zero/vm/stubGenerator_zero.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright 2007, 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,47 +51,49 @@
     // Set up the stack if necessary
     bool stack_needs_teardown = false;
     if (stack->needs_setup()) {
-      size_t stack_used = thread->stack_base() - (address) &stack_used;
-      size_t stack_free = thread->stack_size() - stack_used;
-      size_t zero_stack_size = align_size_down(stack_free / 2, wordSize);
-
+      size_t zero_stack_size = stack->suggest_size(thread);
       stack->setup(alloca(zero_stack_size), zero_stack_size);
       stack_needs_teardown = true;
     }
 
     // Allocate and initialize our frame
-    thread->push_zero_frame(
-      EntryFrame::build(stack, parameters, parameter_words, call_wrapper));
+    EntryFrame *frame =
+      EntryFrame::build(parameters, parameter_words, call_wrapper, THREAD);
 
-    // Make the call
-    Interpreter::invoke_method(method, entry_point, THREAD);
-
-    // Store result depending on type
     if (!HAS_PENDING_EXCEPTION) {
-      switch (result_type) {
-      case T_INT:
-        *(jint *) result = *(jint *) stack->sp();
-        break;
-      case T_LONG:
-        *(jlong *) result = *(jlong *) stack->sp();
-        break;
-      case T_FLOAT:
-        *(jfloat *) result = *(jfloat *) stack->sp();
-        break;
-      case T_DOUBLE:
-        *(jdouble *) result = *(jdouble *) stack->sp();
-        break;
-      case T_OBJECT:
-        *(oop *) result = *(oop *) stack->sp();
-        break;
-      default:
-        ShouldNotReachHere();
+      // Push the frame
+      thread->push_zero_frame(frame);
+
+      // Make the call
+      Interpreter::invoke_method(method, entry_point, THREAD);
+
+      // Store the result
+      if (!HAS_PENDING_EXCEPTION) {
+        switch (result_type) {
+        case T_INT:
+          *(jint *) result = *(jint *) stack->sp();
+          break;
+        case T_LONG:
+          *(jlong *) result = *(jlong *) stack->sp();
+          break;
+        case T_FLOAT:
+          *(jfloat *) result = *(jfloat *) stack->sp();
+          break;
+        case T_DOUBLE:
+          *(jdouble *) result = *(jdouble *) stack->sp();
+          break;
+        case T_OBJECT:
+          *(oop *) result = *(oop *) stack->sp();
+          break;
+        default:
+          ShouldNotReachHere();
+        }
       }
+
+      // Unwind the frame
+      thread->pop_zero_frame();
     }
 
-    // Unwind our frame
-    thread->pop_zero_frame();
-
     // Tear down the stack if necessary
     if (stack_needs_teardown)
       stack->teardown();
@@ -226,13 +228,13 @@
   StubGenerator g(code, all);
 }
 
-EntryFrame *EntryFrame::build(ZeroStack*       stack,
-                              const intptr_t*  parameters,
+EntryFrame *EntryFrame::build(const intptr_t*  parameters,
                               int              parameter_words,
-                              JavaCallWrapper* call_wrapper) {
-  if (header_words + parameter_words > stack->available_words()) {
-    Unimplemented();
-  }
+                              JavaCallWrapper* call_wrapper,
+                              TRAPS) {
+
+  ZeroStack *stack = ((JavaThread *) THREAD)->zero_stack();
+  stack->overflow_check(header_words + parameter_words, CHECK_NULL);
 
   stack->push(0); // next_frame, filled in later
   intptr_t *fp = stack->sp();
--- a/src/os/linux/vm/attachListener_linux.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/os/linux/vm/attachListener_linux.cpp	Wed May 19 10:22:39 2010 -0700
@@ -192,7 +192,8 @@
     res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));
   }
   if (res == -1) {
-    sprintf(path, "%s/.java_pid%d", os::get_temp_directory(), os::current_process_id());
+    snprintf(path, PATH_MAX+1, "%s/.java_pid%d",
+             os::get_temp_directory(), os::current_process_id());
     strcpy(addr.sun_path, path);
     ::unlink(path);
     res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));
@@ -460,13 +461,14 @@
   if (init_at_startup() || is_initialized()) {
     return false;               // initialized at startup or already initialized
   }
-  char fn[32];
+  char fn[PATH_MAX+1];
   sprintf(fn, ".attach_pid%d", os::current_process_id());
   int ret;
   struct stat64 st;
   RESTARTABLE(::stat64(fn, &st), ret);
   if (ret == -1) {
-    sprintf(fn, "/tmp/.attach_pid%d", os::current_process_id());
+    snprintf(fn, sizeof(fn), "%s/.attach_pid%d",
+             os::get_temp_directory(), os::current_process_id());
     RESTARTABLE(::stat64(fn, &st), ret);
   }
   if (ret == 0) {
--- a/src/os/linux/vm/os_linux.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/os/linux/vm/os_linux.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1522,7 +1522,10 @@
 
 const char* os::dll_file_extension() { return ".so"; }
 
-const char* os::get_temp_directory() { return "/tmp/"; }
+const char* os::get_temp_directory() {
+  const char *prop = Arguments::get_property("java.io.tmpdir");
+  return prop == NULL ? "/tmp" : prop;
+}
 
 static bool file_exists(const char* filename) {
   struct stat statbuf;
@@ -2302,10 +2305,11 @@
     return;
   }
 
-  char buf[40];
+  char buf[PATH_MAX+1];
   int num = Atomic::add(1, &cnt);
 
-  sprintf(buf, "/tmp/hs-vm-%d-%d", os::current_process_id(), num);
+  snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
+           os::get_temp_directory(), os::current_process_id(), num);
   unlink(buf);
 
   int fd = open(buf, O_CREAT | O_RDWR, S_IRWXU);
@@ -2784,7 +2788,7 @@
   }
 
   // attach to the region
-  addr = (char*)shmat(shmid, NULL, 0);
+  addr = (char*)shmat(shmid, req_addr, 0);
   int err = errno;
 
   // Remove shmid. If shmat() is successful, the actual shared memory segment
@@ -3491,7 +3495,8 @@
       // libjsig also interposes the sigaction() call below and saves the
       // old sigaction on it own.
     } else {
-      fatal2("Encountered unexpected pre-existing sigaction handler %#lx for signal %d.", (long)oldhand, sig);
+      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
+                    "%#lx for signal %d.", (long)oldhand, sig));
     }
   }
 
@@ -3813,7 +3818,8 @@
 
   Linux::set_page_size(sysconf(_SC_PAGESIZE));
   if (Linux::page_size() == -1) {
-    fatal1("os_linux.cpp: os::init: sysconf failed (%s)", strerror(errno));
+    fatal(err_msg("os_linux.cpp: os::init: sysconf failed (%s)",
+                  strerror(errno)));
   }
   init_page_sizes((size_t) Linux::page_size());
 
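
Two conventions change together in os_linux.cpp: os::get_temp_directory() now
honors the java.io.tmpdir property, and it no longer returns a trailing slash,
so each caller supplies its own separator and bounds the write with snprintf
instead of sprintf.  A hedged sketch of the resulting caller pattern
(pid_file_path is an illustrative name, not a HotSpot function):

    #include <cstdio>

    // Build "<tmpdir>/.java_pid<pid>"; the '/' lives in the format string
    // because the directory no longer carries one, and snprintf truncates
    // instead of overflowing a fixed-size buffer.
    void pid_file_path(char* buf, size_t buflen, const char* tmpdir, int pid) {
      snprintf(buf, buflen, "%s/.java_pid%d", tmpdir, pid);
    }

    // e.g. char path[PATH_MAX + 1]; pid_file_path(path, sizeof(path),
    //      os::get_temp_directory(), os::current_process_id());
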
--- a/src/os/linux/vm/perfMemory_linux.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/os/linux/vm/perfMemory_linux.cpp	Wed May 19 10:22:39 2010 -0700
@@ -145,11 +145,11 @@
 
   const char* tmpdir = os::get_temp_directory();
   const char* perfdir = PERFDATA_NAME;
-  size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 2;
+  size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
   char* dirname = NEW_C_HEAP_ARRAY(char, nbytes);
 
   // construct the path name to user specific tmp directory
-  snprintf(dirname, nbytes, "%s%s_%s", tmpdir, perfdir, user);
+  snprintf(dirname, nbytes, "%s/%s_%s", tmpdir, perfdir, user);
 
   return dirname;
 }
@@ -331,8 +331,9 @@
     }
 
     char* usrdir_name = NEW_C_HEAP_ARRAY(char,
-                              strlen(tmpdirname) + strlen(dentry->d_name) + 1);
+                              strlen(tmpdirname) + strlen(dentry->d_name) + 2);
     strcpy(usrdir_name, tmpdirname);
+    strcat(usrdir_name, "/");
     strcat(usrdir_name, dentry->d_name);
 
     DIR* subdirp = os::opendir(usrdir_name);
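
The bump from +2 to +3 in nbytes is plain byte counting for the new format
string, spelled out here as a worked tally:

    // "%s/%s_%s" writes: tmpdir, '/', perfdir, '_', user, '\0'
    // nbytes = strlen(tmpdir)  + 1      // '/'
    //        + strlen(perfdir) + 1      // '_'
    //        + strlen(user)    + 1      // '\0'
    //        = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3
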
--- a/src/os/solaris/vm/attachListener_solaris.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/os/solaris/vm/attachListener_solaris.cpp	Wed May 19 10:22:39 2010 -0700
@@ -375,7 +375,8 @@
     return -1;
   }
 
-  sprintf(door_path, "%s/.java_pid%d", os::get_temp_directory(), os::current_process_id());
+  snprintf(door_path, sizeof(door_path), "%s/.java_pid%d",
+           os::get_temp_directory(), os::current_process_id());
   RESTARTABLE(::creat(door_path, S_IRUSR | S_IWUSR), fd);
 
   if (fd == -1) {
@@ -591,13 +592,14 @@
   if (init_at_startup() || is_initialized()) {
     return false;               // initialized at startup or already initialized
   }
-  char fn[32];
+  char fn[PATH_MAX+1];
   sprintf(fn, ".attach_pid%d", os::current_process_id());
   int ret;
   struct stat64 st;
   RESTARTABLE(::stat64(fn, &st), ret);
   if (ret == -1) {
-    sprintf(fn, "/tmp/.attach_pid%d", os::current_process_id());
+    snprintf(fn, sizeof(fn), "%s/.attach_pid%d",
+             os::get_temp_directory(), os::current_process_id());
     RESTARTABLE(::stat64(fn, &st), ret);
   }
   if (ret == 0) {
--- a/src/os/solaris/vm/os_solaris.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/os/solaris/vm/os_solaris.cpp	Wed May 19 10:22:39 2010 -0700
@@ -676,15 +676,6 @@
 }
 
 
-static char* get_property(char* name, char* buffer, int buffer_size) {
-  if (os::getenv(name, buffer, buffer_size)) {
-    return buffer;
-  }
-  static char empty[] = "";
-  return empty;
-}
-
-
 void os::init_system_properties_values() {
   char arch[12];
   sysinfo(SI_ARCHITECTURE, arch, sizeof(arch));
@@ -1576,7 +1567,8 @@
   //           treat %g2 as a caller-save register, preserving it in a %lN.
   thread_key_t tk;
   if (thr_keycreate( &tk, NULL ) )
-    fatal1("os::allocate_thread_local_storage: thr_keycreate failed (%s)", strerror(errno));
+    fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
+                  "(%s)", strerror(errno)));
   return int(tk);
 }
 
@@ -1594,7 +1586,8 @@
     if (errno == ENOMEM) {
        vm_exit_out_of_memory(SMALLINT, "thr_setspecific: out of swap space");
     } else {
-      fatal1("os::thread_local_storage_at_put: thr_setspecific failed (%s)", strerror(errno));
+      fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
+                    "(%s)", strerror(errno)));
     }
   } else {
       ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
@@ -1747,7 +1740,7 @@
 jlong os::javaTimeMillis() {
   timeval t;
   if (gettimeofday( &t, NULL) == -1)
-    fatal1("os::javaTimeMillis: gettimeofday (%s)", strerror(errno));
+    fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
   return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
 }
 
@@ -1826,7 +1819,10 @@
 
 const char* os::dll_file_extension() { return ".so"; }
 
-const char* os::get_temp_directory() { return "/tmp/"; }
+const char* os::get_temp_directory() {
+  const char *prop = Arguments::get_property("java.io.tmpdir");
+  return prop == NULL ? "/tmp" : prop;
+}
 
 static bool file_exists(const char* filename) {
   struct stat statbuf;
@@ -4239,7 +4235,8 @@
       // libjsig also interposes the sigaction() call below and saves the
       // old sigaction on it own.
     } else {
-      fatal2("Encountered unexpected pre-existing sigaction handler %#lx for signal %d.", (long)oldhand, sig);
+      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
+                    "%#lx for signal %d.", (long)oldhand, sig));
     }
   }
 
@@ -4770,7 +4767,8 @@
 
   page_size = sysconf(_SC_PAGESIZE);
   if (page_size == -1)
-    fatal1("os_solaris.cpp: os::init: sysconf failed (%s)", strerror(errno));
+    fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
+                  strerror(errno)));
   init_page_sizes((size_t) page_size);
 
   Solaris::initialize_system_info();
@@ -4781,7 +4779,7 @@
 
   int fd = open("/dev/zero", O_RDWR);
   if (fd < 0) {
-    fatal1("os::init: cannot open /dev/zero (%s)", strerror(errno));
+    fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
   } else {
     Solaris::set_dev_zero_fd(fd);
 
--- a/src/os/solaris/vm/perfMemory_solaris.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/os/solaris/vm/perfMemory_solaris.cpp	Wed May 19 10:22:39 2010 -0700
@@ -147,11 +147,11 @@
 
   const char* tmpdir = os::get_temp_directory();
   const char* perfdir = PERFDATA_NAME;
-  size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 2;
+  size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
   char* dirname = NEW_C_HEAP_ARRAY(char, nbytes);
 
   // construct the path name to user specific tmp directory
-  snprintf(dirname, nbytes, "%s%s_%s", tmpdir, perfdir, user);
+  snprintf(dirname, nbytes, "%s/%s_%s", tmpdir, perfdir, user);
 
   return dirname;
 }
@@ -322,8 +322,9 @@
     }
 
     char* usrdir_name = NEW_C_HEAP_ARRAY(char,
-                              strlen(tmpdirname) + strlen(dentry->d_name) + 1);
+                              strlen(tmpdirname) + strlen(dentry->d_name) + 2);
     strcpy(usrdir_name, tmpdirname);
+    strcat(usrdir_name, "/");
     strcat(usrdir_name, dentry->d_name);
 
     DIR* subdirp = os::opendir(usrdir_name);
--- a/src/os/solaris/vm/threadCritical_solaris.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/os/solaris/vm/threadCritical_solaris.cpp	Wed May 19 10:22:39 2010 -0700
@@ -47,7 +47,8 @@
     thread_t owner = thr_self();
     if (global_mut_owner != owner) {
       if (os::Solaris::mutex_lock(&global_mut))
-        fatal1("ThreadCritical::ThreadCritical: mutex_lock failed (%s)", strerror(errno));
+        fatal(err_msg("ThreadCritical::ThreadCritical: mutex_lock failed (%s)",
+                      strerror(errno)));
       assert(global_mut_count == 0, "must have clean count");
       assert(global_mut_owner == -1, "must have clean owner");
     }
@@ -66,7 +67,8 @@
     if (global_mut_count == 0) {
       global_mut_owner = -1;
       if (os::Solaris::mutex_unlock(&global_mut))
-        fatal1("ThreadCritical::~ThreadCritical: mutex_unlock failed (%s)", strerror(errno));
+        fatal(err_msg("ThreadCritical::~ThreadCritical: mutex_unlock failed "
+                      "(%s)", strerror(errno)));
     }
   } else {
     assert (Threads::number_of_threads() == 0, "valid only during initialization");
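
All the fatal1/fatal2 call sites in this changeset collapse onto a single
fatal() that takes a message preformatted by err_msg().  A rough sketch of the
shape of that pattern, with stand-in names (err_msg_like and fatal_like are
illustrative, not HotSpot's declarations):

    #include <cstdarg>
    #include <cstdio>
    #include <cstdlib>

    // Stand-in formatter: printf-style arguments in, one string out.
    static const char* err_msg_like(const char* fmt, ...) {
      static char buf[256];               // sketch; the real helper is safer
      va_list ap;
      va_start(ap, fmt);
      vsnprintf(buf, sizeof(buf), fmt, ap);
      va_end(ap);
      return buf;
    }

    static void fatal_like(const char* msg) {
      fprintf(stderr, "fatal error: %s\n", msg);
      abort();
    }

    // One entry point replaces the numbered fatal1/fatal2 family:
    //   fatal_like(err_msg_like("mutex_lock failed (%s)", strerror(errno)));
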
--- a/src/os/windows/vm/os_windows.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/os/windows/vm/os_windows.cpp	Wed May 19 10:22:39 2010 -0700
@@ -724,7 +724,7 @@
   java_origin.wMilliseconds  = 0;
   FILETIME jot;
   if (!SystemTimeToFileTime(&java_origin, &jot)) {
-    fatal1("Error = %d\nWindows error", GetLastError());
+    fatal(err_msg("Error = %d\nWindows error", GetLastError()));
   }
   _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
   _has_calculated_offset = 1;
@@ -998,15 +998,16 @@
 
 const char* os::dll_file_extension() { return ".dll"; }
 
-const char * os::get_temp_directory()
-{
-    static char path_buf[MAX_PATH];
-    if (GetTempPath(MAX_PATH, path_buf)>0)
-      return path_buf;
-    else{
-      path_buf[0]='\0';
-      return path_buf;
-    }
+const char* os::get_temp_directory() {
+  const char *prop = Arguments::get_property("java.io.tmpdir");
+  if (prop != 0) return prop;
+  static char path_buf[MAX_PATH];
+  if (GetTempPath(MAX_PATH, path_buf) > 0)
+    return path_buf;
+  else {
+    path_buf[0] = '\0';
+    return path_buf;
+  }
 }
 
 static bool file_exists(const char* filename) {
@@ -4094,7 +4095,7 @@
       }
       int err = GetLastError();
       if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) {
-        fatal1("heap walk aborted with error %d", err);
+        fatal(err_msg("heap walk aborted with error %d", err));
       }
       HeapUnlock(heap);
     }
--- a/src/os/windows/vm/perfMemory_windows.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/os/windows/vm/perfMemory_windows.cpp	Wed May 19 10:22:39 2010 -0700
@@ -149,11 +149,11 @@
 
   const char* tmpdir = os::get_temp_directory();
   const char* perfdir = PERFDATA_NAME;
-  size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 2;
+  size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
   char* dirname = NEW_C_HEAP_ARRAY(char, nbytes);
 
   // construct the path name to user specific tmp directory
-  _snprintf(dirname, nbytes, "%s%s_%s", tmpdir, perfdir, user);
+  _snprintf(dirname, nbytes, "%s\\%s_%s", tmpdir, perfdir, user);
 
   return dirname;
 }
@@ -318,8 +318,9 @@
     }
 
     char* usrdir_name = NEW_C_HEAP_ARRAY(char,
-                              strlen(tmpdirname) + strlen(dentry->d_name) + 1);
+                              strlen(tmpdirname) + strlen(dentry->d_name) + 2);
     strcpy(usrdir_name, tmpdirname);
+    strcat(usrdir_name, "\\");
     strcat(usrdir_name, dentry->d_name);
 
     DIR* subdirp = os::opendir(usrdir_name);
--- a/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp	Wed May 19 10:22:39 2010 -0700
@@ -153,7 +153,7 @@
       if (rslt == ENOMEM) {
         vm_exit_out_of_memory(0, "pthread_getattr_np");
       } else {
-        fatal1("pthread_getattr_np failed with errno = %d", rslt);
+        fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
       }
     }
 
--- a/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,9 +30,9 @@
 define_pd_global(intx, ThreadStackSize,          1024); // 0 => use system default
 define_pd_global(intx, VMThreadStackSize,        1024);
 #else
-// ThreadStackSize 320 allows TaggedStackInterpreter and a couple of test cases
-// to run while keeping the number of threads that can be created high.
-// System default ThreadStackSize appears to be 512 which is too big.
+// ThreadStackSize 320 allows a couple of test cases to run while
+// keeping the number of threads that can be created high.  System
+// default ThreadStackSize appears to be 512 which is too big.
 define_pd_global(intx, ThreadStackSize,          320);
 define_pd_global(intx, VMThreadStackSize,        512);
 #endif // AMD64
--- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Wed May 19 10:22:39 2010 -0700
@@ -680,7 +680,7 @@
        if (rslt == ENOMEM) {
          vm_exit_out_of_memory(0, "pthread_getattr_np");
        } else {
-         fatal1("pthread_getattr_np failed with errno = %d", rslt);
+         fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
        }
      }
 
--- a/src/os_cpu/linux_zero/vm/thread_linux_zero.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/os_cpu/linux_zero/vm/thread_linux_zero.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008, 2009 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,18 +68,30 @@
 
  public:
   void set_last_Java_frame() {
-    JavaFrameAnchor *jfa = frame_anchor();
-    jfa->set_last_Java_sp((intptr_t *) top_zero_frame());
+    set_last_Java_frame(top_zero_frame(), zero_stack()->sp());
   }
   void reset_last_Java_frame() {
-    JavaFrameAnchor *jfa = frame_anchor();
-    jfa->set_last_Java_sp(NULL);
+    frame_anchor()->zap();
+  }
+  void set_last_Java_frame(ZeroFrame* fp, intptr_t* sp) {
+    frame_anchor()->set(sp, NULL, fp);
+  }
+
+ public:
+  ZeroFrame* last_Java_fp() {
+    return frame_anchor()->last_Java_fp();
   }
 
  private:
   frame pd_last_frame() {
     assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
-    return frame(last_Java_sp(), zero_stack()->sp());
+    return frame(last_Java_fp(), last_Java_sp());
+  }
+
+ public:
+  static ByteSize last_Java_fp_offset() {
+    return byte_offset_of(JavaThread, _anchor) +
+      JavaFrameAnchor::last_Java_fp_offset();
   }
 
  public:
--- a/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,8 +31,8 @@
 define_pd_global(intx, VMThreadStackSize,        1024);
 define_pd_global(uintx,JVMInvokeMethodSlack,     8*K);
 #else
-// ThreadStackSize 320 allows TaggedStackInterpreter and a couple of test cases
-// to run while keeping the number of threads that can be created high.
+// ThreadStackSize 320 allows a couple of test cases to run while
+// keeping the number of threads that can be created high.
 define_pd_global(intx, ThreadStackSize,          320);
 define_pd_global(intx, VMThreadStackSize,        512);
 define_pd_global(uintx,JVMInvokeMethodSlack,     10*K);
--- a/src/share/vm/adlc/formssel.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/adlc/formssel.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -735,7 +735,7 @@
 
 // This instruction captures the machine-independent bottom_type
 // Expected use is for pointer vs oop determination for LoadP
-bool InstructForm::captures_bottom_type() const {
+bool InstructForm::captures_bottom_type(FormDict &globals) const {
   if( _matrule && _matrule->_rChild &&
        (!strcmp(_matrule->_rChild->_opType,"CastPP")     ||  // new result type
         !strcmp(_matrule->_rChild->_opType,"CastX2P")    ||  // new result type
@@ -748,6 +748,8 @@
   else if ( is_ideal_load() == Form::idealP )                return true;
   else if ( is_ideal_store() != Form::none  )                return true;
 
+  if (needs_base_oop_edge(globals)) return true;
+
   return  false;
 }
 
@@ -1061,7 +1063,7 @@
 
 
 // Base class for this instruction, MachNode except for calls
-const char *InstructForm::mach_base_class()  const {
+const char *InstructForm::mach_base_class(FormDict &globals)  const {
   if( is_ideal_call() == Form::JAVA_STATIC ) {
     return "MachCallStaticJavaNode";
   }
@@ -1092,7 +1094,7 @@
   else if (is_ideal_nop()) {
     return "MachNopNode";
   }
-  else if (captures_bottom_type()) {
+  else if (captures_bottom_type(globals)) {
     return "MachTypeNode";
   } else {
     return "MachNode";
@@ -3861,6 +3863,8 @@
         strcmp(opType,"RoundFloat")==0 ||
         strcmp(opType,"ReverseBytesI")==0 ||
         strcmp(opType,"ReverseBytesL")==0 ||
+        strcmp(opType,"ReverseBytesUS")==0 ||
+        strcmp(opType,"ReverseBytesS")==0 ||
         strcmp(opType,"Replicate16B")==0 ||
         strcmp(opType,"Replicate8B")==0 ||
         strcmp(opType,"Replicate4B")==0 ||
--- a/src/share/vm/adlc/formssel.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/adlc/formssel.hpp	Wed May 19 10:22:39 2010 -0700
@@ -188,7 +188,7 @@
 
   // This instruction captures the machine-independent bottom_type
   // Expected use is for pointer vs oop determination for LoadP
-  virtual bool        captures_bottom_type() const;
+  virtual bool        captures_bottom_type(FormDict& globals) const;
 
   virtual const char *cost();      // Access ins_cost attribute
   virtual uint        num_opnds(); // Count of num_opnds for MachNode class
@@ -229,7 +229,7 @@
   const char         *reduce_left(FormDict &globals)   const;
 
   // Base class for this instruction, MachNode except for calls
-  virtual const char *mach_base_class()  const;
+  virtual const char *mach_base_class(FormDict &globals)  const;
 
   // Check if this instruction can cisc-spill to 'alternate'
   bool                cisc_spills_to(ArchDesc &AD, InstructForm *alternate);
@@ -252,7 +252,7 @@
   bool                has_short_branch_form() { return _short_branch_form != NULL; }
   // Output short branch prototypes and method bodies
   void                declare_short_branch_methods(FILE *fp_cpp);
-  bool                define_short_branch_methods(FILE *fp_cpp);
+  bool                define_short_branch_methods(ArchDesc &AD, FILE *fp_cpp);
 
   uint                alignment() { return _alignment; }
   void                set_alignment(uint val) { _alignment = val; }
--- a/src/share/vm/adlc/output_c.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/adlc/output_c.cpp	Wed May 19 10:22:39 2010 -0700
@@ -721,8 +721,8 @@
   fprintf(fp_cpp, "  }\n");
   fprintf(fp_cpp, "#endif\n\n");
 #endif
-  fprintf(fp_cpp, "  assert(this, \"NULL pipeline info\")\n");
-  fprintf(fp_cpp, "  assert(pred, \"NULL predecessor pipline info\")\n\n");
+  fprintf(fp_cpp, "  assert(this, \"NULL pipeline info\");\n");
+  fprintf(fp_cpp, "  assert(pred, \"NULL predecessor pipline info\");\n\n");
   fprintf(fp_cpp, "  if (pred->hasFixedLatency())\n    return (pred->fixedLatency());\n\n");
   fprintf(fp_cpp, "  // If this is not an operand, then assume a dependence with 0 latency\n");
   fprintf(fp_cpp, "  if (opnd > _read_stage_count)\n    return (0);\n\n");
@@ -1382,7 +1382,7 @@
                                           inst_num, unmatched_edge);
         }
         // If new instruction captures bottom type
-        if( root_form->captures_bottom_type() ) {
+        if( root_form->captures_bottom_type(globals) ) {
           // Get bottom type from instruction whose result we are replacing
           fprintf(fp, "        root->_bottom_type = inst%d->bottom_type();\n", inst_num);
         }
@@ -2963,7 +2963,7 @@
     used |= instr->define_cisc_version(*this, fp);
 
     // Output code to convert to the short branch version, if applicable
-    used |= instr->define_short_branch_methods(fp);
+    used |= instr->define_short_branch_methods(*this, fp);
   }
 
   // Construct the method called by cisc_version() to copy inputs and operands.
@@ -3708,7 +3708,7 @@
   }
 
   // Fill in the bottom_type where requested
-  if ( inst->captures_bottom_type() ) {
+  if ( inst->captures_bottom_type(_globalNames) ) {
     fprintf(fp_cpp, "%s node->_bottom_type = _leaf->bottom_type();\n", indent);
   }
   if( inst->is_ideal_if() ) {
@@ -3762,7 +3762,7 @@
     // Create the MachNode object
     fprintf(fp_cpp, "  %sNode *node = new (C) %sNode();\n", name, name);
     // Fill in the bottom_type where requested
-    if ( this->captures_bottom_type() ) {
+    if ( this->captures_bottom_type(AD.globalNames()) ) {
       fprintf(fp_cpp, "  node->_bottom_type = bottom_type();\n");
     }
 
@@ -3798,7 +3798,7 @@
 
 //---------------------------define_short_branch_methods-----------------------
 // Build definitions for short branch methods
-bool InstructForm::define_short_branch_methods(FILE *fp_cpp) {
+bool InstructForm::define_short_branch_methods(ArchDesc &AD, FILE *fp_cpp) {
   if (has_short_branch_form()) {
     InstructForm *short_branch = short_branch_form();
     const char   *name         = short_branch->_ident;
@@ -3813,7 +3813,7 @@
       fprintf(fp_cpp, "  node->_fcnt = _fcnt;\n");
     }
     // Fill in the bottom_type where requested
-    if ( this->captures_bottom_type() ) {
+    if ( this->captures_bottom_type(AD.globalNames()) ) {
       fprintf(fp_cpp, "  node->_bottom_type = bottom_type();\n");
     }
 
--- a/src/share/vm/adlc/output_h.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/adlc/output_h.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1493,7 +1493,7 @@
     // Build class definition for this instruction
     fprintf(fp,"\n");
     fprintf(fp,"class %sNode : public %s { \n",
-            instr->_ident, instr->mach_base_class() );
+            instr->_ident, instr->mach_base_class(_globalNames) );
     fprintf(fp,"private:\n");
     fprintf(fp,"  MachOper *_opnd_array[%d];\n", instr->num_opnds() );
     if ( instr->is_ideal_jump() ) {
@@ -1566,7 +1566,7 @@
     // Use MachNode::ideal_Opcode() for nodes based on MachNode class
     // if the ideal_Opcode == Op_Node.
     if ( strcmp("Node", instr->ideal_Opcode(_globalNames)) != 0 ||
-         strcmp("MachNode", instr->mach_base_class()) != 0 ) {
+         strcmp("MachNode", instr->mach_base_class(_globalNames)) != 0 ) {
       fprintf(fp,"  virtual int            ideal_Opcode() const { return Op_%s; }\n",
             instr->ideal_Opcode(_globalNames) );
     }
@@ -1631,7 +1631,7 @@
     // Use MachNode::oper_input_base() for nodes based on MachNode class
     // if the base == 1.
     if ( instr->oper_input_base(_globalNames) != 1 ||
-         strcmp("MachNode", instr->mach_base_class()) != 0 ) {
+         strcmp("MachNode", instr->mach_base_class(_globalNames)) != 0 ) {
       fprintf(fp,"  virtual uint           oper_input_base() const { return %d; }\n",
             instr->oper_input_base(_globalNames));
     }
@@ -1906,11 +1906,6 @@
       fprintf(fp,"  const Type            *bottom_type() const { const Type *t = in(oper_input_base()+%d)->bottom_type(); return (req() <= oper_input_base()+%d) ? t : t->meet(in(oper_input_base()+%d)->bottom_type()); } // CMoveN\n",
         offset, offset+1, offset+1);
     }
-    else if( instr->needs_base_oop_edge(_globalNames) ) {
-      // Special hack for ideal AddP.  Bottom type is an oop IFF it has a
-      // legal base-pointer input.  Otherwise it is NOT an oop.
-      fprintf(fp,"  const Type *bottom_type() const { return AddPNode::mach_bottom_type(this); } // AddP\n");
-    }
     else if (instr->is_tls_instruction()) {
       // Special hack for tlsLoadP
       fprintf(fp,"  const Type            *bottom_type() const { return TypeRawPtr::BOTTOM; } // tlsLoadP\n");
--- a/src/share/vm/asm/assembler.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/asm/assembler.cpp	Wed May 19 10:22:39 2010 -0700
@@ -43,7 +43,8 @@
   _code_pos    = cs->end();
   _oop_recorder= code->oop_recorder();
   if (_code_begin == NULL)  {
-    vm_exit_out_of_memory1(0, "CodeCache: no room for %s", code->name());
+    vm_exit_out_of_memory(0, err_msg("CodeCache: no room for %s",
+                                     code->name()));
   }
 }
 
--- a/src/share/vm/asm/codeBuffer.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/asm/codeBuffer.hpp	Wed May 19 10:22:39 2010 -0700
@@ -40,6 +40,7 @@
                  Exceptions,     // Offset where exception handler lives
                  Deopt,          // Offset where deopt handler lives
                  DeoptMH,        // Offset where MethodHandle deopt handler lives
+                 UnwindHandler,  // Offset to default unwind handler
                  max_Entries };
 
   // special value to note codeBlobs where profile (forte) stack walking is
@@ -59,6 +60,7 @@
     _values[Exceptions    ] = -1;
     _values[Deopt         ] = -1;
     _values[DeoptMH       ] = -1;
+    _values[UnwindHandler ] = -1;
   }
 
   int value(Entries e) { return _values[e]; }
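
CodeOffsets thus gains an UnwindHandler slot, initialized to -1 like its peers
so "no handler emitted" stays distinguishable from a handler at offset 0; C1
fills the slot in c1_Compilation.cpp below.  The implied lookup contract, as a
comment-level sketch:

    // int off = offsets->value(CodeOffsets::UnwindHandler);
    // if (off == -1)  -> no unwind handler was emitted for this blob
    // else            -> the handler entry is the code start plus 'off'
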
--- a/src/share/vm/c1/c1_Compilation.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/c1/c1_Compilation.cpp	Wed May 19 10:22:39 2010 -0700
@@ -229,6 +229,10 @@
   code_offsets->set_value(CodeOffsets::DeoptMH, assembler->emit_deopt_handler());
   CHECK_BAILOUT();
 
+  // Emit the handler to remove the activation from the stack and
+  // dispatch to the caller.
+  offsets()->set_value(CodeOffsets::UnwindHandler, assembler->emit_unwind_handler());
+
   // done
   masm()->flush();
 }
@@ -312,7 +316,7 @@
     implicit_exception_table(),
     compiler(),
     _env->comp_level(),
-    needs_debug_information(),
+    true,
     has_unsafe_access()
   );
 }
@@ -445,8 +449,6 @@
   assert(_arena == NULL, "should only be one instance of Compilation in existence at a time");
   _arena = Thread::current()->resource_area();
   _compilation = this;
-  _needs_debug_information = _env->jvmti_can_examine_or_deopt_anywhere() ||
-                               JavaMonitorsInStackTrace || AlwaysEmitDebugInfo || DeoptimizeALot;
   _exception_info_list = new ExceptionInfoList();
   _implicit_exception_table.set_size(0);
   compile_method();
--- a/src/share/vm/c1/c1_Compilation.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/c1/c1_Compilation.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -70,7 +70,6 @@
   int                _max_spills;
   FrameMap*          _frame_map;
   C1_MacroAssembler* _masm;
-  bool               _needs_debug_information;
   bool               _has_exception_handlers;
   bool               _has_fpu_code;
   bool               _has_unsafe_access;
@@ -117,7 +116,6 @@
   // accessors
   ciEnv* env() const                             { return _env; }
   AbstractCompiler* compiler() const             { return _compiler; }
-  bool needs_debug_information() const           { return _needs_debug_information; }
   bool has_exception_handlers() const            { return _has_exception_handlers; }
   bool has_fpu_code() const                      { return _has_fpu_code; }
   bool has_unsafe_access() const                 { return _has_unsafe_access; }
@@ -132,7 +130,6 @@
   CodeOffsets* offsets()                         { return &_offsets; }
 
   // setters
-  void set_needs_debug_information(bool f)       { _needs_debug_information = f; }
   void set_has_exception_handlers(bool f)        { _has_exception_handlers = f; }
   void set_has_fpu_code(bool f)                  { _has_fpu_code = f; }
   void set_has_unsafe_access(bool f)             { _has_unsafe_access = f; }
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Wed May 19 10:22:39 2010 -0700
@@ -829,12 +829,8 @@
     // should be left alone since there can be only one and all code
     // should dispatch to the same one.
     XHandler* h = handlers->handler_at(i);
-    if (h->handler_bci() != SynchronizationEntryBCI) {
-      h->set_entry_block(block_at(h->handler_bci()));
-    } else {
-      assert(h->entry_block()->is_set(BlockBegin::default_exception_handler_flag),
-             "should be the synthetic unlock block");
-    }
+    assert(h->handler_bci() != SynchronizationEntryBCI, "must be real");
+    h->set_entry_block(block_at(h->handler_bci()));
   }
   _jsr_xhandlers = handlers;
 }
@@ -1497,7 +1493,6 @@
 
 Dependencies* GraphBuilder::dependency_recorder() const {
   assert(DeoptC1, "need debug information");
-  compilation()->set_needs_debug_information(true);
   return compilation()->dependency_recorder();
 }
 
@@ -2867,19 +2862,6 @@
   _initial_state = state_at_entry();
   start_block->merge(_initial_state);
 
-  // setup an exception handler to do the unlocking and/or
-  // notification and unwind the frame.
-  BlockBegin* sync_handler = new BlockBegin(-1);
-  sync_handler->set(BlockBegin::exception_entry_flag);
-  sync_handler->set(BlockBegin::is_on_work_list_flag);
-  sync_handler->set(BlockBegin::default_exception_handler_flag);
-
-  ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0);
-  XHandler* h = new XHandler(desc);
-  h->set_entry_block(sync_handler);
-  scope_data()->xhandlers()->append(h);
-  scope_data()->set_has_handler();
-
   // complete graph
   _vmap        = new ValueMap();
   scope->compute_lock_stack_size();
@@ -2930,19 +2912,6 @@
   }
   CHECK_BAILOUT();
 
-  if (sync_handler && sync_handler->state() != NULL) {
-    Value lock = NULL;
-    if (method()->is_synchronized()) {
-      lock = method()->is_static() ? new Constant(new InstanceConstant(method()->holder()->java_mirror())) :
-                                     _initial_state->local_at(0);
-
-      sync_handler->state()->unlock();
-      sync_handler->state()->lock(scope, lock);
-
-    }
-    fill_sync_handler(lock, sync_handler, true);
-  }
-
   _start = setup_start_block(osr_bci, start_block, _osr_entry, _initial_state);
 
   eliminate_redundant_phis(_start);
@@ -3009,7 +2978,11 @@
 
 bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
   if (!InlineNatives           ) INLINE_BAILOUT("intrinsic method inlining disabled");
-  if (callee->is_synchronized()) INLINE_BAILOUT("intrinsic method is synchronized");
+  if (callee->is_synchronized()) {
+    // We don't currently support any synchronized intrinsics
+    return false;
+  }
+
   // callee seems like a good candidate
   // determine id
   bool preserves_state = false;
--- a/src/share/vm/c1/c1_Instruction.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/c1/c1_Instruction.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1628,11 +1628,10 @@
     backward_branch_target_flag   = 1 << 4,
     is_on_work_list_flag          = 1 << 5,
     was_visited_flag              = 1 << 6,
-    default_exception_handler_flag = 1 << 8, // identify block which represents the default exception handler
-    parser_loop_header_flag       = 1 << 9,  // set by parser to identify blocks where phi functions can not be created on demand
-    critical_edge_split_flag      = 1 << 10, // set for all blocks that are introduced when critical edges are split
-    linear_scan_loop_header_flag  = 1 << 11, // set during loop-detection for LinearScan
-    linear_scan_loop_end_flag     = 1 << 12  // set during loop-detection for LinearScan
+    parser_loop_header_flag       = 1 << 7,  // set by parser to identify blocks where phi functions can not be created on demand
+    critical_edge_split_flag      = 1 << 8,  // set for all blocks that are introduced when critical edges are split
+    linear_scan_loop_header_flag  = 1 << 9,  // set during loop-detection for LinearScan
+    linear_scan_loop_end_flag     = 1 << 10  // set during loop-detection for LinearScan
   };
 
   void set(Flag f)                               { _flags |= f; }
--- a/src/share/vm/c1/c1_LIR.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/c1/c1_LIR.cpp	Wed May 19 10:22:39 2010 -0700
@@ -626,8 +626,7 @@
       break;
     }
 
-    case lir_throw:
-    case lir_unwind: {
+    case lir_throw: {
       assert(op->as_Op2() != NULL, "must be");
       LIR_Op2* op2 = (LIR_Op2*)op;
 
@@ -639,6 +638,17 @@
       break;
     }
 
+    case lir_unwind: {
+      assert(op->as_Op1() != NULL, "must be");
+      LIR_Op1* op1 = (LIR_Op1*)op;
+
+      assert(op1->_info == NULL, "no info");
+      assert(op1->_opr->is_valid(), "exception oop");         do_input(op1->_opr);
+      assert(op1->_result->is_illegal(), "no result");
+
+      break;
+    }
+
 
     case lir_tan:
     case lir_sin:
--- a/src/share/vm/c1/c1_LIR.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/c1/c1_LIR.hpp	Wed May 19 10:22:39 2010 -0700
@@ -801,6 +801,7 @@
       , lir_monaddr
       , lir_roundfp
       , lir_safepoint
+      , lir_unwind
   , end_op1
   , begin_op2
       , lir_cmp
@@ -830,7 +831,6 @@
       , lir_ushr
       , lir_alloc_array
       , lir_throw
-      , lir_unwind
       , lir_compare_to
   , end_op2
   , begin_op3
@@ -1062,7 +1062,7 @@
       is_invokedynamic()  // An invokedynamic is always a MethodHandle call site.
       ||
       (method()->holder()->name() == ciSymbol::java_dyn_MethodHandle() &&
-       method()->name()           == ciSymbol::invoke_name());
+       methodOopDesc::is_method_handle_invoke_name(method()->name()->sid()));
   }
 
   intptr_t vtable_offset() const {
@@ -1827,8 +1827,12 @@
   void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor,  left, right, dst)); }
 
   void null_check(LIR_Opr opr, CodeEmitInfo* info)         { append(new LIR_Op1(lir_null_check, opr, info)); }
-  void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info)); }
-  void unwind_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { append(new LIR_Op2(lir_unwind, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info)); }
+  void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
+    append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info));
+  }
+  void unwind_exception(LIR_Opr exceptionOop) {
+    append(new LIR_Op1(lir_unwind, exceptionOop));
+  }
 
   void compare_to (LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
     append(new LIR_Op2(lir_compare_to,  left, right, dst));
--- a/src/share/vm/c1/c1_LIRAssembler.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/c1/c1_LIRAssembler.cpp	Wed May 19 10:22:39 2010 -0700
@@ -552,6 +552,10 @@
       monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
       break;
 
+    case lir_unwind:
+      unwind_op(op->in_opr());
+      break;
+
     default:
       Unimplemented();
       break;
@@ -707,8 +711,7 @@
       break;
 
     case lir_throw:
-    case lir_unwind:
-      throw_op(op->in_opr1(), op->in_opr2(), op->info(), op->code() == lir_unwind);
+      throw_op(op->in_opr1(), op->in_opr2(), op->info());
       break;
 
     default:
--- a/src/share/vm/c1/c1_LIRAssembler.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/c1/c1_LIRAssembler.hpp	Wed May 19 10:22:39 2010 -0700
@@ -39,6 +39,8 @@
   Instruction*       _pending_non_safepoint;
   int                _pending_non_safepoint_offset;
 
+  Label              _unwind_handler_entry;
+
 #ifdef ASSERT
   BlockList          _branch_target_blocks;
   void check_no_unbound_labels();
@@ -134,6 +136,7 @@
 
   // code patterns
   int  emit_exception_handler();
+  int  emit_unwind_handler();
   void emit_exception_entries(ExceptionInfoList* info_list);
   int  emit_deopt_handler();
 
@@ -217,7 +220,8 @@
 
   void build_frame();
 
-  void throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind);
+  void throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info);
+  void unwind_op(LIR_Opr exceptionOop);
   void monitor_address(int monitor_ix, LIR_Opr dst);
 
   void align_backward_branch_target();
--- a/src/share/vm/c1/c1_LIRGenerator.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1765,35 +1765,17 @@
     __ null_check(exception_opr, new CodeEmitInfo(info, true));
   }
 
-  if (compilation()->env()->jvmti_can_post_on_exceptions() &&
-      !block()->is_set(BlockBegin::default_exception_handler_flag)) {
+  if (compilation()->env()->jvmti_can_post_on_exceptions()) {
     // we need to go through the exception lookup path to get JVMTI
     // notification done
     unwind = false;
   }
 
-  assert(!block()->is_set(BlockBegin::default_exception_handler_flag) || unwind,
-         "should be no more handlers to dispatch to");
-
-  if (compilation()->env()->dtrace_method_probes() &&
-      block()->is_set(BlockBegin::default_exception_handler_flag)) {
-    // notify that this frame is unwinding
-    BasicTypeList signature;
-    signature.append(T_INT);    // thread
-    signature.append(T_OBJECT); // methodOop
-    LIR_OprList* args = new LIR_OprList();
-    args->append(getThreadPointer());
-    LIR_Opr meth = new_register(T_OBJECT);
-    __ oop2reg(method()->constant_encoding(), meth);
-    args->append(meth);
-    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
-  }
-
   // move exception oop into fixed register
   __ move(exception_opr, exceptionOopOpr());
 
   if (unwind) {
-    __ unwind_exception(LIR_OprFact::illegalOpr, exceptionOopOpr(), info);
+    __ unwind_exception(exceptionOopOpr());
   } else {
     __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
   }
--- a/src/share/vm/c1/c1_LinearScan.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/c1/c1_LinearScan.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2005-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2608,12 +2608,17 @@
     } else if (opr->is_double_xmm()) {
       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation");
       VMReg rname_first  = opr->as_xmm_double_reg()->as_VMReg();
+#  ifdef _LP64
+      first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
+      second = &_int_0_scope_value;
+#  else
       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
       // %%% This is probably a waste but we'll keep things as they were for now
       if (true) {
         VMReg rname_second = rname_first->next();
         second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
       }
+#  endif
 #endif
 
     } else if (opr->is_double_fpu()) {
@@ -2639,13 +2644,17 @@
 #endif
 
       VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrHi());
-
+#ifdef _LP64
+      first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
+      second = &_int_0_scope_value;
+#else
       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
       // %%% This is probably a waste but we'll keep things as they were for now
       if (true) {
         VMReg rname_second = rname_first->next();
         second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
       }
+#endif
 
     } else {
       ShouldNotReachHere();
@@ -2805,9 +2814,6 @@
 
 
 void LinearScan::compute_debug_info(CodeEmitInfo* info, int op_id) {
-  if (!compilation()->needs_debug_information()) {
-    return;
-  }
   TRACE_LINEAR_SCAN(3, tty->print_cr("creating debug information at op_id %d", op_id));
 
   IRScope* innermost_scope = info->scope();
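
In the two _LP64 branches above, a double is recorded as one Location::dbl
because the whole 64-bit value fits in a single register, with
_int_0_scope_value as filler for the slot pair; the 32-bit path keeps the
historical pair of 'normal' halves.  Summarized:

    // LP64:  first  = Location::dbl at rname_first     (whole double)
    //        second = _int_0_scope_value               (filler)
    // ILP32: first  = Location::normal at rname_first          (low half)
    //        second = Location::normal at rname_first->next()  (high half)
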
--- a/src/share/vm/c1/c1_globals.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/c1/c1_globals.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -252,9 +252,6 @@
   develop(bool, BailoutOnExceptionHandlers, false,                          \
           "bailout of compilation for methods with exception handlers")     \
                                                                             \
-  develop(bool, AlwaysEmitDebugInfo, false,                                 \
-          "always emit debug info")                                         \
-                                                                            \
   develop(bool, InstallMethods, true,                                       \
           "Install methods at the end of successful compilations")          \
                                                                             \
--- a/src/share/vm/ci/bcEscapeAnalyzer.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/ci/bcEscapeAnalyzer.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1408,8 +1408,11 @@
 }
 
 void BCEscapeAnalyzer::copy_dependencies(Dependencies *deps) {
-  if(!has_dependencies())
-    return;
+  if (ciEnv::current()->jvmti_can_hotswap_or_post_breakpoint()) {
+    // Also record evol dependencies so redefinition of the
+    // callee will trigger recompilation.
+    deps->assert_evol_method(method());
+  }
   for (int i = 0; i < _dependencies.length(); i+=2) {
     ciKlass *k = _dependencies[i]->as_klass();
     ciMethod *m = _dependencies[i+1]->as_method();
--- a/src/share/vm/ci/ciEnv.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/ci/ciEnv.cpp	Wed May 19 10:22:39 2010 -0700
@@ -176,7 +176,6 @@
   // Get Jvmti capabilities under lock to get consistent values.
   MutexLocker mu(JvmtiThreadState_lock);
   _jvmti_can_hotswap_or_post_breakpoint = JvmtiExport::can_hotswap_or_post_breakpoint();
-  _jvmti_can_examine_or_deopt_anywhere  = JvmtiExport::can_examine_or_deopt_anywhere();
   _jvmti_can_access_local_variables     = JvmtiExport::can_access_local_variables();
   _jvmti_can_post_on_exceptions         = JvmtiExport::can_post_on_exceptions();
 }
@@ -385,11 +384,6 @@
                                                      KILL_COMPILE_ON_FATAL_(fail_type));
   }
 
-  if (found_klass != NULL) {
-    // Found it.  Build a CI handle.
-    return get_object(found_klass)->as_klass();
-  }
-
   // If we fail to find an array klass, look again for its element type.
   // The element type may be available either locally or via constraints.
   // In either case, if we can find the element type in the system dictionary,
@@ -414,6 +408,11 @@
     }
   }
 
+  if (found_klass != NULL) {
+    // Found it.  Build a CI handle.
+    return get_object(found_klass)->as_klass();
+  }
+
   if (require_local)  return NULL;
   // Not yet loaded into the VM, or not governed by loader constraints.
   // Make a CI representative for it.
@@ -732,26 +731,29 @@
 // ciEnv::get_fake_invokedynamic_method_impl
 ciMethod* ciEnv::get_fake_invokedynamic_method_impl(constantPoolHandle cpool,
                                                     int index, Bytecodes::Code bc) {
+  // Compare the following logic with InterpreterRuntime::resolve_invokedynamic.
   assert(bc == Bytecodes::_invokedynamic, "must be invokedynamic");
 
-  // Get the CallSite from the constant pool cache.
-  ConstantPoolCacheEntry* cpc_entry = cpool->cache()->secondary_entry_at(index);
-  assert(cpc_entry != NULL && cpc_entry->is_secondary_entry(), "sanity");
-  Handle call_site = cpc_entry->f1();
+  bool is_resolved = cpool->cache()->main_entry_at(index)->is_resolved(bc);
+  if (is_resolved && (oop) cpool->cache()->secondary_entry_at(index)->f1() == NULL)
+    // FIXME: code generation could allow for null (unlinked) call site
+    is_resolved = false;
 
-  // Call site might not be linked yet.
-  if (call_site.is_null()) {
+  // Call site might not be resolved yet.  We could create a real invoker method from the
+  // compiler, but it is simpler to stop the code path here with an unlinked method.
+  if (!is_resolved) {
     ciInstanceKlass* mh_klass = get_object(SystemDictionary::MethodHandle_klass())->as_instance_klass();
-    ciSymbol*       sig_sym   = get_object(cpool->signature_ref_at(index))->as_symbol();
-    return get_unloaded_method(mh_klass, ciSymbol::invoke_name(), sig_sym);
+    ciSymbol*        sig_sym  = get_object(cpool->signature_ref_at(index))->as_symbol();
+    return get_unloaded_method(mh_klass, ciSymbol::invokeExact_name(), sig_sym);
   }
 
-  // Get the methodOop from the CallSite.
-  methodOop method_oop = (methodOop) java_dyn_CallSite::vmmethod(call_site());
-  assert(method_oop != NULL, "sanity");
-  assert(method_oop->is_method_handle_invoke(), "consistent");
+  // Get the invoker methodOop from the constant pool.
+  intptr_t f2_value = cpool->cache()->main_entry_at(index)->f2();
+  methodOop signature_invoker = methodOop(f2_value);
+  assert(signature_invoker != NULL && signature_invoker->is_method() && signature_invoker->is_method_handle_invoke(),
+         "correct result from LinkResolver::resolve_invokedynamic");
 
-  return get_object(method_oop)->as_method();
+  return get_object(signature_invoker)->as_method();
 }
 
 
@@ -887,8 +889,6 @@
     if (!failing() &&
         ( (!jvmti_can_hotswap_or_post_breakpoint() &&
            JvmtiExport::can_hotswap_or_post_breakpoint()) ||
-          (!jvmti_can_examine_or_deopt_anywhere() &&
-           JvmtiExport::can_examine_or_deopt_anywhere()) ||
           (!jvmti_can_access_local_variables() &&
            JvmtiExport::can_access_local_variables()) ||
           (!jvmti_can_post_on_exceptions() &&
--- a/src/share/vm/ci/ciEnv.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/ci/ciEnv.hpp	Wed May 19 10:22:39 2010 -0700
@@ -55,7 +55,6 @@
 
   // Cache Jvmti state
   bool  _jvmti_can_hotswap_or_post_breakpoint;
-  bool  _jvmti_can_examine_or_deopt_anywhere;
   bool  _jvmti_can_access_local_variables;
   bool  _jvmti_can_post_on_exceptions;
 
@@ -257,7 +256,6 @@
   // Cache Jvmti state
   void  cache_jvmti_state();
   bool  jvmti_can_hotswap_or_post_breakpoint() const { return _jvmti_can_hotswap_or_post_breakpoint; }
-  bool  jvmti_can_examine_or_deopt_anywhere()  const { return _jvmti_can_examine_or_deopt_anywhere; }
   bool  jvmti_can_access_local_variables()     const { return _jvmti_can_access_local_variables; }
   bool  jvmti_can_post_on_exceptions()         const { return _jvmti_can_post_on_exceptions; }
 
--- a/src/share/vm/ci/ciObjectFactory.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/ci/ciObjectFactory.cpp	Wed May 19 10:22:39 2010 -0700
@@ -103,7 +103,7 @@
     for (i = vmSymbols::FIRST_SID; i < vmSymbols::SID_LIMIT; i++) {
       symbolHandle sym_handle = vmSymbolHandles::symbol_handle_at((vmSymbols::SID) i);
       assert(vmSymbols::find_sid(sym_handle()) == i, "1-1 mapping");
-      ciSymbol* sym = new (_arena) ciSymbol(sym_handle);
+      ciSymbol* sym = new (_arena) ciSymbol(sym_handle, (vmSymbols::SID) i);
       init_ident_of(sym);
       _shared_ci_symbols[i] = sym;
     }
@@ -273,7 +273,8 @@
 
   if (o->is_symbol()) {
     symbolHandle h_o(THREAD, (symbolOop)o);
-    return new (arena()) ciSymbol(h_o);
+    assert(vmSymbols::find_sid(h_o()) == vmSymbols::NO_SID, "");
+    return new (arena()) ciSymbol(h_o, vmSymbols::NO_SID);
   } else if (o->is_klass()) {
     KlassHandle h_k(THREAD, (klassOop)o);
     Klass* k = ((klassOop)o)->klass_part();
--- a/src/share/vm/ci/ciSymbol.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/ci/ciSymbol.cpp	Wed May 19 10:22:39 2010 -0700
@@ -29,7 +29,17 @@
 // ciSymbol::ciSymbol
 //
 // Preallocated handle variant.  Used with handles from vmSymbolHandles.
-ciSymbol::ciSymbol(symbolHandle h_s) : ciObject(h_s) {
+ciSymbol::ciSymbol(symbolHandle h_s, vmSymbols::SID sid)
+  : ciObject(h_s), _sid(sid)
+{
+  assert(sid_ok(), "must be in vmSymbols");
+}
+
+// Normal case for non-famous symbols.
+ciSymbol::ciSymbol(symbolOop s)
+  : ciObject(s), _sid(vmSymbols::NO_SID)
+{
+  assert(sid_ok(), "must not be in vmSymbols");
 }
 
 // ciSymbol
--- a/src/share/vm/ci/ciSymbol.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/ci/ciSymbol.hpp	Wed May 19 10:22:39 2010 -0700
@@ -36,8 +36,11 @@
   friend class ciObjArrayKlass;
 
 private:
-  ciSymbol(symbolOop s) : ciObject(s) {}
-  ciSymbol(symbolHandle s);   // for use with vmSymbolHandles
+  const vmSymbols::SID _sid;
+  DEBUG_ONLY( bool sid_ok() { return vmSymbols::find_sid(get_symbolOop()) == _sid; } )
+
+  ciSymbol(symbolOop s);  // normal case, for symbols not mentioned in vmSymbols
+  ciSymbol(symbolHandle s, vmSymbols::SID sid);   // for use with vmSymbolHandles
 
   symbolOop get_symbolOop() const { return (symbolOop)get_oop(); }
 
@@ -52,6 +55,9 @@
   static ciSymbol* make_impl(const char* s);
 
 public:
+  // The enumeration ID from vmSymbols, or vmSymbols::NO_SID if none.
+  vmSymbols::SID sid() const { return _sid; }
+
   // The text of the symbol as a null-terminated utf8 string.
   const char* as_utf8();
   int         utf8_length();
--- a/src/share/vm/classfile/classFileParser.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/classfile/classFileParser.cpp	Wed May 19 10:22:39 2010 -0700
@@ -334,7 +334,8 @@
         }
         break;
       default:
-        fatal1("bad constant pool tag value %u", cp->tag_at(index).value());
+        fatal(err_msg("bad constant pool tag value %u",
+                      cp->tag_at(index).value()));
         ShouldNotReachHere();
         break;
     } // end of switch
@@ -1837,7 +1838,8 @@
     _has_vanilla_constructor = true;
   }
 
-  if (EnableMethodHandles && m->is_method_handle_invoke()) {
+  if (EnableMethodHandles && (m->is_method_handle_invoke() ||
+                              m->is_method_handle_adapter())) {
     THROW_MSG_(vmSymbols::java_lang_VirtualMachineError(),
                "Method handle invokers must be defined internally to the VM", nullHandle);
   }
@@ -2956,8 +2958,8 @@
 #endif
     bool compact_fields   = CompactFields;
     int  allocation_style = FieldsAllocationStyle;
-    if( allocation_style < 0 || allocation_style > 1 ) { // Out of range?
-      assert(false, "0 <= FieldsAllocationStyle <= 1");
+    if( allocation_style < 0 || allocation_style > 2 ) { // Out of range?
+      assert(false, "0 <= FieldsAllocationStyle <= 2");
       allocation_style = 1; // Optimistic
     }
 
@@ -2993,6 +2995,25 @@
     } else if( allocation_style == 1 ) {
       // Fields order: longs/doubles, ints, shorts/chars, bytes, oops
       next_nonstatic_double_offset = next_nonstatic_field_offset;
+    } else if( allocation_style == 2 ) {
+      // Fields allocation: oops fields in super and sub classes are together.
+      if( nonstatic_field_size > 0 && super_klass() != NULL &&
+          super_klass->nonstatic_oop_map_size() > 0 ) {
+        int map_size = super_klass->nonstatic_oop_map_size();
+        OopMapBlock* first_map = super_klass->start_of_nonstatic_oop_maps();
+        OopMapBlock* last_map = first_map + map_size - 1;
+        int next_offset = last_map->offset() + (last_map->count() * heapOopSize);
+        if (next_offset == next_nonstatic_field_offset) {
+          allocation_style = 0;   // allocate oops first
+          next_nonstatic_oop_offset    = next_nonstatic_field_offset;
+          next_nonstatic_double_offset = next_nonstatic_oop_offset +
+                                         (nonstatic_oop_count * heapOopSize);
+        }
+      }
+      if( allocation_style == 2 ) {
+        allocation_style = 1;     // allocate oops last
+        next_nonstatic_double_offset = next_nonstatic_field_offset;
+      }
     } else {
       ShouldNotReachHere();
     }
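
FieldsAllocationStyle==2 tries to butt the subclass's oop fields directly
against the superclass's trailing oop map so the collector scans one
contiguous block; it only fires when the super's last oop block ends exactly
at the subclass's first free offset, and otherwise degrades to style 1.  With
illustrative numbers (heapOopSize of 8 assumed):

    // Super's last oop map: offset() = 24, count() = 2, heapOopSize = 8
    //   => block covers [24, 40), so next_offset = 24 + 2*8 = 40
    // Subclass: next_nonstatic_field_offset = 40
    //   => offsets line up: allocate subclass oops first, extending the
    //      super's block to [24, 40 + nonstatic_oop_count*8)
    // Any gap between the two => fall back to style 1 (oops last)
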
--- a/src/share/vm/classfile/dictionary.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/classfile/dictionary.cpp	Wed May 19 10:22:39 2010 -0700
@@ -127,7 +127,7 @@
 
 
 bool Dictionary::do_unloading(BoolObjectClosure* is_alive) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint")
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   bool class_was_unloaded = false;
   int  index = 0; // Defined here for portability! Do not move
 
@@ -561,10 +561,11 @@
 
 
 SymbolPropertyEntry* SymbolPropertyTable::find_entry(int index, unsigned int hash,
-                                                     symbolHandle sym) {
-  assert(index == index_for(sym), "incorrect index?");
+                                                     symbolHandle sym,
+                                                     intptr_t sym_mode) {
+  assert(index == index_for(sym, sym_mode), "incorrect index?");
   for (SymbolPropertyEntry* p = bucket(index); p != NULL; p = p->next()) {
-    if (p->hash() == hash && p->symbol() == sym()) {
+    if (p->hash() == hash && p->symbol() == sym() && p->symbol_mode() == sym_mode) {
       return p;
     }
   }
@@ -573,12 +574,12 @@
 
 
 SymbolPropertyEntry* SymbolPropertyTable::add_entry(int index, unsigned int hash,
-                                                    symbolHandle sym) {
+                                                    symbolHandle sym, intptr_t sym_mode) {
   assert_locked_or_safepoint(SystemDictionary_lock);
-  assert(index == index_for(sym), "incorrect index?");
-  assert(find_entry(index, hash, sym) == NULL, "no double entry");
+  assert(index == index_for(sym, sym_mode), "incorrect index?");
+  assert(find_entry(index, hash, sym, sym_mode) == NULL, "no double entry");
 
-  SymbolPropertyEntry* p = new_entry(hash, sym());
+  SymbolPropertyEntry* p = new_entry(hash, sym(), sym_mode);
   Hashtable::add_entry(index, p);
   return p;
 }
--- a/src/share/vm/classfile/dictionary.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/classfile/dictionary.hpp	Wed May 19 10:22:39 2010 -0700
@@ -223,12 +223,16 @@
 class SymbolPropertyEntry : public HashtableEntry {
   friend class VMStructs;
  private:
+  intptr_t _symbol_mode;  // secondary key
   oop     _property_oop;
   address _property_data;
 
  public:
   symbolOop symbol() const          { return (symbolOop) literal(); }
 
+  intptr_t symbol_mode() const      { return _symbol_mode; }
+  void set_symbol_mode(intptr_t m)  { _symbol_mode = m; }
+
   oop      property_oop() const     { return _property_oop; }
   void set_property_oop(oop p)      { _property_oop = p; }
 
@@ -248,6 +252,7 @@
 
   void print_on(outputStream* st) const {
     symbol()->print_value_on(st);
+    st->print("/mode="INTX_FORMAT, symbol_mode());
     st->print(" -> ");
     bool printed = false;
     if (property_oop() != NULL) {
@@ -285,8 +290,9 @@
     ShouldNotReachHere();
   }
 
-  SymbolPropertyEntry* new_entry(unsigned int hash, symbolOop symbol) {
+  SymbolPropertyEntry* new_entry(unsigned int hash, symbolOop symbol, intptr_t symbol_mode) {
     SymbolPropertyEntry* entry = (SymbolPropertyEntry*) Hashtable::new_entry(hash, symbol);
+    entry->set_symbol_mode(symbol_mode);
     entry->set_property_oop(NULL);
     entry->set_property_data(NULL);
     return entry;
@@ -300,16 +306,20 @@
     Hashtable::free_entry(entry);
   }
 
-  unsigned int compute_hash(symbolHandle sym) {
+  unsigned int compute_hash(symbolHandle sym, intptr_t symbol_mode) {
     // Use the regular identity_hash.
-    return Hashtable::compute_hash(sym);
+    return Hashtable::compute_hash(sym) ^ symbol_mode;
+  }
+
+  int index_for(symbolHandle name, intptr_t symbol_mode) {
+    return hash_to_index(compute_hash(name, symbol_mode));
   }
 
   // need not be locked; no state change
-  SymbolPropertyEntry* find_entry(int index, unsigned int hash, symbolHandle name);
+  SymbolPropertyEntry* find_entry(int index, unsigned int hash, symbolHandle name, intptr_t name_mode);
 
   // must be done under SystemDictionary_lock
-  SymbolPropertyEntry* add_entry(int index, unsigned int hash, symbolHandle name);
+  SymbolPropertyEntry* add_entry(int index, unsigned int hash, symbolHandle name, intptr_t name_mode);
 
   // GC support
   void oops_do(OopClosure* f);
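
The (symbol, mode) pair now forms a composite key: the mode is folded into the
hash and re-checked on equality, so one symbol can carry several independent
properties. A self-contained sketch with simplified types, not the VM's
Hashtable:

#include <stdint.h>
#include <stddef.h>

struct Entry {
  unsigned    hash;
  const void* symbol;       // identity-compared, like symbolOop
  intptr_t    symbol_mode;  // secondary key
  Entry*      next;
};

static unsigned compute_hash(unsigned identity_hash, intptr_t mode) {
  return identity_hash ^ (unsigned)mode;    // same fold as compute_hash()
}

static Entry* find(Entry* bucket, unsigned hash,
                   const void* sym, intptr_t mode) {
  for (Entry* p = bucket; p != NULL; p = p->next) {
    if (p->hash == hash && p->symbol == sym && p->symbol_mode == mode)
      return p;                             // both keys must match
  }
  return NULL;
}
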
--- a/src/share/vm/classfile/javaClasses.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/classfile/javaClasses.cpp	Wed May 19 10:22:39 2010 -0700
@@ -2446,24 +2446,20 @@
 
 // Support for java_dyn_CallSite
 
-int java_dyn_CallSite::_type_offset;
 int java_dyn_CallSite::_target_offset;
-int java_dyn_CallSite::_vmmethod_offset;
+int java_dyn_CallSite::_caller_method_offset;
+int java_dyn_CallSite::_caller_bci_offset;
 
 void java_dyn_CallSite::compute_offsets() {
   if (!EnableInvokeDynamic)  return;
   klassOop k = SystemDictionary::CallSite_klass();
   if (k != NULL) {
-    compute_offset(_type_offset,   k, vmSymbols::type_name(),   vmSymbols::java_dyn_MethodType_signature(), true);
-    compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_dyn_MethodHandle_signature(), true);
-    compute_offset(_vmmethod_offset, k, vmSymbols::vmmethod_name(), vmSymbols::object_signature(), true);
+    compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_dyn_MethodHandle_signature());
+    compute_offset(_caller_method_offset, k, vmSymbols::vmmethod_name(), vmSymbols::sun_dyn_MemberName_signature());
+    compute_offset(_caller_bci_offset, k, vmSymbols::vmindex_name(), vmSymbols::int_signature());
   }
 }
 
-oop java_dyn_CallSite::type(oop site) {
-  return site->obj_field(_type_offset);
-}
-
 oop java_dyn_CallSite::target(oop site) {
   return site->obj_field(_target_offset);
 }
@@ -2472,12 +2468,20 @@
   site->obj_field_put(_target_offset, target);
 }
 
-oop java_dyn_CallSite::vmmethod(oop site) {
-  return site->obj_field(_vmmethod_offset);
+oop java_dyn_CallSite::caller_method(oop site) {
+  return site->obj_field(_caller_method_offset);
+}
+
+void java_dyn_CallSite::set_caller_method(oop site, oop ref) {
+  site->obj_field_put(_caller_method_offset, ref);
 }
 
-void java_dyn_CallSite::set_vmmethod(oop site, oop ref) {
-  site->obj_field_put(_vmmethod_offset, ref);
+jint java_dyn_CallSite::caller_bci(oop site) {
+  return site->int_field(_caller_bci_offset);
+}
+
+void java_dyn_CallSite::set_caller_bci(oop site, jint bci) {
+  site->int_field_put(_caller_bci_offset, bci);
 }
 
 
--- a/src/share/vm/classfile/javaClasses.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/classfile/javaClasses.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1068,21 +1068,22 @@
   friend class JavaClasses;
 
 private:
-  static int _type_offset;
   static int _target_offset;
-  static int _vmmethod_offset;
+  static int _caller_method_offset;
+  static int _caller_bci_offset;
 
   static void compute_offsets();
 
 public:
   // Accessors
-  static oop            type(oop site);
-
   static oop            target(oop site);
   static void       set_target(oop site, oop target);
 
-  static oop            vmmethod(oop site);
-  static void       set_vmmethod(oop site, oop ref);
+  static oop            caller_method(oop site);
+  static void       set_caller_method(oop site, oop ref);
+
+  static jint           caller_bci(oop site);
+  static void       set_caller_bci(oop site, jint bci);
 
   // Testers
   static bool is_subclass(klassOop klass) {
@@ -1094,8 +1095,8 @@
 
   // Accessors for code generation:
   static int target_offset_in_bytes()           { return _target_offset; }
-  static int type_offset_in_bytes()             { return _type_offset; }
-  static int vmmethod_offset_in_bytes()         { return _vmmethod_offset; }
+  static int caller_method_offset_in_bytes()    { return _caller_method_offset; }
+  static int caller_bci_offset_in_bytes()       { return _caller_bci_offset; }
 };
 
 
--- a/src/share/vm/classfile/loaderConstraints.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/classfile/loaderConstraints.cpp	Wed May 19 10:22:39 2010 -0700
@@ -103,7 +103,7 @@
 
 
 void LoaderConstraintTable::purge_loader_constraints(BoolObjectClosure* is_alive) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint")
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   // Remove unloaded entries from constraint table
   for (int index = 0; index < table_size(); index++) {
     LoaderConstraintEntry** p = bucket_addr(index);
@@ -334,33 +334,6 @@
   return NULL;
 }
 
-
-klassOop LoaderConstraintTable::find_constrained_elem_klass(symbolHandle name,
-                                                            symbolHandle elem_name,
-                                                            Handle loader,
-                                                            TRAPS) {
-  LoaderConstraintEntry *p = *(find_loader_constraint(name, loader));
-  if (p != NULL) {
-    assert(p->klass() == NULL, "Expecting null array klass");
-
-    // The array name has a constraint, but it will not have a class. Check
-    // each loader for an associated elem
-    for (int i = 0; i < p->num_loaders(); i++) {
-      Handle no_protection_domain;
-
-      klassOop k = SystemDictionary::find(elem_name, p->loader(i), no_protection_domain, THREAD);
-      if (k != NULL) {
-        // Return the first elem klass found.
-        return k;
-      }
-    }
-  }
-
-  // No constraints, or else no klass loaded yet.
-  return NULL;
-}
-
-
 void LoaderConstraintTable::ensure_loader_constraint_capacity(
                                                      LoaderConstraintEntry *p,
                                                     int nfree) {
--- a/src/share/vm/classfile/loaderConstraints.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/classfile/loaderConstraints.hpp	Wed May 19 10:22:39 2010 -0700
@@ -66,9 +66,6 @@
   //                                           bool is_method, TRAPS)
 
   klassOop find_constrained_klass(symbolHandle name, Handle loader);
-  klassOop find_constrained_elem_klass(symbolHandle name, symbolHandle elem_name,
-                                       Handle loader, TRAPS);
-
 
   // Class loader constraints
 
--- a/src/share/vm/classfile/resolutionErrors.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/classfile/resolutionErrors.cpp	Wed May 19 10:22:39 2010 -0700
@@ -102,7 +102,7 @@
 
 // Remove unloaded entries from the table
 void ResolutionErrorTable::purge_resolution_errors(BoolObjectClosure* is_alive) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint")
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   for (int i = 0; i < table_size(); i++) {
     for (ResolutionErrorEntry** p = bucket_addr(i); *p != NULL; ) {
       ResolutionErrorEntry* entry = *p;
--- a/src/share/vm/classfile/systemDictionary.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/classfile/systemDictionary.cpp	Wed May 19 10:22:39 2010 -0700
@@ -2178,9 +2178,8 @@
   // a loader constraint that would require this loader to return the
   // klass that is already loaded.
   if (FieldType::is_array(class_name())) {
-    // Array classes are hard because their klassOops are not kept in the
-    // constraint table. The array klass may be constrained, but the elem class
-    // may not be.
+    // For array classes, their klassOops are not kept in the
+    // constraint table. The element klassOops are.
     jint dimension;
     symbolOop object_key;
     BasicType t = FieldType::get_array_info(class_name(), &dimension,
@@ -2190,8 +2189,9 @@
     } else {
       symbolHandle elem_name(THREAD, object_key);
       MutexLocker mu(SystemDictionary_lock, THREAD);
-      klass = constraints()->find_constrained_elem_klass(class_name, elem_name, class_loader, THREAD);
+      klass = constraints()->find_constrained_klass(elem_name, class_loader);
     }
+    // If element class already loaded, allocate array klass
     if (klass != NULL) {
       klass = Klass::cast(klass)->array_klass_or_null(dimension);
     }
@@ -2209,22 +2209,38 @@
                                              Handle class_loader1,
                                              Handle class_loader2,
                                              Thread* THREAD) {
-  unsigned int d_hash1 = dictionary()->compute_hash(class_name, class_loader1);
+  symbolHandle constraint_name;
+  if (!FieldType::is_array(class_name())) {
+    constraint_name = class_name;
+  } else {
+    // For array classes, their klassOops are not kept in the
+    // constraint table. The element classes are.
+    jint dimension;
+    symbolOop object_key;
+    BasicType t = FieldType::get_array_info(class_name(), &dimension,
+                                            &object_key, CHECK_(false));
+    // primitive types always pass
+    if (t != T_OBJECT) {
+      return true;
+    } else {
+      constraint_name = symbolHandle(THREAD, object_key);
+    }
+  }
+  unsigned int d_hash1 = dictionary()->compute_hash(constraint_name, class_loader1);
   int d_index1 = dictionary()->hash_to_index(d_hash1);
 
-  unsigned int d_hash2 = dictionary()->compute_hash(class_name, class_loader2);
+  unsigned int d_hash2 = dictionary()->compute_hash(constraint_name, class_loader2);
   int d_index2 = dictionary()->hash_to_index(d_hash2);
-
   {
-    MutexLocker mu_s(SystemDictionary_lock, THREAD);
+  MutexLocker mu_s(SystemDictionary_lock, THREAD);
 
-    // Better never do a GC while we're holding these oops
-    No_Safepoint_Verifier nosafepoint;
+  // Better never do a GC while we're holding these oops
+  No_Safepoint_Verifier nosafepoint;
 
-    klassOop klass1 = find_class(d_index1, d_hash1, class_name, class_loader1);
-    klassOop klass2 = find_class(d_index2, d_hash2, class_name, class_loader2);
-    return constraints()->add_entry(class_name, klass1, class_loader1,
-                                    klass2, class_loader2);
+  klassOop klass1 = find_class(d_index1, d_hash1, constraint_name, class_loader1);
+  klassOop klass2 = find_class(d_index2, d_hash2, constraint_name, class_loader2);
+  return constraints()->add_entry(constraint_name, klass1, class_loader1,
+                                  klass2, class_loader2);
   }
 }
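
Both hunks above normalize an array type to its element class before touching
the constraint table, and let primitive-element arrays pass unconditionally.
A sketch of that normalization on descriptor strings, assuming JVM
field-descriptor syntax; names are illustrative:

#include <string>

// Returns true when a constraint check is needed and sets *elem_name to the
// element class; returns false for primitive-element arrays (always pass).
static bool constraint_element(const std::string& class_name,
                               std::string* elem_name) {
  size_t i = 0;
  while (i < class_name.size() && class_name[i] == '[') i++;  // strip dims
  if (i == 0) { *elem_name = class_name; return true; }       // not an array
  if (i >= class_name.size() || class_name[i] != 'L')
    return false;                                             // e.g. "[[I"
  // "Lpkg/Name;" -> "pkg/Name"
  *elem_name = class_name.substr(i + 1, class_name.size() - i - 2);
  return true;
}
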
 
@@ -2301,6 +2317,7 @@
 // Returns the name of the type that failed a loader constraint check, or
 // NULL if no constraint failed. The returned C string needs cleaning up
 // with a ResourceMark in the caller.  No exception except OOME is thrown.
+// Arrays are not added to the loader constraint table; their elements are.
 char* SystemDictionary::check_signature_loaders(symbolHandle signature,
                                                Handle loader1, Handle loader2,
                                                bool is_method, TRAPS)  {
@@ -2324,118 +2341,150 @@
 }
 
 
-methodOop SystemDictionary::find_method_handle_invoke(symbolHandle signature,
-                                                      Handle class_loader,
-                                                      Handle protection_domain,
+methodOop SystemDictionary::find_method_handle_invoke(symbolHandle name,
+                                                      symbolHandle signature,
+                                                      KlassHandle accessing_klass,
                                                       TRAPS) {
   if (!EnableMethodHandles)  return NULL;
-  assert(class_loader.is_null() && protection_domain.is_null(),
-         "cannot load specialized versions of MethodHandle.invoke");
   if (invoke_method_table() == NULL) {
     // create this side table lazily
     _invoke_method_table = new SymbolPropertyTable(_invoke_method_size);
   }
-  unsigned int hash  = invoke_method_table()->compute_hash(signature);
+  vmSymbols::SID name_id = vmSymbols::find_sid(name());
+  assert(name_id != vmSymbols::NO_SID, "must be a known name");
+  unsigned int hash  = invoke_method_table()->compute_hash(signature, name_id);
   int          index = invoke_method_table()->hash_to_index(hash);
-  SymbolPropertyEntry* spe = invoke_method_table()->find_entry(index, hash, signature);
+  SymbolPropertyEntry* spe = invoke_method_table()->find_entry(index, hash, signature, name_id);
+  methodHandle non_cached_result;
   if (spe == NULL || spe->property_oop() == NULL) {
+    spe = NULL;
     // Must create lots of stuff here, but outside of the SystemDictionary lock.
     if (THREAD->is_Compiler_thread())
       return NULL;              // do not attempt from within compiler
-    Handle mt = compute_method_handle_type(signature(),
-                                           class_loader, protection_domain,
-                                           CHECK_NULL);
+    bool found_on_bcp = false;
+    Handle mt = find_method_handle_type(signature(), accessing_klass, found_on_bcp, CHECK_NULL);
     KlassHandle  mh_klass = SystemDictionaryHandles::MethodHandle_klass();
-    methodHandle m = methodOopDesc::make_invoke_method(mh_klass, signature,
+    methodHandle m = methodOopDesc::make_invoke_method(mh_klass, name, signature,
                                                        mt, CHECK_NULL);
     // Now grab the lock.  We might have to throw away the new method,
     // if a racing thread has managed to install one at the same time.
-    {
+    if (found_on_bcp) {
       MutexLocker ml(SystemDictionary_lock, Thread::current());
-      spe = invoke_method_table()->find_entry(index, hash, signature);
+      spe = invoke_method_table()->find_entry(index, hash, signature, name_id);
       if (spe == NULL)
-        spe = invoke_method_table()->add_entry(index, hash, signature);
+        spe = invoke_method_table()->add_entry(index, hash, signature, name_id);
       if (spe->property_oop() == NULL)
         spe->set_property_oop(m());
+    } else {
+      non_cached_result = m;
     }
   }
-  methodOop m = (methodOop) spe->property_oop();
-  assert(m->is_method(), "");
-  return m;
+  if (spe != NULL && spe->property_oop() != NULL) {
+    assert(spe->property_oop()->is_method(), "");
+    return (methodOop) spe->property_oop();
+  } else {
+    return non_cached_result();
+  }
 }
 
 // Ask Java code to find or construct a java.dyn.MethodType for the given
 // signature, as interpreted relative to the given class loader.
 // Because of class loader constraints, all method handle usage must be
 // consistent with this loader.
-Handle SystemDictionary::compute_method_handle_type(symbolHandle signature,
-                                                    Handle class_loader,
-                                                    Handle protection_domain,
-                                                    TRAPS) {
+Handle SystemDictionary::find_method_handle_type(symbolHandle signature,
+                                                 KlassHandle accessing_klass,
+                                                 bool& return_bcp_flag,
+                                                 TRAPS) {
+  Handle class_loader, protection_domain;
+  bool is_on_bcp = true;  // keep this true as long as we can materialize from the boot classloader
   Handle empty;
   int npts = ArgumentCount(signature()).size();
   objArrayHandle pts = oopFactory::new_objArray(SystemDictionary::Class_klass(), npts, CHECK_(empty));
   int arg = 0;
   Handle rt;                            // the return type from the signature
   for (SignatureStream ss(signature()); !ss.is_done(); ss.next()) {
-    oop mirror;
-    if (!ss.is_object()) {
-      mirror = Universe::java_mirror(ss.type());
-    } else {
-      symbolOop    name_oop = ss.as_symbol(CHECK_(empty));
-      symbolHandle name(THREAD, name_oop);
-      klassOop klass = resolve_or_fail(name,
-                                       class_loader, protection_domain,
-                                       true, CHECK_(empty));
-      mirror = Klass::cast(klass)->java_mirror();
+    oop mirror = NULL;
+    if (is_on_bcp) {
+      mirror = ss.as_java_mirror(class_loader, protection_domain,
+                                 SignatureStream::ReturnNull, CHECK_(empty));
+      if (mirror == NULL) {
+        // fall back from BCP to accessing_klass
+        if (accessing_klass.not_null()) {
+          class_loader      = Handle(THREAD, instanceKlass::cast(accessing_klass())->class_loader());
+          protection_domain = Handle(THREAD, instanceKlass::cast(accessing_klass())->protection_domain());
+        }
+        is_on_bcp = false;
+      }
+    }
+    if (!is_on_bcp) {
+      // Resolve, throwing a real error if it doesn't work.
+      mirror = ss.as_java_mirror(class_loader, protection_domain,
+                                 SignatureStream::NCDFError, CHECK_(empty));
     }
     if (ss.at_return_type())
       rt = Handle(THREAD, mirror);
     else
       pts->obj_at_put(arg++, mirror);
+    // Check accessibility.
+    if (ss.is_object() && accessing_klass.not_null()) {
+      klassOop sel_klass = java_lang_Class::as_klassOop(mirror);
+      // Emulate constantPoolOopDesc::verify_constant_pool_resolve.
+      if (Klass::cast(sel_klass)->oop_is_objArray())
+        sel_klass = objArrayKlass::cast(sel_klass)->bottom_klass();
+      if (Klass::cast(sel_klass)->oop_is_instance()) {
+        KlassHandle sel_kh(THREAD, sel_klass);
+        LinkResolver::check_klass_accessability(accessing_klass, sel_kh, CHECK_(empty));
+      }
+    }
   }
   assert(arg == npts, "");
 
-  // call MethodType java.dyn.MethodType::makeImpl(Class rt, Class[] pts, false, true)
-  bool varargs = false, trusted = true;
+  // call sun.dyn.MethodHandleNatives::findMethodType(Class rt, Class[] pts) -> MethodType
   JavaCallArguments args(Handle(THREAD, rt()));
   args.push_oop(pts());
-  args.push_int(false);
-  args.push_int(trusted);
   JavaValue result(T_OBJECT);
   JavaCalls::call_static(&result,
-                         SystemDictionary::MethodType_klass(),
-                         vmSymbols::makeImpl_name(), vmSymbols::makeImpl_signature(),
+                         SystemDictionary::MethodHandleNatives_klass(),
+                         vmSymbols::findMethodHandleType_name(),
+                         vmSymbols::findMethodHandleType_signature(),
                          &args, CHECK_(empty));
+
+  // report back to the caller with the MethodType and the "on_bcp" flag
+  return_bcp_flag = is_on_bcp;
   return Handle(THREAD, (oop) result.get_jobject());
 }
 
 
 // Ask Java code to find or construct a java.dyn.CallSite for the given
 // name and signature, as interpreted relative to the given class loader.
-Handle SystemDictionary::make_dynamic_call_site(KlassHandle caller,
-                                                int caller_method_idnum,
+Handle SystemDictionary::make_dynamic_call_site(Handle bootstrap_method,
+                                                symbolHandle name,
+                                                methodHandle signature_invoker,
+                                                Handle info,
+                                                methodHandle caller_method,
                                                 int caller_bci,
-                                                symbolHandle name,
-                                                methodHandle mh_invdyn,
                                                 TRAPS) {
   Handle empty;
-  // call java.dyn.CallSite::makeSite(caller, name, mtype, cmid, cbci)
+  Handle caller_mname = MethodHandles::new_MemberName(CHECK_(empty));
+  MethodHandles::init_MemberName(caller_mname(), caller_method());
+
+  // call sun.dyn.MethodHandleNatives::makeDynamicCallSite(bootm, name, mtype, info, caller_mname, caller_pos)
   oop name_str_oop = StringTable::intern(name(), CHECK_(empty)); // not a handle!
-  JavaCallArguments args(Handle(THREAD, caller->java_mirror()));
+  JavaCallArguments args(Handle(THREAD, bootstrap_method()));
   args.push_oop(name_str_oop);
-  args.push_oop(mh_invdyn->method_handle_type());
-  args.push_int(caller_method_idnum);
+  args.push_oop(signature_invoker->method_handle_type());
+  args.push_oop(info());
+  args.push_oop(caller_mname());
   args.push_int(caller_bci);
   JavaValue result(T_OBJECT);
   JavaCalls::call_static(&result,
-                         SystemDictionary::CallSite_klass(),
-                         vmSymbols::makeSite_name(), vmSymbols::makeSite_signature(),
+                         SystemDictionary::MethodHandleNatives_klass(),
+                         vmSymbols::makeDynamicCallSite_name(),
+                         vmSymbols::makeDynamicCallSite_signature(),
                          &args, CHECK_(empty));
   oop call_site_oop = (oop) result.get_jobject();
   assert(call_site_oop->is_oop()
          /*&& java_dyn_CallSite::is_instance(call_site_oop)*/, "must be sane");
-  java_dyn_CallSite::set_vmmethod(call_site_oop, mh_invdyn());
   if (TraceMethodHandles) {
 #ifndef PRODUCT
     tty->print_cr("Linked invokedynamic bci=%d site="INTPTR_FORMAT":", caller_bci, call_site_oop);
@@ -2446,9 +2495,7 @@
   return call_site_oop;
 }
 
-Handle SystemDictionary::find_bootstrap_method(KlassHandle caller,
-                                               KlassHandle search_bootstrap_klass,
-                                               TRAPS) {
+Handle SystemDictionary::find_bootstrap_method(KlassHandle caller, TRAPS) {
   Handle empty;
   if (!caller->oop_is_instance())  return empty;
 
@@ -2459,57 +2506,12 @@
     if (TraceMethodHandles) {
       tty->print_cr("bootstrap method for "PTR_FORMAT" cached as "PTR_FORMAT":", ik(), boot_method_oop);
     }
-    NOT_PRODUCT(if (!boot_method_oop->is_oop()) { tty->print_cr("*** boot MH of "PTR_FORMAT" = "PTR_FORMAT, ik(), boot_method_oop); ik()->print(); });
     assert(boot_method_oop->is_oop()
            && java_dyn_MethodHandle::is_instance(boot_method_oop), "must be sane");
     return Handle(THREAD, boot_method_oop);
   }
-  boot_method_oop = NULL;  // GC safety
 
-  // call java.dyn.Linkage::findBootstrapMethod(caller, sbk)
-  JavaCallArguments args(Handle(THREAD, ik->java_mirror()));
-  if (search_bootstrap_klass.is_null())
-    args.push_oop(Handle());
-  else
-    args.push_oop(search_bootstrap_klass->java_mirror());
-  JavaValue result(T_OBJECT);
-  JavaCalls::call_static(&result,
-                         SystemDictionary::Linkage_klass(),
-                         vmSymbols::findBootstrapMethod_name(),
-                         vmSymbols::findBootstrapMethod_signature(),
-                         &args, CHECK_(empty));
-  boot_method_oop = (oop) result.get_jobject();
-
-  if (boot_method_oop != NULL) {
-    if (TraceMethodHandles) {
-#ifndef PRODUCT
-      tty->print_cr("--------");
-      tty->print_cr("bootstrap method for "PTR_FORMAT" computed as "PTR_FORMAT":", ik(), boot_method_oop);
-      ik()->print();
-      boot_method_oop->print();
-      tty->print_cr("========");
-#endif //PRODUCT
-    }
-    assert(boot_method_oop->is_oop()
-           && java_dyn_MethodHandle::is_instance(boot_method_oop), "must be sane");
-    // probably no race conditions, but let's be careful:
-    if (Atomic::cmpxchg_ptr(boot_method_oop, ik->adr_bootstrap_method(), NULL) == NULL)
-      ik->set_bootstrap_method(boot_method_oop);
-    else
-      boot_method_oop = ik->bootstrap_method();
-  } else {
-    if (TraceMethodHandles) {
-#ifndef PRODUCT
-      tty->print_cr("--------");
-      tty->print_cr("bootstrap method for "PTR_FORMAT" computed as NULL:", ik());
-      ik()->print();
-      tty->print_cr("========");
-#endif //PRODUCT
-    }
-    boot_method_oop = ik->bootstrap_method();
-  }
-
-  return Handle(THREAD, boot_method_oop);
+  return empty;
 }
 
 // Since the identity hash code for symbols changes when the symbols are
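
find_method_handle_type, earlier in this file, resolves each type in the
signature in two phases: first against the boot class path in a return-null
mode, and only on failure against the accessing class's loader in a throwing
mode, clearing the is_on_bcp flag so the result is not cached. A compact
sketch of that control flow, with stand-in function types:

#include <stddef.h>

// Stand-in for class resolution with two failure modes.
typedef const void* (*ResolveFn)(const char* name, bool throw_on_fail);

static const void* resolve_type(const char* name,
                                ResolveFn boot, ResolveFn accessor,
                                bool* on_bcp) {
  if (*on_bcp) {
    const void* m = boot(name, false);   // like SignatureStream::ReturnNull
    if (m != NULL) return m;
    *on_bcp = false;                     // result no longer BCP-cacheable
  }
  return accessor(name, true);           // like SignatureStream::NCDFError
}
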
--- a/src/share/vm/classfile/systemDictionary.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/classfile/systemDictionary.hpp	Wed May 19 10:22:39 2010 -0700
@@ -136,6 +136,7 @@
   template(MethodHandle_klass,           java_dyn_MethodHandle,          Opt) \
   template(MemberName_klass,             sun_dyn_MemberName,             Opt) \
   template(MethodHandleImpl_klass,       sun_dyn_MethodHandleImpl,       Opt) \
+  template(MethodHandleNatives_klass,    sun_dyn_MethodHandleNatives,    Opt) \
   template(AdapterMethodHandle_klass,    sun_dyn_AdapterMethodHandle,    Opt) \
   template(BoundMethodHandle_klass,      sun_dyn_BoundMethodHandle,      Opt) \
   template(DirectMethodHandle_klass,     sun_dyn_DirectMethodHandle,     Opt) \
@@ -463,29 +464,28 @@
 
   // JSR 292
   // find the java.dyn.MethodHandles::invoke method for a given signature
-  static methodOop find_method_handle_invoke(symbolHandle signature,
-                                             Handle class_loader,
-                                             Handle protection_domain,
+  static methodOop find_method_handle_invoke(symbolHandle name,
+                                             symbolHandle signature,
+                                             KlassHandle accessing_klass,
                                              TRAPS);
-  // ask Java to compute the java.dyn.MethodType object for a given signature
-  static Handle    compute_method_handle_type(symbolHandle signature,
-                                              Handle class_loader,
-                                              Handle protection_domain,
-                                              TRAPS);
+  // ask Java to compute a java.dyn.MethodType object for a given signature
+  static Handle    find_method_handle_type(symbolHandle signature,
+                                           KlassHandle accessing_klass,
+                                           bool& return_bcp_flag,
+                                           TRAPS);
   // ask Java to create a dynamic call site, while linking an invokedynamic op
-  static Handle    make_dynamic_call_site(KlassHandle caller,
-                                          int caller_method_idnum,
+  static Handle    make_dynamic_call_site(Handle bootstrap_method,
+                                          // Callee information:
+                                          symbolHandle name,
+                                          methodHandle signature_invoker,
+                                          Handle info,
+                                          // Caller information:
+                                          methodHandle caller_method,
                                           int caller_bci,
-                                          symbolHandle name,
-                                          methodHandle mh_invoke,
                                           TRAPS);
 
   // coordinate with Java about bootstrap methods
-  static Handle    find_bootstrap_method(KlassHandle caller,
-                                         // This argument is non-null only when a
-                                         // classfile attribute has been found:
-                                         KlassHandle search_bootstrap_klass,
-                                         TRAPS);
+  static Handle    find_bootstrap_method(KlassHandle caller, TRAPS);
 
   // Utility for printing loader "name" as part of tracing constraints
   static const char* loader_name(oop loader) {
--- a/src/share/vm/classfile/vmSymbols.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/classfile/vmSymbols.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -137,6 +137,7 @@
   template(java_lang_CloneNotSupportedException,      "java/lang/CloneNotSupportedException")     \
   template(java_lang_IllegalAccessException,          "java/lang/IllegalAccessException")         \
   template(java_lang_IllegalArgumentException,        "java/lang/IllegalArgumentException")       \
+  template(java_lang_IllegalStateException,           "java/lang/IllegalStateException")          \
   template(java_lang_IllegalMonitorStateException,    "java/lang/IllegalMonitorStateException")   \
   template(java_lang_IllegalThreadStateException,     "java/lang/IllegalThreadStateException")    \
   template(java_lang_IndexOutOfBoundsException,       "java/lang/IndexOutOfBoundsException")      \
@@ -201,6 +202,11 @@
   template(newField_signature,                        "(Lsun/reflect/FieldInfo;)Ljava/lang/reflect/Field;") \
   template(newMethod_name,                            "newMethod")                                \
   template(newMethod_signature,                       "(Lsun/reflect/MethodInfo;)Ljava/lang/reflect/Method;") \
+  /* the following two names must be in order: */                                                 \
+  template(invokeExact_name,                          "invokeExact")                              \
+  template(invokeGeneric_name,                        "invokeGeneric")                            \
+  template(invokeVarargs_name,                        "invokeVarargs")                            \
+  template(star_name,                                 "*") /*not really a name*/                  \
   template(invoke_name,                               "invoke")                                   \
   template(override_name,                             "override")                                 \
   template(parameterTypes_name,                       "parameterTypes")                           \
@@ -231,16 +237,17 @@
   template(java_dyn_MethodTypeForm,                   "java/dyn/MethodTypeForm")                  \
   template(java_dyn_MethodTypeForm_signature,         "Ljava/dyn/MethodTypeForm;")                \
   template(sun_dyn_MemberName,                        "sun/dyn/MemberName")                       \
+  template(sun_dyn_MemberName_signature,              "Lsun/dyn/MemberName;")                     \
   template(sun_dyn_MethodHandleImpl,                  "sun/dyn/MethodHandleImpl")                 \
+  template(sun_dyn_MethodHandleNatives,               "sun/dyn/MethodHandleNatives")              \
   template(sun_dyn_AdapterMethodHandle,               "sun/dyn/AdapterMethodHandle")              \
   template(sun_dyn_BoundMethodHandle,                 "sun/dyn/BoundMethodHandle")                \
   template(sun_dyn_DirectMethodHandle,                "sun/dyn/DirectMethodHandle")               \
-  template(makeImpl_name,                             "makeImpl") /*MethodType::makeImpl*/        \
-  template(makeImpl_signature,    "(Ljava/lang/Class;[Ljava/lang/Class;ZZ)Ljava/dyn/MethodType;") \
-  template(makeSite_name,                             "makeSite") /*CallSite::makeSite*/          \
-  template(makeSite_signature,    "(Ljava/lang/Class;Ljava/lang/String;Ljava/dyn/MethodType;II)Ljava/dyn/CallSite;") \
-  template(findBootstrapMethod_name,                  "findBootstrapMethod")                      \
-  template(findBootstrapMethod_signature, "(Ljava/lang/Class;Ljava/lang/Class;)Ljava/dyn/MethodHandle;") \
+  /* internal up-calls made only by the JVM, via class sun.dyn.MethodHandleNatives: */            \
+  template(findMethodHandleType_name,                 "findMethodHandleType")                     \
+  template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/dyn/MethodType;") \
+  template(makeDynamicCallSite_name,                  "makeDynamicCallSite")                      \
+  template(makeDynamicCallSite_signature, "(Ljava/dyn/MethodHandle;Ljava/lang/String;Ljava/dyn/MethodType;Ljava/lang/Object;Lsun/dyn/MemberName;I)Ljava/dyn/CallSite;") \
   NOT_LP64(  do_alias(machine_word_signature,         int_signature)  )                           \
   LP64_ONLY( do_alias(machine_word_signature,         long_signature) )                           \
                                                                                                   \
@@ -357,6 +364,8 @@
   template(void_double_signature,                     "()D")                                      \
   template(int_void_signature,                        "(I)V")                                     \
   template(int_int_signature,                         "(I)I")                                     \
+  template(char_char_signature,                       "(C)C")                                     \
+  template(short_short_signature,                     "(S)S")                                     \
   template(int_bool_signature,                        "(I)Z")                                     \
   template(float_int_signature,                       "(F)I")                                     \
   template(double_long_signature,                     "(D)J")                                     \
@@ -406,8 +415,9 @@
   template(void_classloader_signature,                "()Ljava/lang/ClassLoader;")                                \
   template(void_object_signature,                     "()Ljava/lang/Object;")                                     \
   template(void_class_signature,                      "()Ljava/lang/Class;")                                      \
-  template(void_string_signature,                     "()Ljava/lang/String;")                                      \
-  template(object_array_object_object_signature,      "(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;")\
+  template(void_string_signature,                     "()Ljava/lang/String;")                                     \
+  template(object_array_object_signature,             "([Ljava/lang/Object;)Ljava/lang/Object;")                  \
+  template(object_object_array_object_signature,      "(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;")\
   template(exception_void_signature,                  "(Ljava/lang/Exception;)V")                                 \
   template(protectiondomain_signature,                "[Ljava/security/ProtectionDomain;")                        \
   template(accesscontrolcontext_signature,            "Ljava/security/AccessControlContext;")                     \
@@ -585,6 +595,10 @@
    do_name(     reverseBytes_name,                               "reverseBytes")                                        \
   do_intrinsic(_reverseBytes_l,           java_lang_Long,         reverseBytes_name,        long_long_signature, F_S)   \
     /*  (symbol reverseBytes_name defined above) */                                                                     \
+  do_intrinsic(_reverseBytes_c,           java_lang_Character,    reverseBytes_name,        char_char_signature, F_S)   \
+    /*  (symbol reverseBytes_name defined above) */                                                                     \
+  do_intrinsic(_reverseBytes_s,           java_lang_Short,        reverseBytes_name,        short_short_signature, F_S) \
+    /*  (symbol reverseBytes_name defined above) */                                                                     \
                                                                                                                         \
   do_intrinsic(_identityHashCode,         java_lang_System,       identityHashCode_name, object_int_signature,   F_S)   \
    do_name(     identityHashCode_name,                           "identityHashCode")                                    \
@@ -857,11 +871,15 @@
   do_intrinsic(_Object_init,              java_lang_Object, object_initializer_name, void_method_signature,        F_R)   \
   /*    (symbol object_initializer_name defined above) */                                                                 \
                                                                                                                           \
-  do_intrinsic(_invoke,                   java_lang_reflect_Method, invoke_name, object_array_object_object_signature, F_R) \
+  do_intrinsic(_invoke,                   java_lang_reflect_Method, invoke_name, object_object_array_object_signature, F_R) \
   /*   (symbols invoke_name and invoke_signature defined above) */                                                      \
   do_intrinsic(_checkSpreadArgument,      sun_dyn_MethodHandleImpl, checkSpreadArgument_name, checkSpreadArgument_signature, F_S) \
    do_name(    checkSpreadArgument_name,       "checkSpreadArgument")                                                   \
    do_name(    checkSpreadArgument_signature,  "(Ljava/lang/Object;I)V")                                                \
+  do_intrinsic(_invokeExact,              java_dyn_MethodHandle, invokeExact_name,   object_array_object_signature, F_RN) \
+  do_intrinsic(_invokeGeneric,            java_dyn_MethodHandle, invokeGeneric_name, object_array_object_signature, F_RN) \
+  do_intrinsic(_invokeVarargs,            java_dyn_MethodHandle, invokeVarargs_name, object_array_object_signature, F_R)  \
+  do_intrinsic(_invokeDynamic,            java_dyn_InvokeDynamic, star_name,         object_array_object_signature, F_SN) \
                                                                                                                         \
   /* unboxing methods: */                                                                                               \
   do_intrinsic(_booleanValue,             java_lang_Boolean,      booleanValue_name, void_boolean_signature, F_R)       \
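
The new _reverseBytes_c/_reverseBytes_s intrinsics above correspond to
Character.reverseBytes and Short.reverseBytes. What they compute, written out
as a plain 16-bit byte swap for reference:

#include <stdint.h>

static uint16_t reverse_bytes_16(uint16_t x) {
  return (uint16_t)((x << 8) | (x >> 8));   // swap the two bytes
}

// Character.reverseBytes(0x1234) == 0x3412; Short likewise, with the
// result's sign carried by what was the low byte.
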
--- a/src/share/vm/code/codeCache.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/code/codeCache.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -124,6 +124,23 @@
   return (nmethod*)cb;
 }
 
+nmethod* CodeCache::first_nmethod() {
+  assert_locked_or_safepoint(CodeCache_lock);
+  CodeBlob* cb = first();
+  while (cb != NULL && !cb->is_nmethod()) {
+    cb = next(cb);
+  }
+  return (nmethod*)cb;
+}
+
+nmethod* CodeCache::next_nmethod (CodeBlob* cb) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  cb = next(cb);
+  while (cb != NULL && !cb->is_nmethod()) {
+    cb = next(cb);
+  }
+  return (nmethod*)cb;
+}
 
 CodeBlob* CodeCache::allocate(int size) {
   // Do not seize the CodeCache lock here--if the caller has not
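
first_nmethod/next_nmethod turn the raw CodeBlob list into a filtered cursor
that visits only nmethods. The shape of that pattern, as a self-contained
sketch with a stand-in blob type:

#include <stddef.h>

struct Blob { Blob* next; bool is_nmethod; };

static Blob* skip_non_nmethods(Blob* b) {
  while (b != NULL && !b->is_nmethod) b = b->next;   // filter in the walk
  return b;
}

static Blob* first_nmethod(Blob* head) { return skip_non_nmethods(head); }
static Blob* next_nmethod(Blob* b)     { return skip_non_nmethods(b->next); }

// Typical loop:
//   for (Blob* nm = first_nmethod(head); nm != NULL; nm = next_nmethod(nm))
//     ... only nmethods are visited; buffer blobs and stubs are skipped ...
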
@@ -284,9 +301,11 @@
       cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
     }
 #endif //PRODUCT
-    if (is_live)
+    if (is_live) {
       // Perform cur->oops_do(f), maybe just once per nmethod.
       f->do_code_blob(cur);
+      cur->fix_oop_relocations();
+    }
   }
 
   // Check for stray marks.
@@ -412,7 +431,7 @@
       saved->set_speculatively_disconnected(false);
       saved->set_saved_nmethod_link(NULL);
       if (PrintMethodFlushing) {
-        saved->print_on(tty, " ### nmethod is reconnected");
+        saved->print_on(tty, " ### nmethod is reconnected\n");
       }
       if (LogCompilation && (xtty != NULL)) {
         ttyLocker ttyl;
@@ -430,7 +449,8 @@
 }
 
 void CodeCache::remove_saved_code(nmethod* nm) {
-  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+  // For the concurrent sweeper this is called with the CodeCache_lock already taken by the caller
+  assert_locked_or_safepoint(CodeCache_lock);
   assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
   nmethod* saved = _saved_nmethods;
   nmethod* prev = NULL;
@@ -461,7 +481,7 @@
   nm->set_saved_nmethod_link(_saved_nmethods);
   _saved_nmethods = nm;
   if (PrintMethodFlushing) {
-    nm->print_on(tty, " ### nmethod is speculatively disconnected");
+    nm->print_on(tty, " ### nmethod is speculatively disconnected\n");
   }
   if (LogCompilation && (xtty != NULL)) {
     ttyLocker ttyl;
--- a/src/share/vm/code/codeCache.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/code/codeCache.hpp	Wed May 19 10:22:39 2010 -0700
@@ -102,6 +102,8 @@
   static CodeBlob* next (CodeBlob* cb);
   static CodeBlob* alive(CodeBlob *cb);
   static nmethod* alive_nmethod(CodeBlob *cb);
+  static nmethod* first_nmethod();
+  static nmethod* next_nmethod (CodeBlob* cb);
   static int       nof_blobs()                 { return _number_of_blobs; }
 
   // GC support
--- a/src/share/vm/code/exceptionHandlerTable.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/code/exceptionHandlerTable.cpp	Wed May 19 10:22:39 2010 -0700
@@ -221,6 +221,6 @@
   for (uint i = 0; i < len(); i++) {
      if ((*adr(i) > (unsigned int)nm->code_size()) ||
          (*(adr(i)+1) > (unsigned int)nm->code_size()))
-       fatal1("Invalid offset in ImplicitExceptionTable at %lx", _data);
+       fatal(err_msg("Invalid offset in ImplicitExceptionTable at " PTR_FORMAT, _data));
   }
 }
--- a/src/share/vm/code/nmethod.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/code/nmethod.cpp	Wed May 19 10:22:39 2010 -0700
@@ -685,6 +685,7 @@
     _exception_offset        = 0;
     _deoptimize_offset       = 0;
     _deoptimize_mh_offset    = 0;
+    _unwind_handler_offset   = -1;
     _trap_offset             = offsets->value(CodeOffsets::Dtrace_trap);
     _orig_pc_offset          = 0;
     _stub_offset             = data_offset();
@@ -798,6 +799,11 @@
     _exception_offset        = _stub_offset + offsets->value(CodeOffsets::Exceptions);
     _deoptimize_offset       = _stub_offset + offsets->value(CodeOffsets::Deopt);
     _deoptimize_mh_offset    = _stub_offset + offsets->value(CodeOffsets::DeoptMH);
+    if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
+      _unwind_handler_offset   = instructions_offset() + offsets->value(CodeOffsets::UnwindHandler);
+    } else {
+      _unwind_handler_offset   = -1;
+    }
     _consts_offset           = instructions_offset() + code_buffer->total_offset_of(code_buffer->consts()->start());
     _scopes_data_offset      = data_offset();
     _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size         (), oopSize);
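
The unwind handler offset is optional, so -1 serves as an "absent" sentinel
and the accessor (in nmethod.hpp below) maps it to NULL rather than computing
header_begin() + (-1). The pattern in isolation, with illustrative types:

typedef unsigned char* address;

struct NMethodSketch {
  address header;
  int     unwind_handler_offset;   // -1 means "no unwind handler"

  address unwind_handler_begin() const {
    return unwind_handler_offset != -1 ? header + unwind_handler_offset
                                       : (address)0;
  }
};
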
@@ -1008,9 +1014,7 @@
 
 void nmethod::cleanup_inline_caches() {
 
-  assert(SafepointSynchronize::is_at_safepoint() &&
-        !CompiledIC_lock->is_locked() &&
-        !Patching_lock->is_locked(), "no threads must be updating the inline caches by them selfs");
+  assert_locked_or_safepoint(CompiledIC_lock);
 
   // If the method is not entrant or zombie then a JMP is plastered over the
   // first few bytes.  If an oop in the old code was there, that oop
@@ -1065,7 +1069,6 @@
 // Tell if a non-entrant method can be converted to a zombie (i.e., there are no activations on the stack)
 bool nmethod::can_not_entrant_be_converted() {
   assert(is_not_entrant(), "must be a non-entrant method");
-  assert(SafepointSynchronize::is_at_safepoint(), "must be called during a safepoint");
 
   // Since the nmethod sweeper only does partial sweep the sweeper's traversal
   // count can be greater than the stack traversal count before it hits the
@@ -1121,7 +1124,7 @@
     _method = NULL;            // Clear the method of this dead nmethod
   }
   // Make the class unloaded - i.e., change state and notify sweeper
-  check_safepoint();
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   if (is_in_use()) {
     // Transitioning directly from live to unloaded -- so
     // we need to force a cache clean-up; remember this
@@ -1214,17 +1217,6 @@
       assert (NativeJump::instruction_size == nmethod::_zombie_instruction_size, "");
     }
 
-    // When the nmethod becomes zombie it is no longer alive so the
-    // dependencies must be flushed.  nmethods in the not_entrant
-    // state will be flushed later when the transition to zombie
-    // happens or they get unloaded.
-    if (state == zombie) {
-      assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
-      flush_dependencies(NULL);
-    } else {
-      assert(state == not_entrant, "other cases may need to be handled differently");
-    }
-
     was_alive = is_in_use(); // Read state under lock
 
     // Change state
@@ -1235,6 +1227,17 @@
 
   } // leave critical region under Patching_lock
 
+  // When the nmethod becomes zombie it is no longer alive so the
+  // dependencies must be flushed.  nmethods in the not_entrant
+  // state will be flushed later when the transition to zombie
+  // happens or they get unloaded.
+  if (state == zombie) {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    flush_dependencies(NULL);
+  } else {
+    assert(state == not_entrant, "other cases may need to be handled differently");
+  }
+
   if (state == not_entrant) {
     Events::log("Make nmethod not entrant " INTPTR_FORMAT, this);
   } else {
@@ -1304,21 +1307,13 @@
   return true;
 }
 
-
-#ifndef PRODUCT
-void nmethod::check_safepoint() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-}
-#endif
-
-
 void nmethod::flush() {
   // Note that there are no valid oops in the nmethod anymore.
   assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
   assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
 
   assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
-  check_safepoint();
+  assert_locked_or_safepoint(CodeCache_lock);
 
   // completely deallocate this method
   EventMark m("flushing nmethod " INTPTR_FORMAT " %s", this, "");
@@ -1367,7 +1362,7 @@
 // notifies instanceKlasses that are reachable
 
 void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
+  assert_locked_or_safepoint(CodeCache_lock);
   assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
   "is_alive is non-NULL if and only if we are called during GC");
   if (!has_flushed_dependencies()) {
@@ -1528,7 +1523,8 @@
             }
           }
           ic->set_to_clean();
-          assert(ic->cached_oop() == NULL, "cached oop in IC should be cleared")
+          assert(ic->cached_oop() == NULL,
+                 "cached oop in IC should be cleared");
         }
       }
     }
@@ -2117,7 +2113,7 @@
   ResourceMark rm;
 
   if (!CodeCache::contains(this)) {
-    fatal1("nmethod at " INTPTR_FORMAT " not in zone", this);
+    fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this));
   }
 
   if(is_native_method() )
@@ -2125,7 +2121,8 @@
 
   nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
   if (nm != this) {
-    fatal1("findNMethod did not find this nmethod (" INTPTR_FORMAT ")", this);
+    fatal(err_msg("findNMethod did not find this nmethod (" INTPTR_FORMAT ")",
+                  this));
   }
 
   for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
@@ -2258,7 +2255,6 @@
     tty->print(" for method " INTPTR_FORMAT , (address)method());
     tty->print(" { ");
     if (version())        tty->print("v%d ", version());
-    if (level())          tty->print("l%d ", level());
     if (is_in_use())      tty->print("in_use ");
     if (is_not_entrant()) tty->print("not_entrant ");
     if (is_zombie())      tty->print("zombie ");
--- a/src/share/vm/code/nmethod.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/code/nmethod.hpp	Wed May 19 10:22:39 2010 -0700
@@ -82,7 +82,6 @@
 struct nmFlags {
   friend class VMStructs;
   unsigned int version:8;                    // version number (0 = first version)
-  unsigned int level:4;                      // optimization level
   unsigned int age:4;                        // age (in # of sweep steps)
 
   unsigned int state:2;                      // {alive, zombie, unloaded}
@@ -154,6 +153,9 @@
   // All deoptee's at a MethodHandle call site will resume execution
   // at this location described by this offset.
   int _deoptimize_mh_offset;
+  // Offset of the unwind handler if it exists
+  int _unwind_handler_offset;
+
 #ifdef HAVE_DTRACE_H
   int _trap_offset;
 #endif // def HAVE_DTRACE_H
@@ -341,6 +343,7 @@
   address exception_begin       () const          { return           header_begin() + _exception_offset     ; }
   address deopt_handler_begin   () const          { return           header_begin() + _deoptimize_offset    ; }
   address deopt_mh_handler_begin() const          { return           header_begin() + _deoptimize_mh_offset ; }
+  address unwind_handler_begin  () const          { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
   address stub_begin            () const          { return           header_begin() + _stub_offset          ; }
   address stub_end              () const          { return           header_begin() + _consts_offset        ; }
   address consts_begin          () const          { return           header_begin() + _consts_offset        ; }
@@ -406,14 +409,13 @@
   void flush_dependencies(BoolObjectClosure* is_alive);
   bool  has_flushed_dependencies()                { return flags.hasFlushedDependencies; }
   void  set_has_flushed_dependencies()            {
-    check_safepoint();
     assert(!has_flushed_dependencies(), "should only happen once");
     flags.hasFlushedDependencies = 1;
   }
 
   bool  is_marked_for_reclamation() const         { return flags.markedForReclamation; }
-  void  mark_for_reclamation()                    { check_safepoint(); flags.markedForReclamation = 1; }
-  void  unmark_for_reclamation()                  { check_safepoint(); flags.markedForReclamation = 0; }
+  void  mark_for_reclamation()                    { flags.markedForReclamation = 1; }
+  void  unmark_for_reclamation()                  { flags.markedForReclamation = 0; }
 
   bool  has_unsafe_access() const                 { return flags.has_unsafe_access; }
   void  set_has_unsafe_access(bool z)             { flags.has_unsafe_access = z; }
@@ -424,9 +426,6 @@
   bool  is_speculatively_disconnected() const     { return flags.speculatively_disconnected; }
   void  set_speculatively_disconnected(bool z)     { flags.speculatively_disconnected = z; }
 
-  int   level() const                             { return flags.level; }
-  void  set_level(int newLevel)                   { check_safepoint(); flags.level = newLevel; }
-
   int   comp_level() const                        { return _comp_level; }
 
   int   version() const                           { return flags.version; }
--- a/src/share/vm/code/stubs.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/code/stubs.cpp	Wed May 19 10:22:39 2010 -0700
@@ -62,7 +62,9 @@
                      Mutex* lock, const char* name) : _mutex(lock) {
   intptr_t size = round_to(buffer_size, 2*BytesPerWord);
   BufferBlob* blob = BufferBlob::create(name, size);
-  if( blob == NULL ) vm_exit_out_of_memory1(size, "CodeCache: no room for %s", name);
+  if( blob == NULL) {
+    vm_exit_out_of_memory(size, err_msg("CodeCache: no room for %s", name));
+  }
   _stub_interface  = stub_interface;
   _buffer_size     = blob->instructions_size();
   _buffer_limit    = blob->instructions_size();
--- a/src/share/vm/code/vtableStubs.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/code/vtableStubs.cpp	Wed May 19 10:22:39 2010 -0700
@@ -45,7 +45,9 @@
   if (_chunk == NULL || _chunk + real_size > _chunk_end) {
     const int bytes = chunk_factor * real_size + pd_code_alignment();
     BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
-    if( blob == NULL ) vm_exit_out_of_memory1(bytes, "CodeCache: no room for %s", "vtable chunks");
+    if (blob == NULL) {
+      vm_exit_out_of_memory(bytes, "CodeCache: no room for vtable chunks");
+    }
     _chunk = blob->instructions_begin();
     _chunk_end = _chunk + bytes;
     VTune::register_stub("vtable stub", _chunk, _chunk_end);
@@ -189,7 +191,9 @@
   instanceKlass* ik = instanceKlass::cast(klass);
   klassVtable* vt = ik->vtable();
   klass->print();
-  fatal3("bad compiled vtable dispatch: receiver " INTPTR_FORMAT ", index %d (vtable length %d)", (address)receiver, index, vt->length());
+  fatal(err_msg("bad compiled vtable dispatch: receiver " INTPTR_FORMAT ", "
+                "index %d (vtable length %d)",
+                (address)receiver, index, vt->length()));
 }
 
 #endif // PRODUCT
--- a/src/share/vm/compiler/compileBroker.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/compiler/compileBroker.cpp	Wed May 19 10:22:39 2010 -0700
@@ -461,12 +461,25 @@
 //
 // Get the next CompileTask from a CompileQueue
 CompileTask* CompileQueue::get() {
+  NMethodSweeper::possibly_sweep();
+
   MutexLocker locker(lock());
 
   // Wait for an available CompileTask.
   while (_first == NULL) {
     // There is no work to be done right now.  Wait.
-    lock()->wait();
+    if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() || CodeCache::needs_flushing())) {
+      // During the emergency sweeping periods, wake up and sweep occasionally
+      bool timedout = lock()->wait(!Mutex::_no_safepoint_check_flag, NmethodSweepCheckInterval*1000);
+      if (timedout) {
+        MutexUnlocker ul(lock());
+        // When otherwise not busy, run nmethod sweeping
+        NMethodSweeper::possibly_sweep();
+      }
+    } else {
+      // During normal operation no need to wake up on timer
+      lock()->wait();
+    }
   }
 
   CompileTask* task = _first;
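
The waiting logic above is a timed-wait-with-maintenance loop: under code-cache
pressure the compiler thread wakes periodically and sweeps instead of blocking
indefinitely. A minimal sketch of the shape (illustrative only; queue_is_empty
and under_pressure are placeholders for the conditions in the code above, the
wait is assumed to return true on timeout as the name timedout suggests, and
the *1000 suggests NmethodSweepCheckInterval is in seconds while the wait takes
milliseconds):

    while (queue_is_empty()) {
      if (under_pressure) {
        bool timedout = lock()->wait(!Mutex::_no_safepoint_check_flag,
                                     NmethodSweepCheckInterval * 1000);
        if (timedout) {
          MutexUnlocker ul(lock());   // never sweep while holding the queue lock
          NMethodSweeper::possibly_sweep();
        }
      } else {
        lock()->wait();               // normal operation: block until notified
      }
    }
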
@@ -1414,9 +1427,14 @@
     intx thread_id = os::current_thread_id();
     for (int try_temp_dir = 1; try_temp_dir >= 0; try_temp_dir--) {
       const char* dir = (try_temp_dir ? os::get_temp_directory() : NULL);
-      if (dir == NULL)  dir = "";
-      sprintf(fileBuf, "%shs_c" UINTX_FORMAT "_pid%u.log",
-              dir, thread_id, os::current_process_id());
+      if (dir == NULL) {
+        jio_snprintf(fileBuf, sizeof(fileBuf), "hs_c" UINTX_FORMAT "_pid%u.log",
+                     thread_id, os::current_process_id());
+      } else {
+        jio_snprintf(fileBuf, sizeof(fileBuf),
+                     "%s%shs_c" UINTX_FORMAT "_pid%u.log", dir,
+                     os::file_separator(), thread_id, os::current_process_id());
+      }
       fp = fopen(fileBuf, "at");
       if (fp != NULL) {
         file = NEW_C_HEAP_ARRAY(char, strlen(fileBuf)+1);
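
For illustration (hypothetical values, not from the change): with dir = "/tmp",
thread_id = 42 and pid = 1234 the first attempt produces

    /tmp/hs_c42_pid1234.log

while the dir == NULL fallback drops the prefix and creates hs_c42_pid1234.log
in the current directory. Unlike the old sprintf(), the jio_snprintf() calls
bound the write to sizeof(fileBuf).
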
--- a/src/share/vm/compiler/compileBroker.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/compiler/compileBroker.hpp	Wed May 19 10:22:39 2010 -0700
@@ -310,7 +310,7 @@
 
   static AbstractCompiler* compiler(int level ) {
     if (level == CompLevel_fast_compile) return _compilers[0];
-    assert(level == CompLevel_highest_tier, "what level?")
+    assert(level == CompLevel_highest_tier, "what level?");
     return _compilers[1];
   }
 
--- a/src/share/vm/compiler/compileLog.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/compiler/compileLog.cpp	Wed May 19 10:22:39 2010 -0700
@@ -68,7 +68,7 @@
     return attrs;
   } else {
     // park it in the buffer, so we can put a null on the end
-    assert(!(kind >= buffer && kind < buffer+100), "not obviously in buffer")
+    assert(!(kind >= buffer && kind < buffer+100), "not obviously in buffer");
     int klen = attrs - kind;
     strncpy(buffer, kind, klen);
     buffer[klen] = 0;
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp	Wed May 19 10:22:39 2010 -0700
@@ -118,7 +118,7 @@
     // TreeList from the first chunk to the next chunk and update all
     // the TreeList pointers in the chunks in the list.
     if (nextTC == NULL) {
-      assert(prevFC == NULL, "Not last chunk in the list")
+      assert(prevFC == NULL, "Not last chunk in the list");
       set_tail(NULL);
       set_head(NULL);
     } else {
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2007-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,11 +32,10 @@
   ConcurrentMarkSweepPolicy* as_concurrent_mark_sweep_policy() { return this; }
 
   void initialize_gc_policy_counters();
-#if 1
+
   virtual void initialize_size_policy(size_t init_eden_size,
                                       size_t init_promo_size,
                                       size_t init_survivor_size);
-#endif
 
   // Returns true if the incremental mode is enabled.
   virtual bool has_soft_ended_eden();
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1815,8 +1815,19 @@
     do_compaction_work(clear_all_soft_refs);
 
     // Has the GC time limit been exceeded?
-    check_gc_time_limit();
-
+    DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
+    size_t max_eden_size = young_gen->max_capacity() -
+                           young_gen->to()->capacity() -
+                           young_gen->from()->capacity();
+    GenCollectedHeap* gch = GenCollectedHeap::heap();
+    GCCause::Cause gc_cause = gch->gc_cause();
+    size_policy()->check_gc_overhead_limit(_young_gen->used(),
+                                           young_gen->eden()->used(),
+                                           _cmsGen->max_capacity(),
+                                           max_eden_size,
+                                           full,
+                                           gc_cause,
+                                           gch->collector_policy());
   } else {
     do_mark_sweep_work(clear_all_soft_refs, first_state,
       should_start_over);
@@ -1828,55 +1839,6 @@
   return;
 }
 
-void CMSCollector::check_gc_time_limit() {
-
-  // Ignore explicit GC's.  Exiting here does not set the flag and
-  // does not reset the count.  Updating of the averages for system
-  // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
-  GCCause::Cause gc_cause = GenCollectedHeap::heap()->gc_cause();
-  if (GCCause::is_user_requested_gc(gc_cause) ||
-      GCCause::is_serviceability_requested_gc(gc_cause)) {
-    return;
-  }
-
-  // Calculate the fraction of the CMS generation was freed during
-  // the last collection.
-  // Only consider the STW compacting cost for now.
-  //
-  // Note that the gc time limit test only works for the collections
-  // of the young gen + tenured gen and not for collections of the
-  // permanent gen.  That is because the calculation of the space
-  // freed by the collection is the free space in the young gen +
-  // tenured gen.
-
-  double fraction_free =
-    ((double)_cmsGen->free())/((double)_cmsGen->max_capacity());
-  if ((100.0 * size_policy()->compacting_gc_cost()) >
-         ((double) GCTimeLimit) &&
-        ((fraction_free * 100) < GCHeapFreeLimit)) {
-    size_policy()->inc_gc_time_limit_count();
-    if (UseGCOverheadLimit &&
-        (size_policy()->gc_time_limit_count() >
-         AdaptiveSizePolicyGCTimeLimitThreshold)) {
-      size_policy()->set_gc_time_limit_exceeded(true);
-      // Avoid consecutive OOM due to the gc time limit by resetting
-      // the counter.
-      size_policy()->reset_gc_time_limit_count();
-      if (PrintGCDetails) {
-        gclog_or_tty->print_cr("      GC is exceeding overhead limit "
-          "of %d%%", GCTimeLimit);
-      }
-    } else {
-      if (PrintGCDetails) {
-        gclog_or_tty->print_cr("      GC would exceed overhead limit "
-          "of %d%%", GCTimeLimit);
-      }
-    }
-  } else {
-    size_policy()->reset_gc_time_limit_count();
-  }
-}
-
 // Resize the perm generation and the tenured generation
 // after obtaining the free list locks for the
 // two generations.
@@ -6182,6 +6144,11 @@
       }
       curAddr = chunk.end();
     }
+    // A successful mostly concurrent collection has been done.
+    // Because only the full (i.e., concurrent mode failure) collections
+    // are being measured for gc overhead limits, clean the "near" flag
+    // and count.
+    sp->reset_gc_overhead_limit_count();
     _collectorState = Idling;
   } else {
     // already have the lock
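
For reference, a worked instance of the trip condition in the code removed
above (the defaults are an assumption here: GCTimeLimit = 98, GCHeapFreeLimit
= 2): a compacting collection costing 99% of elapsed time while only 1% of the
CMS generation is free gives

    100.0 * 0.99 = 99 > 98   and   0.01 * 100 = 1 < 2

so the gc-time-limit counter is incremented. The equivalent bookkeeping now
lives behind size_policy()->check_gc_overhead_limit() in the hunk above.
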
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -570,10 +570,6 @@
   ConcurrentMarkSweepPolicy* _collector_policy;
   ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
 
-  // Check whether the gc time limit has been
-  // exceeded and set the size policy flag
-  // appropriately.
-  void check_gc_time_limit();
   // XXX Move these to CMSStats ??? FIX ME !!!
   elapsedTimer _inter_sweep_timer;   // time between sweeps
   elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
--- a/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -69,9 +69,9 @@
   G1CollectorPolicy* g1p = g1h->g1_policy();
   if (g1p->adaptive_young_list_length()) {
     int regions_visited = 0;
-    g1h->young_list_rs_length_sampling_init();
-    while (g1h->young_list_rs_length_sampling_more()) {
-      g1h->young_list_rs_length_sampling_next();
+    g1h->young_list()->rs_length_sampling_init();
+    while (g1h->young_list()->rs_length_sampling_more()) {
+      g1h->young_list()->rs_length_sampling_next();
       ++regions_visited;
 
       // we try to yield every time we visit 10 regions
@@ -162,6 +162,7 @@
   if (_worker_id >= cg1r()->worker_thread_num()) {
     run_young_rs_sampling();
     terminate();
+    return;
   }
 
   _vtime_start = os::elapsedVTime();
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed May 19 10:22:39 2010 -0700
@@ -297,6 +297,11 @@
   }
 }
 
+// Currently we do not call this at all. Normally we would call it
+// during the concurrent marking / remark phases but we now call
+// the lock-based version instead. But we might want to resurrect this
+// code in the future. So, we'll leave it here commented out.
+#if 0
 MemRegion CMRegionStack::pop() {
   while (true) {
     // Otherwise...
@@ -321,6 +326,41 @@
     // Otherwise, we need to try again.
   }
 }
+#endif // 0
+
+void CMRegionStack::push_with_lock(MemRegion mr) {
+  assert(mr.word_size() > 0, "Precondition");
+  MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag);
+
+  if (isFull()) {
+    _overflow = true;
+    return;
+  }
+
+  _base[_index] = mr;
+  _index += 1;
+}
+
+MemRegion CMRegionStack::pop_with_lock() {
+  MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag);
+
+  while (true) {
+    if (_index == 0) {
+      return MemRegion();
+    }
+    _index -= 1;
+
+    MemRegion mr = _base[_index];
+    if (mr.start() != NULL) {
+      assert(mr.end() != NULL, "invariant");
+      assert(mr.word_size() > 0, "invariant");
+      return mr;
+    } else {
+      // that entry was invalidated... let's skip it
+      assert(mr.end() == NULL, "invariant");
+    }
+  }
+}
 
 bool CMRegionStack::invalidate_entries_into_cset() {
   bool result = false;
@@ -668,24 +708,46 @@
 //
 
 void ConcurrentMark::clearNextBitmap() {
-   guarantee(!G1CollectedHeap::heap()->mark_in_progress(), "Precondition.");
-
-   // clear the mark bitmap (no grey objects to start with).
-   // We need to do this in chunks and offer to yield in between
-   // each chunk.
-   HeapWord* start  = _nextMarkBitMap->startWord();
-   HeapWord* end    = _nextMarkBitMap->endWord();
-   HeapWord* cur    = start;
-   size_t chunkSize = M;
-   while (cur < end) {
-     HeapWord* next = cur + chunkSize;
-     if (next > end)
-       next = end;
-     MemRegion mr(cur,next);
-     _nextMarkBitMap->clearRange(mr);
-     cur = next;
-     do_yield_check();
-   }
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  G1CollectorPolicy* g1p = g1h->g1_policy();
+
+  // Make sure that the concurrent mark thread still appears to be in
+  // the current cycle.
+  guarantee(cmThread()->during_cycle(), "invariant");
+
+  // We are finishing up the current cycle by clearing the next
+  // marking bitmap and getting it ready for the next cycle. During
+  // this time no other cycle can start. So, let's make sure that this
+  // is the case.
+  guarantee(!g1h->mark_in_progress(), "invariant");
+
+  // clear the mark bitmap (no grey objects to start with).
+  // We need to do this in chunks and offer to yield in between
+  // each chunk.
+  HeapWord* start  = _nextMarkBitMap->startWord();
+  HeapWord* end    = _nextMarkBitMap->endWord();
+  HeapWord* cur    = start;
+  size_t chunkSize = M;
+  while (cur < end) {
+    HeapWord* next = cur + chunkSize;
+    if (next > end)
+      next = end;
+    MemRegion mr(cur,next);
+    _nextMarkBitMap->clearRange(mr);
+    cur = next;
+    do_yield_check();
+
+    // Repeat the asserts from above. We'll do them as asserts here to
+    // minimize their overhead on the product. However, we'll have
+    // them as guarantees at the beginning / end of the bitmap
+    // clearing to get some checking in the product.
+    assert(cmThread()->during_cycle(), "invariant");
+    assert(!g1h->mark_in_progress(), "invariant");
+  }
+
+  // Repeat the asserts from above.
+  guarantee(cmThread()->during_cycle(), "invariant");
+  guarantee(!g1h->mark_in_progress(), "invariant");
 }
 
 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
@@ -705,7 +767,8 @@
   _has_aborted = false;
 
   if (G1PrintReachableAtInitialMark) {
-    print_reachable(true, "before");
+    print_reachable("at-cycle-start",
+                    true /* use_prev_marking */, true /* all */);
   }
 
   // Initialise marking structures. This has to be done in a STW phase.
@@ -1917,19 +1980,21 @@
 
 #ifndef PRODUCT
 
-class ReachablePrinterOopClosure: public OopClosure {
+class PrintReachableOopClosure: public OopClosure {
 private:
   G1CollectedHeap* _g1h;
   CMBitMapRO*      _bitmap;
   outputStream*    _out;
   bool             _use_prev_marking;
+  bool             _all;
 
 public:
-  ReachablePrinterOopClosure(CMBitMapRO*   bitmap,
-                             outputStream* out,
-                             bool          use_prev_marking) :
+  PrintReachableOopClosure(CMBitMapRO*   bitmap,
+                           outputStream* out,
+                           bool          use_prev_marking,
+                           bool          all) :
     _g1h(G1CollectedHeap::heap()),
-    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }
+    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking), _all(all) { }
 
   void do_oop(narrowOop* p) { do_oop_work(p); }
   void do_oop(      oop* p) { do_oop_work(p); }
@@ -1939,9 +2004,11 @@
     const char* str = NULL;
     const char* str2 = "";
 
-    if (!_g1h->is_in_g1_reserved(obj))
-      str = "outside G1 reserved";
-    else {
+    if (obj == NULL) {
+      str = "";
+    } else if (!_g1h->is_in_g1_reserved(obj)) {
+      str = " O";
+    } else {
       HeapRegion* hr  = _g1h->heap_region_containing(obj);
       guarantee(hr != NULL, "invariant");
       bool over_tams = false;
@@ -1950,74 +2017,67 @@
       } else {
         over_tams = hr->obj_allocated_since_next_marking(obj);
       }
+      bool marked = _bitmap->isMarked((HeapWord*) obj);
 
       if (over_tams) {
-        str = "over TAMS";
-        if (_bitmap->isMarked((HeapWord*) obj)) {
+        str = " >";
+        if (marked) {
           str2 = " AND MARKED";
         }
-      } else if (_bitmap->isMarked((HeapWord*) obj)) {
-        str = "marked";
+      } else if (marked) {
+        str = " M";
       } else {
-        str = "#### NOT MARKED ####";
+        str = " NOT";
       }
     }
 
-    _out->print_cr("    "PTR_FORMAT" contains "PTR_FORMAT" %s%s",
+    _out->print_cr("  "PTR_FORMAT": "PTR_FORMAT"%s%s",
                    p, (void*) obj, str, str2);
   }
 };
 
-class ReachablePrinterClosure: public BitMapClosure {
+class PrintReachableObjectClosure : public ObjectClosure {
 private:
   CMBitMapRO*   _bitmap;
   outputStream* _out;
   bool          _use_prev_marking;
+  bool          _all;
+  HeapRegion*   _hr;
 
 public:
-  ReachablePrinterClosure(CMBitMapRO*   bitmap,
-                          outputStream* out,
-                          bool          use_prev_marking) :
-    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }
-
-  bool do_bit(size_t offset) {
-    HeapWord* addr = _bitmap->offsetToHeapWord(offset);
-    ReachablePrinterOopClosure oopCl(_bitmap, _out, _use_prev_marking);
-
-    _out->print_cr("  obj "PTR_FORMAT", offset %10d (marked)", addr, offset);
-    oop(addr)->oop_iterate(&oopCl);
-    _out->print_cr("");
-
-    return true;
+  PrintReachableObjectClosure(CMBitMapRO*   bitmap,
+                              outputStream* out,
+                              bool          use_prev_marking,
+                              bool          all,
+                              HeapRegion*   hr) :
+    _bitmap(bitmap), _out(out),
+    _use_prev_marking(use_prev_marking), _all(all), _hr(hr) { }
+
+  void do_object(oop o) {
+    bool over_tams;
+    if (_use_prev_marking) {
+      over_tams = _hr->obj_allocated_since_prev_marking(o);
+    } else {
+      over_tams = _hr->obj_allocated_since_next_marking(o);
+    }
+    bool marked = _bitmap->isMarked((HeapWord*) o);
+    bool print_it = _all || over_tams || marked;
+
+    if (print_it) {
+      _out->print_cr(" "PTR_FORMAT"%s",
+                     o, (over_tams) ? " >" : (marked) ? " M" : "");
+      PrintReachableOopClosure oopCl(_bitmap, _out, _use_prev_marking, _all);
+      o->oop_iterate(&oopCl);
+    }
   }
 };
 
-class ObjInRegionReachablePrinterClosure : public ObjectClosure {
+class PrintReachableRegionClosure : public HeapRegionClosure {
 private:
   CMBitMapRO*   _bitmap;
   outputStream* _out;
   bool          _use_prev_marking;
-
-public:
-  ObjInRegionReachablePrinterClosure(CMBitMapRO*   bitmap,
-                                     outputStream* out,
-                                     bool          use_prev_marking) :
-    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }
-
-  void do_object(oop o) {
-    ReachablePrinterOopClosure oopCl(_bitmap, _out, _use_prev_marking);
-
-    _out->print_cr("  obj "PTR_FORMAT" (over TAMS)", (void*) o);
-    o->oop_iterate(&oopCl);
-    _out->print_cr("");
-  }
-};
-
-class RegionReachablePrinterClosure : public HeapRegionClosure {
-private:
-  CMBitMapRO*   _bitmap;
-  outputStream* _out;
-  bool          _use_prev_marking;
+  bool          _all;
 
 public:
   bool doHeapRegion(HeapRegion* hr) {
@@ -2032,22 +2092,35 @@
     }
     _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
                    "TAMS: "PTR_FORMAT, b, e, t, p);
-    _out->print_cr("");
-
-    ObjInRegionReachablePrinterClosure ocl(_bitmap, _out, _use_prev_marking);
-    hr->object_iterate_mem_careful(MemRegion(p, t), &ocl);
+    _out->cr();
+
+    HeapWord* from = b;
+    HeapWord* to   = t;
+
+    if (to > from) {
+      _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to);
+      _out->cr();
+      PrintReachableObjectClosure ocl(_bitmap, _out,
+                                      _use_prev_marking, _all, hr);
+      hr->object_iterate_mem_careful(MemRegion(from, to), &ocl);
+      _out->cr();
+    }
 
     return false;
   }
 
-  RegionReachablePrinterClosure(CMBitMapRO*   bitmap,
-                                outputStream* out,
-                                bool          use_prev_marking) :
-    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { }
+  PrintReachableRegionClosure(CMBitMapRO*   bitmap,
+                              outputStream* out,
+                              bool          use_prev_marking,
+                              bool          all) :
+    _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking), _all(all) { }
 };
 
-void ConcurrentMark::print_reachable(bool use_prev_marking, const char* str) {
-  gclog_or_tty->print_cr("== Doing reachable object dump... ");
+void ConcurrentMark::print_reachable(const char* str,
+                                     bool use_prev_marking,
+                                     bool all) {
+  gclog_or_tty->cr();
+  gclog_or_tty->print_cr("== Doing heap dump... ");
 
   if (G1PrintReachableBaseFile == NULL) {
     gclog_or_tty->print_cr("  #### error: no base file defined");
@@ -2082,19 +2155,14 @@
   out->print_cr("-- USING %s", (use_prev_marking) ? "PTAMS" : "NTAMS");
   out->cr();
 
-  RegionReachablePrinterClosure rcl(bitmap, out, use_prev_marking);
-  out->print_cr("--- ITERATING OVER REGIONS WITH TAMS < TOP");
-  out->cr();
-  _g1h->heap_region_iterate(&rcl);
+  out->print_cr("--- ITERATING OVER REGIONS");
   out->cr();
-
-  ReachablePrinterClosure cl(bitmap, out, use_prev_marking);
-  out->print_cr("--- ITERATING OVER MARKED OBJECTS ON THE BITMAP");
-  out->cr();
-  bitmap->iterate(&cl);
+  PrintReachableRegionClosure rcl(bitmap, out, use_prev_marking, all);
+  _g1h->heap_region_iterate(&rcl);
   out->cr();
 
   gclog_or_tty->print_cr("  done");
+  gclog_or_tty->flush();
 }
 
 #endif // PRODUCT
@@ -3363,7 +3431,7 @@
       gclog_or_tty->print_cr("[%d] draining region stack, size = %d",
                              _task_id, _cm->region_stack_size());
 
-    MemRegion mr = _cm->region_stack_pop();
+    MemRegion mr = _cm->region_stack_pop_with_lock();
     // it returns MemRegion() if the pop fails
     statsOnly(if (mr.start() != NULL) ++_region_stack_pops );
 
@@ -3384,7 +3452,7 @@
         if (has_aborted())
           mr = MemRegion();
         else {
-          mr = _cm->region_stack_pop();
+          mr = _cm->region_stack_pop_with_lock();
           // it returns MemRegion() if the pop fails
           statsOnly(if (mr.start() != NULL) ++_region_stack_pops );
         }
@@ -3417,7 +3485,7 @@
           }
           // Now push the part of the region we didn't scan on the
           // region stack to make sure a task scans it later.
-          _cm->region_stack_push(newRegion);
+          _cm->region_stack_push_with_lock(newRegion);
         }
         // break from while
         mr = MemRegion();
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -252,9 +252,19 @@
   // with other "push" operations (no pops).
   void push(MemRegion mr);
 
+#if 0
+  // This is currently not used. See the comment in the .cpp file.
+
   // Lock-free; assumes that it will only be called in parallel
   // with other "pop" operations (no pushes).
   MemRegion pop();
+#endif // 0
+
+  // These two are the implementations that use a lock. They can be
+  // called concurrently with each other but they should not be called
+  // concurrently with the lock-free versions (push() / pop()).
+  void push_with_lock(MemRegion mr);
+  MemRegion pop_with_lock();
 
   bool isEmpty()    { return _index == 0; }
   bool isFull()     { return _index == _capacity; }
@@ -540,6 +550,10 @@
 
   // Manipulation of the region stack
   bool region_stack_push(MemRegion mr) {
+    // Currently we only call the lock-free version during evacuation
+    // pauses.
+    assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
+
     _regionStack.push(mr);
     if (_regionStack.overflow()) {
       set_has_overflown();
@@ -547,7 +561,33 @@
     }
     return true;
   }
-  MemRegion region_stack_pop()          { return _regionStack.pop(); }
+#if 0
+  // Currently this is not used. See the comment in the .cpp file.
+  MemRegion region_stack_pop() { return _regionStack.pop(); }
+#endif // 0
+
+  bool region_stack_push_with_lock(MemRegion mr) {
+    // Currently we only call the lock-based version during either
+    // concurrent marking or remark.
+    assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
+           "if we are at a safepoint it should be the remark safepoint");
+
+    _regionStack.push_with_lock(mr);
+    if (_regionStack.overflow()) {
+      set_has_overflown();
+      return false;
+    }
+    return true;
+  }
+  MemRegion region_stack_pop_with_lock() {
+    // Currently we only call the lock-based version during either
+    // concurrent marking or remark.
+    assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
+           "if we are at a safepoint it should be the remark safepoint");
+
+    return _regionStack.pop_with_lock();
+  }
+
   int region_stack_size()               { return _regionStack.size(); }
   bool region_stack_overflow()          { return _regionStack.overflow(); }
   bool region_stack_empty()             { return _regionStack.isEmpty(); }
@@ -612,11 +652,24 @@
   // we do nothing.
   void markAndGrayObjectIfNecessary(oop p);
 
-  // This iterates over the marking bitmap (either prev or next) and
-  // prints out all objects that are marked on the bitmap and indicates
-  // whether what they point to is also marked or not. It also iterates
-  // the objects over TAMS (either prev or next).
-  void print_reachable(bool use_prev_marking, const char* str);
+  // This iterates over the heap and, for each object it comes across,
+  // dumps the contents of its reference fields, as well as
+  // liveness information for the object and its referents. The dump
+  // will be written to a file with the following name:
+  // G1PrintReachableBaseFile + "." + str. use_prev_marking decides
+  // whether the prev (use_prev_marking == true) or next
+  // (use_prev_marking == false) marking information will be used to
+  // determine the liveness of each object / referent. If all is true,
+  // all objects in the heap will be dumped, otherwise only the live
+  // ones. In the dump the following symbols / abbreviations are used:
+  //   M : an explicitly live object (its bitmap bit is set)
+  //   > : an implicitly live object (over tams)
+  //   O : an object outside the G1 heap (typically: in the perm gen)
+  //   NOT : a reference field whose referent is not live
+  //   AND MARKED : indicates that an object is both explicitly and
+  //   implicitly live (it should be one or the other, not both)
+  void print_reachable(const char* str,
+                       bool use_prev_marking, bool all) PRODUCT_RETURN;
 
   // Clear the next marking bitmap (will be called concurrently).
   void clearNextBitmap();
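
Putting the doc comment above together with the format strings in the closures
earlier in this change, a dump excerpt would look roughly like this (addresses
are hypothetical, for illustration only):

    -- USING PTAMS
    --- ITERATING OVER REGIONS
    ** [0x10000000, 0x10100000] top: 0x100a0000 TAMS: 0x10080000
    Objects in [0x10000000, 0x100a0000]
     0x10000040 M
      0x10000048: 0x10000100 M
      0x10000050: 0x20000200 O
     0x10090000 >
      0x10090008: 0x00000000
      0x10090010: 0x10000300 NOT
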
@@ -680,6 +733,19 @@
   // to determine whether any heap regions are located above the finger.
   void registerCSetRegion(HeapRegion* hr);
 
+  // Registers the maximum region-end associated with a set of
+  // regions with CM. Again this is used to determine whether any
+  // heap regions are located above the finger.
+  void register_collection_set_finger(HeapWord* max_finger) {
+    // max_finger is the highest heap region end of the regions currently
+    // contained in the collection set. If this value is larger than
+    // _min_finger then we need to gray objects.
+    // This routine is like registerCSetRegion but for an entire
+    // collection of regions.
+    if (max_finger > _min_finger)
+      _should_gray_objects = true;
+  }
+
   // Returns "true" if at least one mark has been completed.
   bool at_least_one_mark_complete() { return _at_least_one_mark_complete; }
 
--- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp	Wed May 19 10:22:39 2010 -0700
@@ -42,8 +42,8 @@
 
  private:
   ConcurrentMark*                  _cm;
-  bool                             _started;
-  bool                             _in_progress;
+  volatile bool                    _started;
+  volatile bool                    _in_progress;
 
   void sleepBeforeNextCycle();
 
@@ -67,15 +67,25 @@
   // Counting virtual time so far.
   double vtime_count_accum() { return _vtime_count_accum; }
 
-  ConcurrentMark* cm()                           { return _cm;     }
+  ConcurrentMark* cm()     { return _cm; }
+
+  void set_started()       { _started = true;  }
+  void clear_started()     { _started = false; }
+  bool started()           { return _started;  }
+
+  void set_in_progress()   { _in_progress = true;  }
+  void clear_in_progress() { _in_progress = false; }
+  bool in_progress()       { return _in_progress;  }
 
-  void            set_started()                  { _started = true;   }
-  void            clear_started()                { _started = false;  }
-  bool            started()                      { return _started;   }
-
-  void            set_in_progress()              { _in_progress = true;   }
-  void            clear_in_progress()            { _in_progress = false;  }
-  bool            in_progress()                  { return _in_progress;   }
+  // This method returns true from the moment a marking cycle is
+  // initiated (during the initial-mark pause when started() is set)
+  // to the moment when the cycle completes (just after the next
+  // marking bitmap has been cleared and in_progress() is
+  // cleared). While this flag is true we will not start another cycle
+  // so that cycles do not overlap. We cannot use just in_progress()
+  // as the CM thread might take some time to wake up before noticing
+  // that started() is set and set in_progress().
+  bool during_cycle()      { return started() || in_progress(); }
 
   // Yield for GC
   void            yield();
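
Sketching the lifecycle the comment above describes (illustrative; the middle
transition is inferred from the accessors, with the CM thread presumably
calling clear_started() once it has set in_progress):

    // initial-mark pause:       set_started();        during_cycle() == true
    // CM thread wakes up:       set_in_progress();    during_cycle() still true
    // next bitmap cleared:      clear_in_progress();  during_cycle() == false
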
--- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Wed May 19 10:22:39 2010 -0700
@@ -517,7 +517,7 @@
   assert(blk_start != NULL && blk_end > blk_start,
          "phantom block");
   assert(blk_end > threshold, "should be past threshold");
-  assert(blk_start <= threshold, "blk_start should be at or before threshold")
+  assert(blk_start <= threshold, "blk_start should be at or before threshold");
   assert(pointer_delta(threshold, blk_start) <= N_words,
          "offset should be <= BlockOffsetSharedArray::N");
   assert(Universe::heap()->is_in_reserved(blk_start),
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
 // turn it on so that the contents of the young list (scan-only /
 // to-be-collected) are printed at "strategic" points before / during
 // / after the collection --- this is useful for debugging
-#define SCAN_ONLY_VERBOSE 0
+#define YOUNG_LIST_VERBOSE 0
 // CURRENT STATUS
 // This file is under construction.  Search for "FIXME".
 
@@ -133,8 +133,7 @@
 
 YoungList::YoungList(G1CollectedHeap* g1h)
   : _g1h(g1h), _head(NULL),
-    _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL),
-    _length(0), _scan_only_length(0),
+    _length(0),
     _last_sampled_rs_lengths(0),
     _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
 {
@@ -166,48 +165,6 @@
   ++_survivor_length;
 }
 
-HeapRegion* YoungList::pop_region() {
-  while (_head != NULL) {
-    assert( length() > 0, "list should not be empty" );
-    HeapRegion* ret = _head;
-    _head = ret->get_next_young_region();
-    ret->set_next_young_region(NULL);
-    --_length;
-    assert(ret->is_young(), "region should be very young");
-
-    // Replace 'Survivor' region type with 'Young'. So the region will
-    // be treated as a young region and will not be 'confused' with
-    // newly created survivor regions.
-    if (ret->is_survivor()) {
-      ret->set_young();
-    }
-
-    if (!ret->is_scan_only()) {
-      return ret;
-    }
-
-    // scan-only, we'll add it to the scan-only list
-    if (_scan_only_tail == NULL) {
-      guarantee( _scan_only_head == NULL, "invariant" );
-
-      _scan_only_head = ret;
-      _curr_scan_only = ret;
-    } else {
-      guarantee( _scan_only_head != NULL, "invariant" );
-      _scan_only_tail->set_next_young_region(ret);
-    }
-    guarantee( ret->get_next_young_region() == NULL, "invariant" );
-    _scan_only_tail = ret;
-
-    // no need to be tagged as scan-only any more
-    ret->set_young();
-
-    ++_scan_only_length;
-  }
-  assert( length() == 0, "list should be empty" );
-  return NULL;
-}
-
 void YoungList::empty_list(HeapRegion* list) {
   while (list != NULL) {
     HeapRegion* next = list->get_next_young_region();
@@ -225,12 +182,6 @@
   _head = NULL;
   _length = 0;
 
-  empty_list(_scan_only_head);
-  _scan_only_head = NULL;
-  _scan_only_tail = NULL;
-  _scan_only_length = 0;
-  _curr_scan_only = NULL;
-
   empty_list(_survivor_head);
   _survivor_head = NULL;
   _survivor_tail = NULL;
@@ -248,11 +199,11 @@
   HeapRegion* curr = _head;
   HeapRegion* last = NULL;
   while (curr != NULL) {
-    if (!curr->is_young() || curr->is_scan_only()) {
+    if (!curr->is_young()) {
       gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
-                             "incorrectly tagged (%d, %d)",
+                             "incorrectly tagged (y: %d, surv: %d)",
                              curr->bottom(), curr->end(),
-                             curr->is_young(), curr->is_scan_only());
+                             curr->is_young(), curr->is_survivor());
       ret = false;
     }
     ++length;
@@ -267,47 +218,10 @@
                            length, _length);
   }
 
-  bool scan_only_ret = true;
-  length = 0;
-  curr = _scan_only_head;
-  last = NULL;
-  while (curr != NULL) {
-    if (!curr->is_young() || curr->is_scan_only()) {
-      gclog_or_tty->print_cr("### SCAN-ONLY REGION "PTR_FORMAT"-"PTR_FORMAT" "
-                             "incorrectly tagged (%d, %d)",
-                             curr->bottom(), curr->end(),
-                             curr->is_young(), curr->is_scan_only());
-      scan_only_ret = false;
-    }
-    ++length;
-    last = curr;
-    curr = curr->get_next_young_region();
-  }
-  scan_only_ret = scan_only_ret && (length == _scan_only_length);
-
-  if ( (last != _scan_only_tail) ||
-       (_scan_only_head == NULL && _scan_only_tail != NULL) ||
-       (_scan_only_head != NULL && _scan_only_tail == NULL) ) {
-     gclog_or_tty->print_cr("## _scan_only_tail is set incorrectly");
-     scan_only_ret = false;
-  }
-
-  if (_curr_scan_only != NULL && _curr_scan_only != _scan_only_head) {
-    gclog_or_tty->print_cr("### _curr_scan_only is set incorrectly");
-    scan_only_ret = false;
-   }
-
-  if (!scan_only_ret) {
-    gclog_or_tty->print_cr("### SCAN-ONLY LIST seems not well formed!");
-    gclog_or_tty->print_cr("###   list has %d entries, _scan_only_length is %d",
-                  length, _scan_only_length);
-  }
-
-  return ret && scan_only_ret;
+  return ret;
 }
 
-bool YoungList::check_list_empty(bool ignore_scan_only_list,
-                                 bool check_sample) {
+bool YoungList::check_list_empty(bool check_sample) {
   bool ret = true;
 
   if (_length != 0) {
@@ -327,28 +241,7 @@
     gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
   }
 
-  if (ignore_scan_only_list)
-    return ret;
-
-  bool scan_only_ret = true;
-  if (_scan_only_length != 0) {
-    gclog_or_tty->print_cr("### SCAN-ONLY LIST should have 0 length, not %d",
-                  _scan_only_length);
-    scan_only_ret = false;
-  }
-  if (_scan_only_head != NULL) {
-    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL head");
-     scan_only_ret = false;
-  }
-  if (_scan_only_tail != NULL) {
-    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL tail");
-    scan_only_ret = false;
-  }
-  if (!scan_only_ret) {
-    gclog_or_tty->print_cr("### SCAN-ONLY LIST does not seem empty");
-  }
-
-  return ret && scan_only_ret;
+  return ret;
 }
 
 void
@@ -365,7 +258,18 @@
 void
 YoungList::rs_length_sampling_next() {
   assert( _curr != NULL, "invariant" );
-  _sampled_rs_lengths += _curr->rem_set()->occupied();
+  size_t rs_length = _curr->rem_set()->occupied();
+
+  _sampled_rs_lengths += rs_length;
+
+  // The current region may not yet have been added to the
+  // incremental collection set (it gets added when it is
+  // retired as the current allocation region).
+  if (_curr->in_collection_set()) {
+    // Update the collection set policy information for this region
+    _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
+  }
+
   _curr = _curr->get_next_young_region();
   if (_curr == NULL) {
     _last_sampled_rs_lengths = _sampled_rs_lengths;
@@ -375,54 +279,46 @@
 
 void
 YoungList::reset_auxilary_lists() {
-  // We could have just "moved" the scan-only list to the young list.
-  // However, the scan-only list is ordered according to the region
-  // age in descending order, so, by moving one entry at a time, we
-  // ensure that it is recreated in ascending order.
-
   guarantee( is_empty(), "young list should be empty" );
   assert(check_list_well_formed(), "young list should be well formed");
 
   // Add survivor regions to SurvRateGroup.
   _g1h->g1_policy()->note_start_adding_survivor_regions();
   _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
+
   for (HeapRegion* curr = _survivor_head;
        curr != NULL;
        curr = curr->get_next_young_region()) {
     _g1h->g1_policy()->set_region_survivors(curr);
+
+    // The region is a non-empty survivor so let's add it to
+    // the incremental collection set for the next evacuation
+    // pause.
+    _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
   }
   _g1h->g1_policy()->note_stop_adding_survivor_regions();
 
+  _head   = _survivor_head;
+  _length = _survivor_length;
   if (_survivor_head != NULL) {
-    _head           = _survivor_head;
-    _length         = _survivor_length + _scan_only_length;
-    _survivor_tail->set_next_young_region(_scan_only_head);
-  } else {
-    _head           = _scan_only_head;
-    _length         = _scan_only_length;
-  }
-
-  for (HeapRegion* curr = _scan_only_head;
-       curr != NULL;
-       curr = curr->get_next_young_region()) {
-    curr->recalculate_age_in_surv_rate_group();
-  }
-  _scan_only_head   = NULL;
-  _scan_only_tail   = NULL;
-  _scan_only_length = 0;
-  _curr_scan_only   = NULL;
-
-  _survivor_head    = NULL;
-  _survivor_tail   = NULL;
-  _survivor_length  = 0;
+    assert(_survivor_tail != NULL, "cause it shouldn't be");
+    assert(_survivor_length > 0, "invariant");
+    _survivor_tail->set_next_young_region(NULL);
+  }
+
+  // Don't clear the survivor list handles until the start of
+  // the next evacuation pause - we need it in order to re-tag
+  // the survivor regions from this evacuation pause as 'young'
+  // at the start of the next.
+
   _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
 
   assert(check_list_well_formed(), "young list should be well formed");
 }
 
 void YoungList::print() {
-  HeapRegion* lists[] = {_head,   _scan_only_head, _survivor_head};
-  const char* names[] = {"YOUNG", "SCAN-ONLY",     "SURVIVOR"};
+  HeapRegion* lists[] = {_head,   _survivor_head};
+  const char* names[] = {"YOUNG", "SURVIVOR"};
 
   for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
@@ -431,7 +327,7 @@
       gclog_or_tty->print_cr("  empty");
     while (curr != NULL) {
       gclog_or_tty->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
-                             "age: %4d, y: %d, s-o: %d, surv: %d",
+                             "age: %4d, y: %d, surv: %d",
                              curr->bottom(), curr->end(),
                              curr->top(),
                              curr->prev_top_at_mark_start(),
@@ -439,7 +335,6 @@
                              curr->top_at_conc_mark_count(),
                              curr->age_in_surv_rate_group_cond(),
                              curr->is_young(),
-                             curr->is_scan_only(),
                              curr->is_survivor());
       curr = curr->get_next_young_region();
     }
@@ -707,6 +602,12 @@
     // region below.
     if (_cur_alloc_region != NULL) {
       // We're finished with the _cur_alloc_region.
+      // As we're building (at least the young portion of) the collection
+      // set incrementally, we'll add the current allocation region to
+      // the collection set here.
+      if (_cur_alloc_region->is_young()) {
+        g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
+      }
       _summary_bytes_used += _cur_alloc_region->used();
       _cur_alloc_region = NULL;
     }
@@ -820,6 +721,12 @@
       _free_regions++;
       free_region(_cur_alloc_region);
     } else {
+      // As we're building (at least the young portion of) the collection
+      // set incrementally, we'll add the current allocation region to
+      // the collection set here.
+      if (_cur_alloc_region->is_young()) {
+        g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
+      }
       _summary_bytes_used += _cur_alloc_region->used();
     }
     _cur_alloc_region = NULL;
@@ -902,6 +809,10 @@
 
 void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
                                     size_t word_size) {
+  if (GC_locker::check_active_before_gc()) {
+    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
+  }
+
   ResourceMark rm;
 
   if (PrintHeapAtGC) {
@@ -909,16 +820,16 @@
   }
 
   if (full && DisableExplicitGC) {
-    gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n");
     return;
   }
 
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
 
-  if (GC_locker::is_active()) {
-    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
-  }
+  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
+                           collector_policy()->should_clear_all_soft_refs();
+
+  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
 
   {
     IsGCActiveMark x;
@@ -926,7 +837,8 @@
     // Timing
     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty);
+    TraceTime t(full ? "Full GC (System.gc())" : "Full GC",
+                PrintGC, true, gclog_or_tty);
 
     TraceMemoryManagerStats tms(true /* fullGC */);
 
@@ -970,6 +882,15 @@
     g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
     tear_down_region_lists();
     set_used_regions_to_need_zero_fill();
+
+    // We may have added regions to the current incremental collection
+    // set between the last GC or pause and now. We need to clear the
+    // incremental collection set and then start rebuilding it afresh
+    // after this full GC.
+    abandon_collection_set(g1_policy()->inc_cset_head());
+    g1_policy()->clear_incremental_cset();
+    g1_policy()->stop_incremental_cset_building();
+
     if (g1_policy()->in_young_gc_mode()) {
       empty_young_list();
       g1_policy()->set_full_young_gcs(true);
@@ -985,12 +906,12 @@
     ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
 
     ref_processor()->enable_discovery();
-    ref_processor()->setup_policy(clear_all_soft_refs);
+    ref_processor()->setup_policy(do_clear_all_soft_refs);
 
     // Do collection work
     {
       HandleMark hm;  // Discard invalid handles created during gc
-      G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
+      G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
     }
     // Because freeing humongous regions may have added some unclean
     // regions, it is necessary to tear down again before rebuilding.
@@ -1053,6 +974,15 @@
       perm()->compute_new_size();
     }
 
+    // Start a new incremental collection set for the next pause
+    assert(g1_policy()->collection_set() == NULL, "must be");
+    g1_policy()->start_incremental_cset_building();
+
+    // Clear the _cset_fast_test bitmap in anticipation of adding
+    // regions to the incremental collection set for the next
+    // evacuation pause.
+    clear_cset_fast_test();
+
     double end = os::elapsedTime();
     g1_policy()->record_full_collection_end();
 
@@ -1071,7 +1001,9 @@
 
   if (g1_policy()->in_young_gc_mode()) {
     _young_list->reset_sampled_info();
-    assert( check_young_list_empty(false, false),
+    // At this point there should be no regions in the
+    // entire heap tagged as young.
+    assert( check_young_list_empty(true /* check_heap */),
             "young list should be empty at this point");
   }
 
@@ -1208,6 +1140,9 @@
     return result;
   }
 
+  assert(!collector_policy()->should_clear_all_soft_refs(),
+    "Flag should have been handled and cleared prior to this point");
+
   // What else?  We might try synchronous finalization later.  If the total
   // space available is large enough for the allocation, then a more
   // complete compaction phase than we've tried so far might be
@@ -1565,6 +1500,20 @@
 
   _g1h = this;
 
+  _in_cset_fast_test_length = max_regions();
+  _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
+
+  // We're biasing _in_cset_fast_test to avoid subtracting the
+  // beginning of the heap every time we want to index; basically
+  // it's the same as what we do with the card table.
+  _in_cset_fast_test = _in_cset_fast_test_base -
+               ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
+
+  // Clear the _cset_fast_test bitmap in anticipation of adding
+  // regions to the incremental collection set for the first
+  // evacuation pause.
+  clear_cset_fast_test();
+
   // Create the ConcurrentMark data structure and thread.
   // (Must do this late, so that "max_regions" is defined.)
   _cm       = new ConcurrentMark(heap_rs, (int) max_regions());
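
The biasing comment above mirrors the card-table trick: subtract the scaled
heap base once so that each lookup can shift the raw address directly. A
minimal sketch (illustrative names; shift stands for
HeapRegion::LogOfHRGrainBytes, and reserved_start is assumed region-aligned):

    bool* base   = NEW_C_HEAP_ARRAY(bool, num_regions);
    bool* biased = base - ((size_t) reserved_start >> shift);
    // then, for any addr inside the reserved heap:
    //   biased[(size_t) addr >> shift]
    //     == base[((size_t) addr - (size_t) reserved_start) >> shift]
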
@@ -2185,8 +2134,10 @@
     assert(o != NULL, "Huh?");
     if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) {
       o->oop_iterate(&isLive);
-      if (!_hr->obj_allocated_since_prev_marking(o))
-        _live_bytes += (o->size() * HeapWordSize);
+      if (!_hr->obj_allocated_since_prev_marking(o)) {
+        size_t obj_size = o->size();    // widen to size_t before scaling by HeapWordSize to avoid overflow
+        _live_bytes += (obj_size * HeapWordSize);
+      }
     }
   }
   size_t live_bytes() { return _live_bytes; }
@@ -2388,8 +2339,8 @@
       print_on(gclog_or_tty, true /* extended */);
       gclog_or_tty->print_cr("");
       if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
-        concurrent_mark()->print_reachable(use_prev_marking,
-                                           "failed-verification");
+        concurrent_mark()->print_reachable("at-verification-failure",
+                                           use_prev_marking, false /* all */);
       }
       gclog_or_tty->flush();
     }
@@ -2658,6 +2609,10 @@
 
 void
 G1CollectedHeap::do_collection_pause_at_safepoint() {
+  if (GC_locker::check_active_before_gc()) {
+    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
+  }
+
   if (PrintHeapAtGC) {
     Universe::print_heap_before_gc();
   }
@@ -2665,6 +2620,11 @@
   {
     ResourceMark rm;
 
+    // This call will decide whether this pause is an initial-mark
+    // pause. If it is, during_initial_mark_pause() will return true
+    // for the duration of this pause.
+    g1_policy()->decide_on_conc_mark_initiation();
+
     char verbose_str[128];
     sprintf(verbose_str, "GC pause ");
     if (g1_policy()->in_young_gc_mode()) {
@@ -2673,7 +2633,7 @@
       else
         strcat(verbose_str, "(partial)");
     }
-    if (g1_policy()->should_initiate_conc_mark())
+    if (g1_policy()->during_initial_mark_pause())
       strcat(verbose_str, " (initial-mark)");
 
     // if PrintGCDetails is on, we'll print long statistics information
@@ -2697,10 +2657,6 @@
              "young list should be well formed");
     }
 
-    if (GC_locker::is_active()) {
-      return; // GC is disabled (e.g. JNI GetXXXCritical operation)
-    }
-
     bool abandoned = false;
     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
       IsGCActiveMark x;
@@ -2736,27 +2692,21 @@
       double start_time_sec = os::elapsedTime();
       size_t start_used_bytes = used();
 
+#if YOUNG_LIST_VERBOSE
+      gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
+      _young_list->print();
+      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+#endif // YOUNG_LIST_VERBOSE
+
       g1_policy()->record_collection_pause_start(start_time_sec,
                                                  start_used_bytes);
 
-      guarantee(_in_cset_fast_test == NULL, "invariant");
-      guarantee(_in_cset_fast_test_base == NULL, "invariant");
-      _in_cset_fast_test_length = max_regions();
-      _in_cset_fast_test_base =
-                             NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
-      memset(_in_cset_fast_test_base, false,
-                                     _in_cset_fast_test_length * sizeof(bool));
-      // We're biasing _in_cset_fast_test to avoid subtracting the
-      // beginning of the heap every time we want to index; basically
-      // it's the same with what we do with the card table.
-      _in_cset_fast_test = _in_cset_fast_test_base -
-              ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
-
-#if SCAN_ONLY_VERBOSE
+#if YOUNG_LIST_VERBOSE
+      gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
       _young_list->print();
-#endif // SCAN_ONLY_VERBOSE
-
-      if (g1_policy()->should_initiate_conc_mark()) {
+#endif // YOUNG_LIST_VERBOSE
+
+      if (g1_policy()->during_initial_mark_pause()) {
         concurrent_mark()->checkpointRootsInitialPre();
       }
       save_marks();
@@ -2781,12 +2731,15 @@
       if (mark_in_progress())
         concurrent_mark()->newCSet();
 
-      // Now choose the CS.
-      g1_policy()->choose_collection_set();
-
-      // We may abandon a pause if we find no region that will fit in the MMU
-      // pause.
-      bool abandoned = (g1_policy()->collection_set() == NULL);
+#if YOUNG_LIST_VERBOSE
+      gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
+      _young_list->print();
+      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+#endif // YOUNG_LIST_VERBOSE
+
+      // Now choose the CS. We may abandon a pause if we find no
+      // region that will fit in the MMU pause.
+      bool abandoned = g1_policy()->choose_collection_set();
 
       // Nothing to do if we were unable to choose a collection set.
       if (!abandoned) {
@@ -2804,40 +2757,64 @@
 
         // Actually do the work...
         evacuate_collection_set();
+
         free_collection_set(g1_policy()->collection_set());
         g1_policy()->clear_collection_set();
 
-        FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
-        // this is more for peace of mind; we're nulling them here and
-        // we're expecting them to be null at the beginning of the next GC
-        _in_cset_fast_test = NULL;
-        _in_cset_fast_test_base = NULL;
-
         cleanup_surviving_young_words();
 
+        // Start a new incremental collection set for the next pause.
+        g1_policy()->start_incremental_cset_building();
+
+        // Clear the _cset_fast_test bitmap in anticipation of adding
+        // regions to the incremental collection set for the next
+        // evacuation pause.
+        clear_cset_fast_test();
+
         if (g1_policy()->in_young_gc_mode()) {
           _young_list->reset_sampled_info();
-          assert(check_young_list_empty(true),
-                 "young list should be empty");
-
-#if SCAN_ONLY_VERBOSE
+
+          // Don't check the whole heap at this point as the
+          // GC alloc regions from this pause have been tagged
+          // as survivors and moved on to the survivor list.
+          // Survivor regions will fail the !is_young() check.
+          assert(check_young_list_empty(false /* check_heap */),
+              "young list should be empty");
+
+#if YOUNG_LIST_VERBOSE
+          gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
           _young_list->print();
-#endif // SCAN_ONLY_VERBOSE
+#endif // YOUNG_LIST_VERBOSE
 
           g1_policy()->record_survivor_regions(_young_list->survivor_length(),
                                           _young_list->first_survivor_region(),
                                           _young_list->last_survivor_region());
+
           _young_list->reset_auxilary_lists();
         }
       } else {
-        if (_in_cset_fast_test != NULL) {
-          assert(_in_cset_fast_test_base != NULL, "Since _in_cset_fast_test isn't");
-          FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
-          //  this is more for peace of mind; we're nulling them here and
-          // we're expecting them to be null at the beginning of the next GC
-          _in_cset_fast_test = NULL;
-          _in_cset_fast_test_base = NULL;
-        }
+        // We have abandoned the current collection. This can only happen
+        // if we're not doing young or partially young collections, and
+        // we didn't find an old region that we're able to collect within
+        // the allowed time.
+
+        assert(g1_policy()->collection_set() == NULL, "should be");
+        assert(_young_list->length() == 0, "because it should be");
+
+        // This should be a no-op.
+        abandon_collection_set(g1_policy()->inc_cset_head());
+
+        g1_policy()->clear_incremental_cset();
+        g1_policy()->stop_incremental_cset_building();
+
+        // Start a new incremental collection set for the next pause.
+        g1_policy()->start_incremental_cset_building();
+
+        // Clear the _cset_fast_test bitmap in anticipation of adding
+        // regions to the incremental collection set for the next
+        // evacuation pause.
+        clear_cset_fast_test();
+
         // This looks confusing, because the DPT should really be empty
         // at this point -- since we have not done any collection work,
         // there should not be any derived pointers in the table to update;
@@ -2858,7 +2835,7 @@
       }
 
       if (g1_policy()->in_young_gc_mode() &&
-          g1_policy()->should_initiate_conc_mark()) {
+          g1_policy()->during_initial_mark_pause()) {
         concurrent_mark()->checkpointRootsInitialPost();
         set_marking_started();
         // CAUTION: after the doConcurrentMark() call below,
@@ -2871,9 +2848,11 @@
         doConcurrentMark();
       }
 
-#if SCAN_ONLY_VERBOSE
+#if YOUNG_LIST_VERBOSE
+      gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
       _young_list->print();
-#endif // SCAN_ONLY_VERBOSE
+      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
+#endif // YOUNG_LIST_VERBOSE
 
       double end_time_sec = os::elapsedTime();
       double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
@@ -2931,12 +2910,34 @@
   }
 }
 
+size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
+{
+  size_t gclab_word_size;
+  switch (purpose) {
+    case GCAllocForSurvived:
+      gclab_word_size = YoungPLABSize;
+      break;
+    case GCAllocForTenured:
+      gclab_word_size = OldPLABSize;
+      break;
+    default:
+      assert(false, "unknown GCAllocPurpose");
+      gclab_word_size = OldPLABSize;
+      break;
+  }
+  return gclab_word_size;
+}
+
+
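
The helper above is consumed when sizing the per-thread allocation buffers;
a constructor hunk further down in this change initializes them as

    _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
    _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
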
 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
   assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
   // make sure we don't call set_gc_alloc_region() multiple times on
   // the same region
   assert(r == NULL || !r->is_gc_alloc_region(),
          "shouldn't already be a GC alloc region");
+  assert(r == NULL || !r->isHumongous(),
+         "humongous regions shouldn't be used as GC alloc regions");
+
   HeapWord* original_top = NULL;
   if (r != NULL)
     original_top = r->top();
@@ -3079,12 +3080,17 @@
 
       if (alloc_region->in_collection_set() ||
           alloc_region->top() == alloc_region->end() ||
-          alloc_region->top() == alloc_region->bottom()) {
-        // we will discard the current GC alloc region if it's in the
-        // collection set (it can happen!), if it's already full (no
-        // point in using it), or if it's empty (this means that it
-        // was emptied during a cleanup and it should be on the free
-        // list now).
+          alloc_region->top() == alloc_region->bottom() ||
+          alloc_region->isHumongous()) {
+        // we will discard the current GC alloc region if
+        // * it's in the collection set (it can happen!),
+        // * it's already full (no point in using it),
+        // * it's empty (this means that it was emptied during
+        // a cleanup and it should be on the free list now), or
+        // * it's humongous (this means that it was emptied
+        // during a cleanup and was added to the free list, but
+        // has been subsequently used to allocate a humongous
+        // object that may be less than the region size).
 
         alloc_region = NULL;
       }
@@ -3096,6 +3102,11 @@
     } else {
       // the region was retained from the last collection
       ++_gc_alloc_region_counts[ap];
+      if (G1PrintHeapRegions) {
+        gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
+                               "top "PTR_FORMAT,
+                               alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top());
+      }
     }
 
     if (alloc_region != NULL) {
@@ -3652,6 +3663,8 @@
     _g1_rem(g1h->g1_rem_set()),
     _hash_seed(17), _queue_num(queue_num),
     _term_attempts(0),
+    _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
+    _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
     _age_table(false),
 #if G1_DETAILED_STATS
     _pushes(0), _pops(0), _steals(0),
@@ -3678,6 +3691,9 @@
 
   _overflowed_refs = new OverflowQueue(10);
 
+  _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
+  _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
+
   _start = os::elapsedTime();
 }
 
@@ -3975,16 +3991,13 @@
 
     OopsInHeapRegionClosure        *scan_root_cl;
     OopsInHeapRegionClosure        *scan_perm_cl;
-    OopsInHeapRegionClosure        *scan_so_cl;
-
-    if (_g1h->g1_policy()->should_initiate_conc_mark()) {
+
+    if (_g1h->g1_policy()->during_initial_mark_pause()) {
       scan_root_cl = &scan_mark_root_cl;
       scan_perm_cl = &scan_mark_perm_cl;
-      scan_so_cl   = &scan_mark_heap_rs_cl;
     } else {
       scan_root_cl = &only_scan_root_cl;
       scan_perm_cl = &only_scan_perm_cl;
-      scan_so_cl   = &only_scan_heap_rs_cl;
     }
 
     pss.start_strong_roots();
@@ -3992,7 +4005,6 @@
                                   SharedHeap::SO_AllClasses,
                                   scan_root_cl,
                                   &push_heap_rs_cl,
-                                  scan_so_cl,
                                   scan_perm_cl,
                                   i);
     pss.end_strong_roots();
@@ -4054,7 +4066,6 @@
                         SharedHeap::ScanningOption so,
                         OopClosure* scan_non_heap_roots,
                         OopsInHeapRegionClosure* scan_rs,
-                        OopsInHeapRegionClosure* scan_so,
                         OopsInGenClosure* scan_perm,
                         int worker_i) {
   // First scan the strong roots, including the perm gen.
@@ -4074,6 +4085,7 @@
                        &buf_scan_non_heap_roots,
                        &eager_scan_code_roots,
                        &buf_scan_perm);
+
   // Finish up any enqueued closure apps.
   buf_scan_non_heap_roots.done();
   buf_scan_perm.done();
@@ -4096,9 +4108,6 @@
 
   // XXX What should this be doing in the parallel case?
   g1_policy()->record_collection_pause_end_CH_strong_roots();
-  if (scan_so != NULL) {
-    scan_scan_only_set(scan_so, worker_i);
-  }
   // Now scan the complement of the collection set.
   if (scan_rs != NULL) {
     g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
@@ -4112,54 +4121,6 @@
 }
 
 void
-G1CollectedHeap::scan_scan_only_region(HeapRegion* r,
-                                       OopsInHeapRegionClosure* oc,
-                                       int worker_i) {
-  HeapWord* startAddr = r->bottom();
-  HeapWord* endAddr = r->used_region().end();
-
-  oc->set_region(r);
-
-  HeapWord* p = r->bottom();
-  HeapWord* t = r->top();
-  guarantee( p == r->next_top_at_mark_start(), "invariant" );
-  while (p < t) {
-    oop obj = oop(p);
-    p += obj->oop_iterate(oc);
-  }
-}
-
-void
-G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc,
-                                    int worker_i) {
-  double start = os::elapsedTime();
-
-  BufferingOopsInHeapRegionClosure boc(oc);
-
-  FilterInHeapRegionAndIntoCSClosure scan_only(this, &boc);
-  FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark());
-
-  OopsInHeapRegionClosure *foc;
-  if (g1_policy()->should_initiate_conc_mark())
-    foc = &scan_and_mark;
-  else
-    foc = &scan_only;
-
-  HeapRegion* hr;
-  int n = 0;
-  while ((hr = _young_list->par_get_next_scan_only_region()) != NULL) {
-    scan_scan_only_region(hr, foc, worker_i);
-    ++n;
-  }
-  boc.done();
-
-  double closure_app_s = boc.closure_app_seconds();
-  g1_policy()->record_obj_copy_time(worker_i, closure_app_s * 1000.0);
-  double ms = (os::elapsedTime() - start - closure_app_s)*1000.0;
-  g1_policy()->record_scan_only_time(worker_i, ms, n);
-}
-
-void
 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
                                        OopClosure* non_root_closure) {
   CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
@@ -4357,17 +4318,14 @@
 class G1ParCleanupCTTask : public AbstractGangTask {
   CardTableModRefBS* _ct_bs;
   G1CollectedHeap* _g1h;
-  HeapRegion* volatile _so_head;
   HeapRegion* volatile _su_head;
 public:
   G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
                      G1CollectedHeap* g1h,
-                     HeapRegion* scan_only_list,
                      HeapRegion* survivor_list) :
     AbstractGangTask("G1 Par Cleanup CT Task"),
     _ct_bs(ct_bs),
     _g1h(g1h),
-    _so_head(scan_only_list),
     _su_head(survivor_list)
   { }
 
@@ -4376,14 +4334,13 @@
     while (r = _g1h->pop_dirty_cards_region()) {
       clear_cards(r);
     }
-    // Redirty the cards of the scan-only and survivor regions.
-    dirty_list(&this->_so_head);
+    // Redirty the cards of the survivor regions.
     dirty_list(&this->_su_head);
   }
 
   void clear_cards(HeapRegion* r) {
-    // Cards for Survivor and Scan-Only regions will be dirtied later.
-    if (!r->is_scan_only() && !r->is_survivor()) {
+    // Cards for Survivor regions will be dirtied later.
+    if (!r->is_survivor()) {
       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
     }
   }
@@ -4416,7 +4373,7 @@
   virtual bool doHeapRegion(HeapRegion* r)
   {
     MemRegion mr(r->bottom(), r->end());
-    if (r->is_scan_only() || r->is_survivor()) {
+    if (r->is_survivor()) {
       _ct_bs->verify_dirty_region(mr);
     } else {
       _ct_bs->verify_clean_region(mr);
@@ -4432,8 +4389,8 @@
 
   // Iterate over the dirty cards region list.
   G1ParCleanupCTTask cleanup_task(ct_bs, this,
-                                  _young_list->first_scan_only_region(),
                                   _young_list->first_survivor_region());
+
   if (ParallelGCThreads > 0) {
     set_par_threads(workers()->total_workers());
     workers()->run_task(&cleanup_task);
@@ -4449,12 +4406,12 @@
       }
       r->set_next_dirty_cards_region(NULL);
     }
-    // now, redirty the cards of the scan-only and survivor regions
+    // now, redirty the cards of the survivor regions
     // (it seemed faster to do it this way, instead of iterating over
     // all regions and then clearing / dirtying as appropriate)
-    dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region());
     dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
   }
+
   double elapsed = os::elapsedTime() - start;
   g1_policy()->record_clear_ct_time( elapsed * 1000.0);
 #ifndef PRODUCT
@@ -4475,6 +4432,11 @@
   double young_time_ms     = 0.0;
   double non_young_time_ms = 0.0;
 
+  // Since the collection set is a superset of the young list,
+  // all we need to do to clear the young list is clear its
+  // head and length, and unlink any young regions in the code below.
+  _young_list->clear();
+
   G1CollectorPolicy* policy = g1_policy();
 
   double start_sec = os::elapsedTime();
@@ -4518,6 +4480,12 @@
       guarantee( (size_t)index < policy->young_cset_length(), "invariant" );
       size_t words_survived = _surviving_young_words[index];
       cur->record_surv_words_in_group(words_survived);
+
+      // At this point we have 'popped' cur from the collection set
+      // (linked via next_in_collection_set()) but it is still in the
+      // young list (linked via next_young_region()). Clear the
+      // _next_young_region field.
+      cur->set_next_young_region(NULL);
     } else {
       int index = cur->young_index_in_cset();
       guarantee( index == -1, "invariant" );
@@ -4533,7 +4501,6 @@
              "Should not have empty regions in a CS.");
       free_region(cur);
     } else {
-      guarantee( !cur->is_scan_only(), "should not be scan only" );
       cur->uninstall_surv_rate_group();
       if (cur->is_young())
         cur->set_young_index_in_cset(-1);
@@ -4557,6 +4524,27 @@
   policy->record_non_young_free_cset_time_ms(non_young_time_ms);
 }
 
+// This routine is similar to the above but does not record
+// any policy statistics or update free lists; we are abandoning
+// the current incremental collection set in preparation for a
+// full collection. After the full GC we will start to build up
+// the incremental collection set again.
+// This is only called when we're doing a full collection
+// and is immediately followed by the tearing down of the young list.
+
+void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
+  HeapRegion* cur = cs_head;
+
+  while (cur != NULL) {
+    HeapRegion* next = cur->next_in_collection_set();
+    assert(cur->in_collection_set(), "bad CS");
+    cur->set_next_in_collection_set(NULL);
+    cur->set_in_collection_set(false);
+    cur->set_young_index_in_cset(-1);
+    cur = next;
+  }
+}
+
 HeapRegion*
 G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) {
   assert(ZF_mon->owned_by_self(), "Precondition");
@@ -4923,12 +4911,10 @@
   bool success() { return _success; }
 };
 
-bool G1CollectedHeap::check_young_list_empty(bool ignore_scan_only_list,
-                                             bool check_sample) {
-  bool ret = true;
-
-  ret = _young_list->check_list_empty(ignore_scan_only_list, check_sample);
-  if (!ignore_scan_only_list) {
+bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
+  bool ret = _young_list->check_list_empty(check_sample);
+
+  if (check_heap) {
     NoYoungRegionsClosure closure;
     heap_region_iterate(&closure);
     ret = ret && closure.success();
@@ -4979,7 +4965,7 @@
   MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
   while (pop_unclean_region_list_locked() != NULL) ;
   assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0,
-         "Postconditions of loop.")
+         "Postconditions of loop.");
   while (pop_free_region_list_locked() != NULL) ;
   assert(_free_region_list == NULL, "Postcondition of loop.");
   if (_free_region_list_size != 0) {
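The abandon path added above is easy to model in isolation. The following is a
minimal standalone sketch (not HotSpot code; the Region struct and field names
are simplified assumptions) of the unlink walk that
G1CollectedHeap::abandon_collection_set performs over the singly-linked
collection set:

    #include <cstddef>

    // Simplified stand-in for HeapRegion's collection-set state (assumption:
    // the real class carries far more fields and invariants).
    struct Region {
      Region* next_in_collection_set;
      bool    in_collection_set;
      int     young_index_in_cset;
    };

    // Mirrors the walk in abandon_collection_set(): unlink each region and
    // clear its collection-set state, touching no free lists or policy stats.
    void abandon_collection_set_sketch(Region* cs_head) {
      Region* cur = cs_head;
      while (cur != NULL) {
        Region* next = cur->next_in_collection_set;
        cur->next_in_collection_set = NULL;
        cur->in_collection_set = false;
        cur->young_index_in_cset = -1;
        cur = next;
      }
    }

The walk is O(collection set length) and leaves each region ready to be
re-added when the next incremental collection set is built.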
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -81,33 +81,29 @@
 
   HeapRegion* _head;
 
-  HeapRegion* _scan_only_head;
-  HeapRegion* _scan_only_tail;
+  HeapRegion* _survivor_head;
+  HeapRegion* _survivor_tail;
+
+  HeapRegion* _curr;
+
   size_t      _length;
-  size_t      _scan_only_length;
+  size_t      _survivor_length;
 
   size_t      _last_sampled_rs_lengths;
   size_t      _sampled_rs_lengths;
-  HeapRegion* _curr;
-  HeapRegion* _curr_scan_only;
 
-  HeapRegion* _survivor_head;
-  HeapRegion* _survivor_tail;
-  size_t      _survivor_length;
-
-  void          empty_list(HeapRegion* list);
+  void         empty_list(HeapRegion* list);
 
 public:
   YoungList(G1CollectedHeap* g1h);
 
-  void          push_region(HeapRegion* hr);
-  void          add_survivor_region(HeapRegion* hr);
-  HeapRegion*   pop_region();
-  void          empty_list();
-  bool          is_empty() { return _length == 0; }
-  size_t        length() { return _length; }
-  size_t        scan_only_length() { return _scan_only_length; }
-  size_t        survivor_length() { return _survivor_length; }
+  void         push_region(HeapRegion* hr);
+  void         add_survivor_region(HeapRegion* hr);
+
+  void         empty_list();
+  bool         is_empty() { return _length == 0; }
+  size_t       length() { return _length; }
+  size_t       survivor_length() { return _survivor_length; }
 
   void rs_length_sampling_init();
   bool rs_length_sampling_more();
@@ -120,22 +116,21 @@
 
   // for development purposes
   void reset_auxilary_lists();
+  void clear() { _head = NULL; _length = 0; }
+
+  void clear_survivors() {
+    _survivor_head    = NULL;
+    _survivor_tail    = NULL;
+    _survivor_length  = 0;
+  }
+
   HeapRegion* first_region() { return _head; }
-  HeapRegion* first_scan_only_region() { return _scan_only_head; }
   HeapRegion* first_survivor_region() { return _survivor_head; }
   HeapRegion* last_survivor_region() { return _survivor_tail; }
-  HeapRegion* par_get_next_scan_only_region() {
-    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
-    HeapRegion* ret = _curr_scan_only;
-    if (ret != NULL)
-      _curr_scan_only = ret->get_next_young_region();
-    return ret;
-  }
 
   // debugging
   bool          check_list_well_formed();
-  bool          check_list_empty(bool ignore_scan_only_list,
-                                 bool check_sample = true);
+  bool          check_list_empty(bool check_sample = true);
   void          print();
 };
 
@@ -232,6 +227,9 @@
   // current collection.
   HeapRegion* _gc_alloc_region_list;
 
+  // Determines PLAB size for a particular allocation purpose.
+  static size_t desired_plab_sz(GCAllocPurpose purpose);
+
   // When called by par thread, require par_alloc_during_gc_lock() to be held.
   void push_gc_alloc_region(HeapRegion* hr);
 
@@ -402,8 +400,7 @@
     assert(_in_cset_fast_test_base != NULL, "sanity");
     assert(r->in_collection_set(), "invariant");
     int index = r->hrs_index();
-    assert(0 <= (size_t) index && (size_t) index < _in_cset_fast_test_length,
-           "invariant");
+    assert(0 <= index && (size_t) index < _in_cset_fast_test_length, "invariant");
     assert(!_in_cset_fast_test_base[index], "invariant");
     _in_cset_fast_test_base[index] = true;
   }
@@ -428,6 +425,12 @@
     }
   }
 
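+  // Clears the per-region in-collection-set fast-test table; used when a
+  // fresh incremental collection set starts being built (e.g. after an
+  // abandoned pause in g1CollectedHeap.cpp).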
+  void clear_cset_fast_test() {
+    assert(_in_cset_fast_test_base != NULL, "sanity");
+    memset(_in_cset_fast_test_base, false,
+        _in_cset_fast_test_length * sizeof(bool));
+  }
+
 protected:
 
   // Shrink the garbage-first heap by at most the given size (in bytes!).
@@ -473,6 +476,10 @@
   // regions.
   void free_collection_set(HeapRegion* cs_head);
 
+  // Abandon the current collection set without recording policy
+  // statistics or updating free lists.
+  void abandon_collection_set(HeapRegion* cs_head);
+
   // Applies "scan_non_heap_roots" to roots outside the heap,
   // "scan_rs" to roots inside the heap (having done "set_region" to
   // indicate the region in which the root resides), and does "scan_perm"
@@ -485,16 +492,9 @@
                                SharedHeap::ScanningOption so,
                                OopClosure* scan_non_heap_roots,
                                OopsInHeapRegionClosure* scan_rs,
-                               OopsInHeapRegionClosure* scan_so,
                                OopsInGenClosure* scan_perm,
                                int worker_i);
 
-  void scan_scan_only_set(OopsInHeapRegionClosure* oc,
-                          int worker_i);
-  void scan_scan_only_region(HeapRegion* hr,
-                             OopsInHeapRegionClosure* oc,
-                             int worker_i);
-
   // Apply "blk" to all the weak roots of the system.  These include
   // JNI weak roots, the code cache, system dictionary, symbol table,
   // string table, and referents of reachable weak refs.
@@ -1133,36 +1133,14 @@
   void set_region_short_lived_locked(HeapRegion* hr);
   // add appropriate methods for any other surv rate groups
 
-  void young_list_rs_length_sampling_init() {
-    _young_list->rs_length_sampling_init();
-  }
-  bool young_list_rs_length_sampling_more() {
-    return _young_list->rs_length_sampling_more();
-  }
-  void young_list_rs_length_sampling_next() {
-    _young_list->rs_length_sampling_next();
-  }
-  size_t young_list_sampled_rs_lengths() {
-    return _young_list->sampled_rs_lengths();
-  }
-
-  size_t young_list_length()   { return _young_list->length(); }
-  size_t young_list_scan_only_length() {
-                                      return _young_list->scan_only_length(); }
-
-  HeapRegion* pop_region_from_young_list() {
-    return _young_list->pop_region();
-  }
-
-  HeapRegion* young_list_first_region() {
-    return _young_list->first_region();
-  }
+  YoungList* young_list() { return _young_list; }
 
   // debugging
   bool check_young_list_well_formed() {
     return _young_list->check_list_well_formed();
   }
-  bool check_young_list_empty(bool ignore_scan_only_list,
+
+  bool check_young_list_empty(bool check_heap,
                               bool check_sample = true);
 
   // *** Stuff related to concurrent marking.  It's not clear to me that so
@@ -1367,12 +1345,18 @@
     return BitsPerWord << shifter();
   }
 
-  static size_t gclab_word_size() {
-    return G1ParallelGCAllocBufferSize / HeapWordSize;
+  size_t gclab_word_size() const {
+    return _gclab_word_size;
   }
 
-  static size_t bitmap_size_in_bits() {
-    size_t bits_in_bitmap = gclab_word_size() >> shifter();
+  // Calculates actual GCLab size in words
+  size_t gclab_real_word_size() const {
+    return bitmap_size_in_bits(pointer_delta(_real_end_word, _start_word))
+           / BitsPerWord;
+  }
+
+  static size_t bitmap_size_in_bits(size_t gclab_word_size) {
+    size_t bits_in_bitmap = gclab_word_size >> shifter();
     // We are going to ensure that the beginning of a word in this
     // bitmap also corresponds to the beginning of a word in the
     // global marking bitmap. To handle the case where a GCLab
@@ -1382,13 +1366,13 @@
     return bits_in_bitmap + BitsPerWord - 1;
   }
 public:
-  GCLabBitMap(HeapWord* heap_start)
-    : BitMap(bitmap_size_in_bits()),
+  GCLabBitMap(HeapWord* heap_start, size_t gclab_word_size)
+    : BitMap(bitmap_size_in_bits(gclab_word_size)),
       _cm(G1CollectedHeap::heap()->concurrent_mark()),
       _shifter(shifter()),
       _bitmap_word_covers_words(bitmap_word_covers_words()),
       _heap_start(heap_start),
-      _gclab_word_size(gclab_word_size()),
+      _gclab_word_size(gclab_word_size),
       _real_start_word(NULL),
       _real_end_word(NULL),
       _start_word(NULL)
@@ -1483,7 +1467,7 @@
       mark_bitmap->mostly_disjoint_range_union(this,
                                 0, // always start from the start of the bitmap
                                 _start_word,
-                                size_in_words());
+                                gclab_real_word_size());
       _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));
 
 #ifndef PRODUCT
@@ -1495,9 +1479,10 @@
     }
   }
 
-  static size_t bitmap_size_in_words() {
-    return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord;
+  size_t bitmap_size_in_words() const {
+    return (bitmap_size_in_bits(gclab_word_size()) + BitsPerWord - 1) / BitsPerWord;
   }
+
 };
 
 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
@@ -1507,10 +1492,10 @@
   GCLabBitMap _bitmap;
 
 public:
-  G1ParGCAllocBuffer() :
-    ParGCAllocBuffer(G1ParallelGCAllocBufferSize / HeapWordSize),
+  G1ParGCAllocBuffer(size_t gclab_word_size) :
+    ParGCAllocBuffer(gclab_word_size),
     _during_marking(G1CollectedHeap::heap()->mark_in_progress()),
-    _bitmap(G1CollectedHeap::heap()->reserved_region().start()),
+    _bitmap(G1CollectedHeap::heap()->reserved_region().start(), gclab_word_size),
     _retired(false)
   { }
 
@@ -1549,8 +1534,10 @@
   typedef GrowableArray<StarTask> OverflowQueue;
   OverflowQueue* _overflowed_refs;
 
-  G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
-  ageTable           _age_table;
+  G1ParGCAllocBuffer  _surviving_alloc_buffer;
+  G1ParGCAllocBuffer  _tenured_alloc_buffer;
+  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
+  ageTable            _age_table;
 
   size_t           _alloc_buffer_waste;
   size_t           _undo_waste;
@@ -1619,7 +1606,7 @@
   ageTable*         age_table()       { return &_age_table;       }
 
   G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
-    return &_alloc_buffers[purpose];
+    return _alloc_buffers[purpose];
   }
 
   size_t alloc_buffer_waste()                    { return _alloc_buffer_waste; }
@@ -1684,15 +1671,15 @@
   HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
 
     HeapWord* obj = NULL;
-    if (word_sz * 100 <
-        (size_t)(G1ParallelGCAllocBufferSize / HeapWordSize) *
-                                                  ParallelGCBufferWastePct) {
+    size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
+    if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
       G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
+      assert(gclab_word_size == alloc_buf->word_sz(),
+             "dynamic resizing is not supported");
       add_to_alloc_buffer_waste(alloc_buf->words_remaining());
       alloc_buf->retire(false, false);
 
-      HeapWord* buf =
-        _g1h->par_allocate_during_gc(purpose, G1ParallelGCAllocBufferSize / HeapWordSize);
+      HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
       if (buf == NULL) return NULL; // Let caller handle allocation failure.
       // Otherwise.
       alloc_buf->set_buf(buf);
@@ -1786,9 +1773,9 @@
 
   void retire_alloc_buffers() {
     for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-      size_t waste = _alloc_buffers[ap].words_remaining();
+      size_t waste = _alloc_buffers[ap]->words_remaining();
       add_to_alloc_buffer_waste(waste);
-      _alloc_buffers[ap].retire(true, false);
+      _alloc_buffers[ap]->retire(true, false);
     }
   }
 
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,10 +42,6 @@
   0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
 };
 
-static double cost_per_scan_only_region_ms_defaults[] = {
-  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
-};
-
 // all the same
 static double fully_young_cards_per_entry_ratio_defaults[] = {
   1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
@@ -125,7 +121,6 @@
   _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
   _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
   _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _cost_per_scan_only_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
   _partially_young_cards_per_entry_ratio_seq(
                                          new TruncatedSeq(TruncatedSeqLength)),
@@ -133,7 +128,6 @@
   _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _cost_per_scan_only_region_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
   _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   _non_young_other_cost_per_region_ms_seq(
@@ -178,14 +172,30 @@
   // so the hack is to do the cast  QQQ FIXME
   _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
   _n_marks_since_last_pause(0),
-  _conc_mark_initiated(false),
-  _should_initiate_conc_mark(false),
+  _initiate_conc_mark_if_possible(false),
+  _during_initial_mark_pause(false),
   _should_revert_to_full_young_gcs(false),
   _last_full_young_gc(false),
 
   _prev_collection_pause_used_at_end_bytes(0),
 
   _collection_set(NULL),
+  _collection_set_size(0),
+  _collection_set_bytes_used_before(0),
+
+  // Incremental CSet attributes
+  _inc_cset_build_state(Inactive),
+  _inc_cset_head(NULL),
+  _inc_cset_tail(NULL),
+  _inc_cset_size(0),
+  _inc_cset_young_index(0),
+  _inc_cset_bytes_used_before(0),
+  _inc_cset_max_finger(NULL),
+  _inc_cset_recorded_young_bytes(0),
+  _inc_cset_recorded_rs_lengths(0),
+  _inc_cset_predicted_elapsed_time_ms(0.0),
+  _inc_cset_predicted_bytes_to_copy(0),
+
 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 #endif // _MSC_VER
@@ -198,7 +208,9 @@
   _recorded_survivor_regions(0),
   _recorded_survivor_head(NULL),
   _recorded_survivor_tail(NULL),
-  _survivors_age_table(true)
+  _survivors_age_table(true),
+
+  _gc_overhead_perc(0.0)
 
 {
   // Set up the region size and associated fields. Given that the
@@ -207,13 +219,20 @@
   HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
   HeapRegionRemSet::setup_remset_size();
 
+  // Verify PLAB sizes
+  const uint region_size = HeapRegion::GrainWords;
+  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
+    char buffer[128];
+    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most %u",
+                 OldPLABSize > region_size ? "Old" : "Young", region_size);
+    vm_exit_during_initialization(buffer);
+  }
+
   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
 
   _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
   _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];
-  _par_last_scan_only_times_ms = new double[_parallel_gc_threads];
-  _par_last_scan_only_regions_scanned = new double[_parallel_gc_threads];
 
   _par_last_update_rs_start_times_ms = new double[_parallel_gc_threads];
   _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
@@ -243,8 +262,6 @@
   _pending_card_diff_seq->add(0.0);
   _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
   _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
-  _cost_per_scan_only_region_ms_seq->add(
-                                 cost_per_scan_only_region_ms_defaults[index]);
   _fully_young_cards_per_entry_ratio_seq->add(
                             fully_young_cards_per_entry_ratio_defaults[index]);
   _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
@@ -272,9 +289,14 @@
 
   // if G1FixedSurvivorSpaceSize is 0 which means the size is not
   // fixed, then _max_survivor_regions will be calculated at
-  // calculate_young_list_target_config during initialization
+  // calculate_young_list_target_length during initialization
   _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
 
+  assert(GCTimeRatio > 0,
+         "we should have set it to a default value set_g1_gc_flags() "
+         "if a user set it to 0");
+  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
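+  // For example, GCTimeRatio == 9 yields 100 * (1 / (1 + 9)) = 10, i.e. a
+  // target GC overhead of 10% of total time.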
+
   initialize_all();
 }
 
@@ -341,15 +363,18 @@
       set_adaptive_young_list_length(false);
       _young_list_fixed_length = initial_region_num;
     }
-     _free_regions_at_end_of_collection = _g1->free_regions();
-     _scan_only_regions_at_end_of_collection = 0;
-     calculate_young_list_min_length();
-     guarantee( _young_list_min_length == 0, "invariant, not enough info" );
-     calculate_young_list_target_config();
-   } else {
+    _free_regions_at_end_of_collection = _g1->free_regions();
+    calculate_young_list_min_length();
+    guarantee( _young_list_min_length == 0, "invariant, not enough info" );
+    calculate_young_list_target_length();
+  } else {
      _young_list_fixed_length = 0;
     _in_young_gc_mode = false;
   }
+
+  // We may immediately start allocating regions and placing them on the
+  // collection set list. Initialize the per-collection-set info.
+  start_incremental_cset_building();
 }
 
 // Create the jstat counters for the policy.
@@ -369,112 +394,29 @@
     double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
     double alloc_rate_ms = predict_alloc_rate_ms();
     int min_regions = (int) ceil(alloc_rate_ms * when_ms);
-    int current_region_num = (int) _g1->young_list_length();
+    int current_region_num = (int) _g1->young_list()->length();
     _young_list_min_length = min_regions + current_region_num;
   }
 }
 
-void G1CollectorPolicy::calculate_young_list_target_config() {
+void G1CollectorPolicy::calculate_young_list_target_length() {
   if (adaptive_young_list_length()) {
     size_t rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
-    calculate_young_list_target_config(rs_lengths);
+    calculate_young_list_target_length(rs_lengths);
   } else {
     if (full_young_gcs())
       _young_list_target_length = _young_list_fixed_length;
     else
       _young_list_target_length = _young_list_fixed_length / 2;
+
     _young_list_target_length = MAX2(_young_list_target_length, (size_t)1);
-    size_t so_length = calculate_optimal_so_length(_young_list_target_length);
-    guarantee( so_length < _young_list_target_length, "invariant" );
-    _young_list_so_prefix_length = so_length;
   }
   calculate_survivors_policy();
 }
 
-// This method calculate the optimal scan-only set for a fixed young
-// gen size. I couldn't work out how to reuse the more elaborate one,
-// i.e. calculate_young_list_target_config(rs_length), as the loops are
-// fundamentally different (the other one finds a config for different
-// S-O lengths, whereas here we need to do the opposite).
-size_t G1CollectorPolicy::calculate_optimal_so_length(
-                                                    size_t young_list_length) {
-  if (!G1UseScanOnlyPrefix)
-    return 0;
-
-  if (_all_pause_times_ms->num() < 3) {
-    // we won't use a scan-only set at the beginning to allow the rest
-    // of the predictors to warm up
-    return 0;
-  }
-
-  if (_cost_per_scan_only_region_ms_seq->num() < 3) {
-    // then, we'll only set the S-O set to 1 for a little bit of time,
-    // to get enough information on the scanning cost
-    return 1;
-  }
-
-  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
-  size_t rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
-  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
-  size_t scanned_cards;
-  if (full_young_gcs())
-    scanned_cards = predict_young_card_num(adj_rs_lengths);
-  else
-    scanned_cards = predict_non_young_card_num(adj_rs_lengths);
-  double base_time_ms = predict_base_elapsed_time_ms(pending_cards,
-                                                     scanned_cards);
-
-  size_t so_length = 0;
-  double max_gc_eff = 0.0;
-  for (size_t i = 0; i < young_list_length; ++i) {
-    double gc_eff = 0.0;
-    double pause_time_ms = 0.0;
-    predict_gc_eff(young_list_length, i, base_time_ms,
-                   &gc_eff, &pause_time_ms);
-    if (gc_eff > max_gc_eff) {
-      max_gc_eff = gc_eff;
-      so_length = i;
-    }
-  }
-
-  // set it to 95% of the optimal to make sure we sample the "area"
-  // around the optimal length to get up-to-date survival rate data
-  return so_length * 950 / 1000;
-}
-
-// This is a really cool piece of code! It finds the best
-// target configuration (young length / scan-only prefix length) so
-// that GC efficiency is maximized and that we also meet a pause
-// time. It's a triple nested loop. These loops are explained below
-// from the inside-out :-)
-//
-// (a) The innermost loop will try to find the optimal young length
-// for a fixed S-O length. It uses a binary search to speed up the
-// process. We assume that, for a fixed S-O length, as we add more
-// young regions to the CSet, the GC efficiency will only go up (I'll
-// skip the proof). So, using a binary search to optimize this process
-// makes perfect sense.
-//
-// (b) The middle loop will fix the S-O length before calling the
-// innermost one. It will vary it between two parameters, increasing
-// it by a given increment.
-//
-// (c) The outermost loop will call the middle loop three times.
-//   (1) The first time it will explore all possible S-O length values
-//   from 0 to as large as it can get, using a coarse increment (to
-//   quickly "home in" to where the optimal seems to be).
-//   (2) The second time it will explore the values around the optimal
-//   that was found by the first iteration using a fine increment.
-//   (3) Once the optimal config has been determined by the second
-//   iteration, we'll redo the calculation, but setting the S-O length
-//   to 95% of the optimal to make sure we sample the "area"
-//   around the optimal length to get up-to-date survival rate data
-//
-// Termination conditions for the iterations are several: the pause
-// time is over the limit, we do not have enough to-space, etc.
-
-void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
+void G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths) {
   guarantee( adaptive_young_list_length(), "pre-condition" );
+  guarantee( !_in_marking_window || !_last_full_young_gc, "invariant" );
 
   double start_time_sec = os::elapsedTime();
   size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1ReservePercent);
@@ -488,285 +430,80 @@
     double survivor_regions_evac_time =
         predict_survivor_regions_evac_time();
 
-    size_t min_so_length = 0;
-    size_t max_so_length = 0;
-
-    if (G1UseScanOnlyPrefix) {
-      if (_all_pause_times_ms->num() < 3) {
-        // we won't use a scan-only set at the beginning to allow the rest
-        // of the predictors to warm up
-        min_so_length = 0;
-        max_so_length = 0;
-      } else if (_cost_per_scan_only_region_ms_seq->num() < 3) {
-        // then, we'll only set the S-O set to 1 for a little bit of time,
-        // to get enough information on the scanning cost
-        min_so_length = 1;
-        max_so_length = 1;
-      } else if (_in_marking_window || _last_full_young_gc) {
-        // no S-O prefix during a marking phase either, as at the end
-        // of the marking phase we'll have to use a very small young
-        // length target to fill up the rest of the CSet with
-        // non-young regions and, if we have lots of scan-only regions
-        // left-over, we will not be able to add any more non-young
-        // regions.
-        min_so_length = 0;
-        max_so_length = 0;
-      } else {
-        // this is the common case; we'll never reach the maximum, we
-        // one of the end conditions will fire well before that
-        // (hopefully!)
-        min_so_length = 0;
-        max_so_length = _free_regions_at_end_of_collection - 1;
-      }
-    } else {
-      // no S-O prefix, as the switch is not set, but we still need to
-      // do one iteration to calculate the best young target that
-      // meets the pause time; this way we reuse the same code instead
-      // of replicating it
-      min_so_length = 0;
-      max_so_length = 0;
-    }
-
     double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
     size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
     size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
-    size_t scanned_cards;
-    if (full_young_gcs())
-      scanned_cards = predict_young_card_num(adj_rs_lengths);
-    else
-      scanned_cards = predict_non_young_card_num(adj_rs_lengths);
-    // calculate this once, so that we don't have to recalculate it in
-    // the innermost loop
+    size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
     double base_time_ms = predict_base_elapsed_time_ms(pending_cards, scanned_cards)
                           + survivor_regions_evac_time;
+
     // the result
     size_t final_young_length = 0;
-    size_t final_so_length = 0;
-    double final_gc_eff = 0.0;
-    // we'll also keep track of how many times we go into the inner loop
-    // this is for profiling reasons
-    size_t calculations = 0;
-
-    // this determines which of the three iterations the outer loop is in
-    typedef enum {
-      pass_type_coarse,
-      pass_type_fine,
-      pass_type_final
-    } pass_type_t;
-
-    // range of the outer loop's iteration
-    size_t from_so_length   = min_so_length;
-    size_t to_so_length     = max_so_length;
-    guarantee( from_so_length <= to_so_length, "invariant" );
-
-    // this will keep the S-O length that's found by the second
-    // iteration of the outer loop; we'll keep it just in case the third
-    // iteration fails to find something
-    size_t fine_so_length   = 0;
-
-    // the increment step for the coarse (first) iteration
-    size_t so_coarse_increments = 5;
-
-    // the common case, we'll start with the coarse iteration
-    pass_type_t pass = pass_type_coarse;
-    size_t so_length_incr = so_coarse_increments;
-
-    if (from_so_length == to_so_length) {
-      // not point in doing the coarse iteration, we'll go directly into
-      // the fine one (we essentially trying to find the optimal young
-      // length for a fixed S-O length).
-      so_length_incr = 1;
-      pass = pass_type_final;
-    } else if (to_so_length - from_so_length < 3 * so_coarse_increments) {
-      // again, the range is too short so no point in foind the coarse
-      // iteration either
-      so_length_incr = 1;
-      pass = pass_type_fine;
-    }
-
-    bool done = false;
-    // this is the outermost loop
-    while (!done) {
-#ifdef TRACE_CALC_YOUNG_CONFIG
-      // leave this in for debugging, just in case
-      gclog_or_tty->print_cr("searching between " SIZE_FORMAT " and " SIZE_FORMAT
-                             ", incr " SIZE_FORMAT ", pass %s",
-                             from_so_length, to_so_length, so_length_incr,
-                             (pass == pass_type_coarse) ? "coarse" :
-                             (pass == pass_type_fine) ? "fine" : "final");
-#endif // TRACE_CALC_YOUNG_CONFIG
-
-      size_t so_length = from_so_length;
-      size_t init_free_regions =
-        MAX2((size_t)0,
-             _free_regions_at_end_of_collection +
-             _scan_only_regions_at_end_of_collection - reserve_regions);
-
-      // this determines whether a configuration was found
-      bool gc_eff_set = false;
-      // this is the middle loop
-      while (so_length <= to_so_length) {
-        // base time, which excludes region-related time; again we
-        // calculate it once to avoid recalculating it in the
-        // innermost loop
-        double base_time_with_so_ms =
-                           base_time_ms + predict_scan_only_time_ms(so_length);
-        // it's already over the pause target, go around
-        if (base_time_with_so_ms > target_pause_time_ms)
-          break;
-
-        size_t starting_young_length = so_length+1;
-
-        // we make sure that the short young length that makes sense
-        // (one more than the S-O length) is feasible
-        size_t min_young_length = starting_young_length;
-        double min_gc_eff;
-        bool min_ok;
-        ++calculations;
-        min_ok = predict_gc_eff(min_young_length, so_length,
-                                base_time_with_so_ms,
-                                init_free_regions, target_pause_time_ms,
-                                &min_gc_eff);
-
-        if (min_ok) {
-          // the shortest young length is indeed feasible; we'll know
-          // set up the max young length and we'll do a binary search
-          // between min_young_length and max_young_length
-          size_t max_young_length = _free_regions_at_end_of_collection - 1;
-          double max_gc_eff = 0.0;
-          bool max_ok = false;
-
-          // the innermost loop! (finally!)
-          while (max_young_length > min_young_length) {
-            // we'll make sure that min_young_length is always at a
-            // feasible config
-            guarantee( min_ok, "invariant" );
-
-            ++calculations;
-            max_ok = predict_gc_eff(max_young_length, so_length,
-                                    base_time_with_so_ms,
-                                    init_free_regions, target_pause_time_ms,
-                                    &max_gc_eff);
+
+    size_t init_free_regions =
+      MAX2((size_t)0, _free_regions_at_end_of_collection - reserve_regions);
+
+    // if we're still under the pause target...
+    if (base_time_ms <= target_pause_time_ms) {
+      // We make sure that the shortest young length that makes sense
+      // fits within the target pause time.
+      size_t min_young_length = 1;
+
+      if (predict_will_fit(min_young_length, base_time_ms,
+                                     init_free_regions, target_pause_time_ms)) {
+        // The shortest young length will fit within the target pause time;
+        // we'll now check whether the absolute maximum number of young
+        // regions will fit in the target pause time. If not, we'll do
+        // a binary search between min_young_length and max_young_length
+        size_t abs_max_young_length = _free_regions_at_end_of_collection - 1;
+        size_t max_young_length = abs_max_young_length;
+
+        if (max_young_length > min_young_length) {
+          // Let's check if the initial max young length will fit within the
+          // target pause. If so, then there is no need to search for a
+          // maximal young length - we'll return the initial maximum.
+
+          if (predict_will_fit(max_young_length, base_time_ms,
+                                init_free_regions, target_pause_time_ms)) {
+            // The maximum young length will satisfy the target pause time.
+            // We are done so set min young length to this maximum length.
+            // The code after the loop will then set final_young_length using
+            // the value cached in the minimum length.
+            min_young_length = max_young_length;
+          } else {
+            // The maximum possible number of young regions will not fit within
+            // the target pause time, so let's search...
 
             size_t diff = (max_young_length - min_young_length) / 2;
-            if (max_ok) {
-              min_young_length = max_young_length;
-              min_gc_eff = max_gc_eff;
-              min_ok = true;
+            max_young_length = min_young_length + diff;
+
+            while (max_young_length > min_young_length) {
+              if (predict_will_fit(max_young_length, base_time_ms,
+                                        init_free_regions, target_pause_time_ms)) {
+
+                // The current max young length will fit within the target
+                // pause time. Note that we do not exit the loop here. By
+                // setting min = max and then recomputing max below, we
+                // continue searching for an upper bound in the range
+                // [max..max+diff].
+                min_young_length = max_young_length;
+              }
+              diff = (max_young_length - min_young_length) / 2;
+              max_young_length = min_young_length + diff;
             }
-            max_young_length = min_young_length + diff;
+            // the above loop found a maximal young length that will fit
+            // within the target pause time.
           }
-
-          // the innermost loop found a config
-          guarantee( min_ok, "invariant" );
-          if (min_gc_eff > final_gc_eff) {
-            // it's the best config so far, so we'll keep it
-            final_gc_eff = min_gc_eff;
-            final_young_length = min_young_length;
-            final_so_length = so_length;
-            gc_eff_set = true;
-          }
+          assert(min_young_length <= abs_max_young_length, "just checking");
         }
-
-        // incremental the fixed S-O length and go around
-        so_length += so_length_incr;
+        final_young_length = min_young_length;
       }
-
-      // this is the end of the outermost loop and we need to decide
-      // what to do during the next iteration
-      if (pass == pass_type_coarse) {
-        // we just did the coarse pass (first iteration)
-
-        if (!gc_eff_set)
-          // we didn't find a feasible config so we'll just bail out; of
-          // course, it might be the case that we missed it; but I'd say
-          // it's a bit unlikely
-          done = true;
-        else {
-          // We did find a feasible config with optimal GC eff during
-          // the first pass. So the second pass we'll only consider the
-          // S-O lengths around that config with a fine increment.
-
-          guarantee( so_length_incr == so_coarse_increments, "invariant" );
-          guarantee( final_so_length >= min_so_length, "invariant" );
-
-#ifdef TRACE_CALC_YOUNG_CONFIG
-          // leave this in for debugging, just in case
-          gclog_or_tty->print_cr("  coarse pass: SO length " SIZE_FORMAT,
-                                 final_so_length);
-#endif // TRACE_CALC_YOUNG_CONFIG
-
-          from_so_length =
-            (final_so_length - min_so_length > so_coarse_increments) ?
-            final_so_length - so_coarse_increments + 1 : min_so_length;
-          to_so_length =
-            (max_so_length - final_so_length > so_coarse_increments) ?
-            final_so_length + so_coarse_increments - 1 : max_so_length;
-
-          pass = pass_type_fine;
-          so_length_incr = 1;
-        }
-      } else if (pass == pass_type_fine) {
-        // we just finished the second pass
-
-        if (!gc_eff_set) {
-          // we didn't find a feasible config (yes, it's possible;
-          // notice that, sometimes, we go directly into the fine
-          // iteration and skip the coarse one) so we bail out
-          done = true;
-        } else {
-          // We did find a feasible config with optimal GC eff
-          guarantee( so_length_incr == 1, "invariant" );
-
-          if (final_so_length == 0) {
-            // The config is of an empty S-O set, so we'll just bail out
-            done = true;
-          } else {
-            // we'll go around once more, setting the S-O length to 95%
-            // of the optimal
-            size_t new_so_length = 950 * final_so_length / 1000;
-
-#ifdef TRACE_CALC_YOUNG_CONFIG
-            // leave this in for debugging, just in case
-            gclog_or_tty->print_cr("  fine pass: SO length " SIZE_FORMAT
-                                   ", setting it to " SIZE_FORMAT,
-                                    final_so_length, new_so_length);
-#endif // TRACE_CALC_YOUNG_CONFIG
-
-            from_so_length = new_so_length;
-            to_so_length = new_so_length;
-            fine_so_length = final_so_length;
-
-            pass = pass_type_final;
-          }
-        }
-      } else if (pass == pass_type_final) {
-        // we just finished the final (third) pass
-
-        if (!gc_eff_set)
-          // we didn't find a feasible config, so we'll just use the one
-          // we found during the second pass, which we saved
-          final_so_length = fine_so_length;
-
-        // and we're done!
-        done = true;
-      } else {
-        guarantee( false, "should never reach here" );
-      }
-
-      // we now go around the outermost loop
     }
+    // and we're done!
 
     // we should have at least one region in the target young length
     _young_list_target_length =
         MAX2((size_t) 1, final_young_length + _recorded_survivor_regions);
-    if (final_so_length >= final_young_length)
-      // and we need to ensure that the S-O length is not greater than
-      // the target young length (this is being a bit careful)
-      final_so_length = 0;
-    _young_list_so_prefix_length = final_so_length;
-    guarantee( !_in_marking_window || !_last_full_young_gc ||
-               _young_list_so_prefix_length == 0, "invariant" );
 
     // let's keep an eye of how long we spend on this calculation
     // right now, I assume that we'll print it when we need it; we
@@ -774,142 +511,91 @@
     double end_time_sec = os::elapsedTime();
     double elapsed_time_ms = (end_time_sec - start_time_sec) * 1000.0;
 
-#ifdef TRACE_CALC_YOUNG_CONFIG
+#ifdef TRACE_CALC_YOUNG_LENGTH
     // leave this in for debugging, just in case
-    gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT
-                           ", SO = " SIZE_FORMAT ", "
-                           "elapsed %1.2lf ms, calcs: " SIZE_FORMAT " (%s%s) "
-                           SIZE_FORMAT SIZE_FORMAT,
+    gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT ", "
+                           "elapsed %1.2lf ms, (%s%s) " SIZE_FORMAT SIZE_FORMAT,
                            target_pause_time_ms,
-                           _young_list_target_length - _young_list_so_prefix_length,
-                           _young_list_so_prefix_length,
+                           _young_list_target_length,
                            elapsed_time_ms,
-                           calculations,
                            full_young_gcs() ? "full" : "partial",
-                           should_initiate_conc_mark() ? " i-m" : "",
+                           during_initial_mark_pause() ? " i-m" : "",
                            _in_marking_window,
                            _in_marking_window_im);
-#endif // TRACE_CALC_YOUNG_CONFIG
+#endif // TRACE_CALC_YOUNG_LENGTH
 
     if (_young_list_target_length < _young_list_min_length) {
-      // bummer; this means that, if we do a pause when the optimal
-      // config dictates, we'll violate the pause spacing target (the
+      // bummer; this means that, if we do a pause when the maximal
+      // length dictates, we'll violate the pause spacing target (the
       // min length was calculate based on the application's current
       // alloc rate);
 
       // so, we have to bite the bullet, and allocate the minimum
       // number. We'll violate our target, but we just can't meet it.
 
-      size_t so_length = 0;
-      // a note further up explains why we do not want an S-O length
-      // during marking
-      if (!_in_marking_window && !_last_full_young_gc)
-        // but we can still try to see whether we can find an optimal
-        // S-O length
-        so_length = calculate_optimal_so_length(_young_list_min_length);
-
-#ifdef TRACE_CALC_YOUNG_CONFIG
+#ifdef TRACE_CALC_YOUNG_LENGTH
       // leave this in for debugging, just in case
       gclog_or_tty->print_cr("adjusted target length from "
-                             SIZE_FORMAT " to " SIZE_FORMAT
-                             ", SO " SIZE_FORMAT,
-                             _young_list_target_length, _young_list_min_length,
-                             so_length);
-#endif // TRACE_CALC_YOUNG_CONFIG
-
-      _young_list_target_length =
-        MAX2(_young_list_min_length, (size_t)1);
-      _young_list_so_prefix_length = so_length;
+                             SIZE_FORMAT " to " SIZE_FORMAT,
+                             _young_list_target_length, _young_list_min_length);
+#endif // TRACE_CALC_YOUNG_LENGTH
+
+      _young_list_target_length = _young_list_min_length;
     }
   } else {
     // we are in a partially-young mode or we've run out of regions (due
     // to evacuation failure)
 
-#ifdef TRACE_CALC_YOUNG_CONFIG
+#ifdef TRACE_CALC_YOUNG_LENGTH
     // leave this in for debugging, just in case
     gclog_or_tty->print_cr("(partial) setting target to " SIZE_FORMAT
-                           ", SO " SIZE_FORMAT,
-                           _young_list_min_length, 0);
-#endif // TRACE_CALC_YOUNG_CONFIG
-
-    // we'll do the pause as soon as possible and with no S-O prefix
-    // (see above for the reasons behind the latter)
+                           , _young_list_min_length);
+#endif // TRACE_CALC_YOUNG_LENGTH
+    // we'll do the pause as soon as possible by choosing the minimum
     _young_list_target_length =
       MAX2(_young_list_min_length, (size_t) 1);
-    _young_list_so_prefix_length = 0;
   }
 
   _rs_lengths_prediction = rs_lengths;
 }
 
-// This is used by: calculate_optimal_so_length(length). It returns
-// the GC eff and predicted pause time for a particular config
-void
-G1CollectorPolicy::predict_gc_eff(size_t young_length,
-                                  size_t so_length,
-                                  double base_time_ms,
-                                  double* ret_gc_eff,
-                                  double* ret_pause_time_ms) {
-  double so_time_ms = predict_scan_only_time_ms(so_length);
-  double accum_surv_rate_adj = 0.0;
-  if (so_length > 0)
-    accum_surv_rate_adj = accum_yg_surv_rate_pred((int)(so_length - 1));
-  double accum_surv_rate =
-    accum_yg_surv_rate_pred((int)(young_length - 1)) - accum_surv_rate_adj;
-  size_t bytes_to_copy =
-    (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
-  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
-  double young_other_time_ms =
-                       predict_young_other_time_ms(young_length - so_length);
-  double pause_time_ms =
-                base_time_ms + so_time_ms + copy_time_ms + young_other_time_ms;
-  size_t reclaimed_bytes =
-    (young_length - so_length) * HeapRegion::GrainBytes - bytes_to_copy;
-  double gc_eff = (double) reclaimed_bytes / pause_time_ms;
-
-  *ret_gc_eff = gc_eff;
-  *ret_pause_time_ms = pause_time_ms;
-}
-
-// This is used by: calculate_young_list_target_config(rs_length). It
-// returns the GC eff of a particular config. It returns false if that
-// config violates any of the end conditions of the search in the
-// calling method, or true upon success. The end conditions were put
-// here since it's called twice and it was best not to replicate them
-// in the caller. Also, passing the parameteres avoids having to
-// recalculate them in the innermost loop.
+// This is used by: calculate_young_list_target_length(rs_length). It
+// returns true iff:
+//   the predicted pause time for the given young list will not exceed
+//   the target pause time
+// and:
+//   the predicted amount of surviving data will not exceed the
+//   amount of free space available for survivor regions.
+//
 bool
-G1CollectorPolicy::predict_gc_eff(size_t young_length,
-                                  size_t so_length,
-                                  double base_time_with_so_ms,
-                                  size_t init_free_regions,
-                                  double target_pause_time_ms,
-                                  double* ret_gc_eff) {
-  *ret_gc_eff = 0.0;
+G1CollectorPolicy::predict_will_fit(size_t young_length,
+                                    double base_time_ms,
+                                    size_t init_free_regions,
+                                    double target_pause_time_ms) {
 
   if (young_length >= init_free_regions)
     // end condition 1: not enough space for the young regions
     return false;
 
   double accum_surv_rate_adj = 0.0;
-  if (so_length > 0)
-    accum_surv_rate_adj = accum_yg_surv_rate_pred((int)(so_length - 1));
   double accum_surv_rate =
     accum_yg_surv_rate_pred((int)(young_length - 1)) - accum_surv_rate_adj;
+
   size_t bytes_to_copy =
     (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
+
   double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
+
   double young_other_time_ms =
-                       predict_young_other_time_ms(young_length - so_length);
+                       predict_young_other_time_ms(young_length);
+
   double pause_time_ms =
-                   base_time_with_so_ms + copy_time_ms + young_other_time_ms;
+                   base_time_ms + copy_time_ms + young_other_time_ms;
 
   if (pause_time_ms > target_pause_time_ms)
     // end condition 2: over the target pause time
     return false;
 
-  size_t reclaimed_bytes =
-    (young_length - so_length) * HeapRegion::GrainBytes - bytes_to_copy;
   size_t free_bytes =
                  (init_free_regions - young_length) * HeapRegion::GrainBytes;
 
@@ -918,9 +604,6 @@
     return false;
 
   // success!
-  double gc_eff = (double) reclaimed_bytes / pause_time_ms;
-  *ret_gc_eff = gc_eff;
-
   return true;
 }
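
Since both the predicted pause time and the predicted survivor space grow
monotonically with young_length, predict_will_fit() gives
calculate_young_list_target_length() a simple yes/no predicate to search
over. A minimal sketch of such a search, assuming only that monotonicity and
that the minimum length fits; the helper below is illustrative and not part
of this changeset:

    #include <stddef.h>

    // Find the largest young length in [min_length, max_length] for which
    // fits(length) still returns true. Assumes fits(min_length) holds and
    // fits() is monotone: once a length fails, every longer length fails.
    static size_t max_fitting_young_length(size_t min_length,
                                           size_t max_length,
                                           bool (*fits)(size_t)) {
      size_t lo = min_length;
      size_t hi = max_length;
      while (lo < hi) {
        size_t mid = lo + (hi - lo + 1) / 2;  // round up so the loop terminates
        if (fits(mid)) {
          lo = mid;        // mid fits: try longer young lists
        } else {
          hi = mid - 1;    // mid is too long: shrink the range
        }
      }
      return lo;           // equals min_length if nothing larger fits
    }
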
 
@@ -937,11 +620,11 @@
 void G1CollectorPolicy::check_prediction_validity() {
   guarantee( adaptive_young_list_length(), "should not call this otherwise" );
 
-  size_t rs_lengths = _g1->young_list_sampled_rs_lengths();
+  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
   if (rs_lengths > _rs_lengths_prediction) {
     // add 10% to avoid having to recalculate often
     size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
-    calculate_young_list_target_config(rs_lengths_prediction);
+    calculate_young_list_target_length(rs_lengths_prediction);
   }
 }
 
@@ -963,7 +646,7 @@
 
 #ifndef PRODUCT
 bool G1CollectorPolicy::verify_young_ages() {
-  HeapRegion* head = _g1->young_list_first_region();
+  HeapRegion* head = _g1->young_list()->first_region();
   return
     verify_young_ages(head, _short_lived_surv_rate_group);
   // also call verify_young_ages on any additional surv rate groups
@@ -1033,13 +716,13 @@
   set_full_young_gcs(true);
   _last_full_young_gc = false;
   _should_revert_to_full_young_gcs = false;
-  _should_initiate_conc_mark = false;
+  clear_initiate_conc_mark_if_possible();
+  clear_during_initial_mark_pause();
   _known_garbage_bytes = 0;
   _known_garbage_ratio = 0.0;
   _in_marking_window = false;
   _in_marking_window_im = false;
 
-  _short_lived_surv_rate_group->record_scan_only_prefix(0);
   _short_lived_surv_rate_group->start_adding_regions();
   // also call this on any additional surv rate groups
 
@@ -1049,11 +732,10 @@
   _prev_region_num_tenured = _region_num_tenured;
 
   _free_regions_at_end_of_collection = _g1->free_regions();
-  _scan_only_regions_at_end_of_collection = 0;
   // Reset survivors SurvRateGroup.
   _survivor_surv_rate_group->reset();
   calculate_young_list_min_length();
-  calculate_young_list_target_config();
+  calculate_young_list_target_length();
  }
 
 void G1CollectorPolicy::record_before_bytes(size_t bytes) {
@@ -1102,8 +784,6 @@
   for (int i = 0; i < _parallel_gc_threads; ++i) {
     _par_last_ext_root_scan_times_ms[i] = -666.0;
     _par_last_mark_stack_scan_times_ms[i] = -666.0;
-    _par_last_scan_only_times_ms[i] = -666.0;
-    _par_last_scan_only_regions_scanned[i] = -666.0;
     _par_last_update_rs_start_times_ms[i] = -666.0;
     _par_last_update_rs_times_ms[i] = -666.0;
     _par_last_update_rs_processed_buffers[i] = -666.0;
@@ -1126,47 +806,13 @@
   if (in_young_gc_mode())
     _last_young_gc_full = false;
 
-
   // do that for any other surv rate groups
   _short_lived_surv_rate_group->stop_adding_regions();
-  size_t short_lived_so_length = _young_list_so_prefix_length;
-  _short_lived_surv_rate_group->record_scan_only_prefix(short_lived_so_length);
-  tag_scan_only(short_lived_so_length);
   _survivors_age_table.clear();
 
   assert( verify_young_ages(), "region age verification" );
 }
 
-void G1CollectorPolicy::tag_scan_only(size_t short_lived_scan_only_length) {
-  // done in a way that it can be extended for other surv rate groups too...
-
-  HeapRegion* head = _g1->young_list_first_region();
-  bool finished_short_lived = (short_lived_scan_only_length == 0);
-
-  if (finished_short_lived)
-    return;
-
-  for (HeapRegion* curr = head;
-       curr != NULL;
-       curr = curr->get_next_young_region()) {
-    SurvRateGroup* surv_rate_group = curr->surv_rate_group();
-    int age = curr->age_in_surv_rate_group();
-
-    if (surv_rate_group == _short_lived_surv_rate_group) {
-      if ((size_t)age < short_lived_scan_only_length)
-        curr->set_scan_only();
-      else
-        finished_short_lived = true;
-    }
-
-
-    if (finished_short_lived)
-      return;
-  }
-
-  guarantee( false, "we should never reach here" );
-}
-
 void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
   _mark_closure_time_ms = mark_closure_time_ms;
 }
@@ -1179,7 +825,8 @@
 void G1CollectorPolicy::record_concurrent_mark_init_end_pre(double
                                                    mark_init_elapsed_time_ms) {
   _during_marking = true;
-  _should_initiate_conc_mark = false;
+  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
+  clear_during_initial_mark_pause();
   _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
 }
 
@@ -1250,7 +897,6 @@
   }
   _n_pauses_at_mark_end = _n_pauses;
   _n_marks_since_last_pause++;
-  _conc_mark_initiated = false;
 }
 
 void
@@ -1260,7 +906,7 @@
     _last_full_young_gc = true;
     _in_marking_window = false;
     if (adaptive_young_list_length())
-      calculate_young_list_target_config();
+      calculate_young_list_target_length();
   }
 }
 
@@ -1446,17 +1092,24 @@
 #endif // PRODUCT
 
   if (in_young_gc_mode()) {
-    last_pause_included_initial_mark = _should_initiate_conc_mark;
+    last_pause_included_initial_mark = during_initial_mark_pause();
     if (last_pause_included_initial_mark)
       record_concurrent_mark_init_end_pre(0.0);
 
     size_t min_used_targ =
       (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
 
-    if (cur_used_bytes > min_used_targ) {
-      if (cur_used_bytes <= _prev_collection_pause_used_at_end_bytes) {
-      } else if (!_g1->mark_in_progress() && !_last_full_young_gc) {
-        _should_initiate_conc_mark = true;
+
+    if (!_g1->mark_in_progress() && !_last_full_young_gc) {
+      assert(!last_pause_included_initial_mark, "invariant");
+      if (cur_used_bytes > min_used_targ &&
+          cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
+        assert(!during_initial_mark_pause(), "we should not see this here");
+
+        // Note: this might have already been set, if during the last
+        // pause we decided to start a cycle but at the beginning of
+        // this pause we decided to postpone it. That's OK.
+        set_initiate_conc_mark_if_possible();
       }
     }
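
To make the rewritten initiation test concrete: with a 1024 MB heap and
InitiatingHeapOccupancyPercent at its default of 45, min_used_targ is
(1024 MB / 100) * 45, roughly 460.8 MB. set_initiate_conc_mark_if_possible()
is therefore called only when occupancy is above that threshold, occupancy
has grown since the end of the previous pause, no marking cycle is in
progress, and we are not about to run the one last fully-young pause that
follows a marking cycle (_last_young_gc_full handling above).
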
 
@@ -1488,6 +1141,7 @@
   size_t freed_bytes =
     _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
   size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
+
   double survival_fraction =
     (double)surviving_bytes/
     (double)_collection_set_bytes_used_before;
@@ -1575,9 +1229,6 @@
 
   double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
   double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
-  double scan_only_time = avg_value(_par_last_scan_only_times_ms);
-  double scan_only_regions_scanned =
-    sum_of_values(_par_last_scan_only_regions_scanned);
   double update_rs_time = avg_value(_par_last_update_rs_times_ms);
   double update_rs_processed_buffers =
     sum_of_values(_par_last_update_rs_processed_buffers);
@@ -1587,7 +1238,7 @@
 
   double parallel_other_time = _cur_collection_par_time_ms -
     (update_rs_time + ext_root_scan_time + mark_stack_scan_time +
-     scan_only_time + scan_rs_time + obj_copy_time + termination_time);
+     scan_rs_time + obj_copy_time + termination_time);
   if (update_stats) {
     MainBodySummary* body_summary = summary->main_body_summary();
     guarantee(body_summary != NULL, "should not be null!");
@@ -1598,7 +1249,6 @@
       body_summary->record_satb_drain_time_ms(0.0);
     body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
     body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
-    body_summary->record_scan_only_time_ms(scan_only_time);
     body_summary->record_update_rs_time_ms(update_rs_time);
     body_summary->record_scan_rs_time_ms(scan_rs_time);
     body_summary->record_obj_copy_time_ms(obj_copy_time);
@@ -1652,7 +1302,7 @@
     else
       other_time_ms -=
         update_rs_time +
-        ext_root_scan_time + mark_stack_scan_time + scan_only_time +
+        ext_root_scan_time + mark_stack_scan_time +
         scan_rs_time + obj_copy_time;
   }
 
@@ -1677,9 +1327,6 @@
                           _par_last_update_rs_processed_buffers, true);
         print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
         print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
-        print_par_stats(2, "Scan-Only Scanning", _par_last_scan_only_times_ms);
-        print_par_buffers(3, "Scan-Only Regions",
-                          _par_last_scan_only_regions_scanned, true);
         print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
         print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
         print_par_stats(2, "Termination", _par_last_termination_times_ms);
@@ -1691,7 +1338,6 @@
                     (int)update_rs_processed_buffers);
         print_stats(1, "Ext Root Scanning", ext_root_scan_time);
         print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
-        print_stats(1, "Scan-Only Scanning", scan_only_time);
         print_stats(1, "Scan RS", scan_rs_time);
         print_stats(1, "Object Copying", obj_copy_time);
       }
@@ -1706,6 +1352,8 @@
     }
 #endif
     print_stats(1, "Other", other_time_ms);
+    print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms);
+
     for (int i = 0; i < _aux_num; ++i) {
       if (_cur_aux_times_set[i]) {
         char buffer[96];
@@ -1747,7 +1395,7 @@
 
   bool new_in_marking_window = _in_marking_window;
   bool new_in_marking_window_im = false;
-  if (_should_initiate_conc_mark) {
+  if (during_initial_mark_pause()) {
     new_in_marking_window = true;
     new_in_marking_window_im = true;
   }
@@ -1791,16 +1439,6 @@
       _cost_per_card_ms_seq->add(cost_per_card_ms);
     }
 
-    double cost_per_scan_only_region_ms = 0.0;
-    if (scan_only_regions_scanned > 0.0) {
-      cost_per_scan_only_region_ms =
-        scan_only_time / scan_only_regions_scanned;
-      if (_in_marking_window_im)
-        _cost_per_scan_only_region_ms_during_cm_seq->add(cost_per_scan_only_region_ms);
-      else
-        _cost_per_scan_only_region_ms_seq->add(cost_per_scan_only_region_ms);
-    }
-
     size_t cards_scanned = _g1->cards_scanned();
 
     double cost_per_entry_ms = 0.0;
@@ -1836,7 +1474,7 @@
     }
 
     double all_other_time_ms = pause_time_ms -
-      (update_rs_time + scan_only_time + scan_rs_time + obj_copy_time +
+      (update_rs_time + scan_rs_time + obj_copy_time +
        _mark_closure_time_ms + termination_time);
 
     double young_other_time_ms = 0.0;
@@ -1883,11 +1521,10 @@
     if (PREDICTIONS_VERBOSE) {
       gclog_or_tty->print_cr("");
       gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d "
-                    "REGIONS %d %d %d %d "
+                    "REGIONS %d %d %d "
                     "PENDING_CARDS %d %d "
                     "CARDS_SCANNED %d %d "
                     "RS_LENGTHS %d %d "
-                    "SCAN_ONLY_SCAN %1.6lf %1.6lf "
                     "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf "
                     "SURVIVAL_RATIO %1.6lf %1.6lf "
                     "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf "
@@ -1900,12 +1537,10 @@
                     (last_pause_included_initial_mark) ? 1 : 0,
                     _recorded_region_num,
                     _recorded_young_regions,
-                    _recorded_scan_only_regions,
                     _recorded_non_young_regions,
                     _predicted_pending_cards, _pending_cards,
                     _predicted_cards_scanned, cards_scanned,
                     _predicted_rs_lengths, _max_rs_lengths,
-                    _predicted_scan_only_scan_time_ms, scan_only_time,
                     _predicted_rs_update_time_ms, update_rs_time,
                     _predicted_rs_scan_time_ms, scan_rs_time,
                     _predicted_survival_ratio, survival_ratio,
@@ -1930,14 +1565,12 @@
   _in_marking_window = new_in_marking_window;
   _in_marking_window_im = new_in_marking_window_im;
   _free_regions_at_end_of_collection = _g1->free_regions();
-  _scan_only_regions_at_end_of_collection = _g1->young_list_length();
   calculate_young_list_min_length();
-  calculate_young_list_target_config();
+  calculate_young_list_target_length();
 
   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
-
   // </NEW PREDICTION>
 
   _target_pause_time_ms = -1.0;
@@ -1992,13 +1625,13 @@
   guarantee( adjustment == 0 || adjustment == 1, "invariant" );
 
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  size_t young_num = g1h->young_list_length();
+  size_t young_num = g1h->young_list()->length();
   if (young_num == 0)
     return 0.0;
 
   young_num += adjustment;
   size_t pending_cards = predict_pending_cards();
-  size_t rs_lengths = g1h->young_list_sampled_rs_lengths() +
+  size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
                       predict_rs_length_diff();
   size_t card_num;
   if (full_young_gcs())
@@ -2082,31 +1715,22 @@
 void
 G1CollectorPolicy::start_recording_regions() {
   _recorded_rs_lengths            = 0;
-  _recorded_scan_only_regions     = 0;
   _recorded_young_regions         = 0;
   _recorded_non_young_regions     = 0;
 
 #if PREDICTIONS_VERBOSE
-  _predicted_rs_lengths           = 0;
-  _predicted_cards_scanned        = 0;
-
   _recorded_marked_bytes          = 0;
   _recorded_young_bytes           = 0;
   _predicted_bytes_to_copy        = 0;
+  _predicted_rs_lengths           = 0;
+  _predicted_cards_scanned        = 0;
 #endif // PREDICTIONS_VERBOSE
 }
 
 void
-G1CollectorPolicy::record_cset_region(HeapRegion* hr, bool young) {
-  if (young) {
-    ++_recorded_young_regions;
-  } else {
-    ++_recorded_non_young_regions;
-  }
+G1CollectorPolicy::record_cset_region_info(HeapRegion* hr, bool young) {
 #if PREDICTIONS_VERBOSE
-  if (young) {
-    _recorded_young_bytes += hr->used();
-  } else {
+  if (!young) {
     _recorded_marked_bytes += hr->max_live_bytes();
   }
   _predicted_bytes_to_copy += predict_bytes_to_copy(hr);
@@ -2117,12 +1741,37 @@
 }
 
 void
-G1CollectorPolicy::record_scan_only_regions(size_t scan_only_length) {
-  _recorded_scan_only_regions = scan_only_length;
+G1CollectorPolicy::record_non_young_cset_region(HeapRegion* hr) {
+  assert(!hr->is_young(), "should not call this");
+  ++_recorded_non_young_regions;
+  record_cset_region_info(hr, false);
+}
+
+void
+G1CollectorPolicy::set_recorded_young_regions(size_t n_regions) {
+  _recorded_young_regions = n_regions;
+}
+
+void G1CollectorPolicy::set_recorded_young_bytes(size_t bytes) {
+#if PREDICTIONS_VERBOSE
+  _recorded_young_bytes = bytes;
+#endif // PREDICTIONS_VERBOSE
+}
+
+void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
+  _recorded_rs_lengths = rs_lengths;
+}
+
+void G1CollectorPolicy::set_predicted_bytes_to_copy(size_t bytes) {
+  _predicted_bytes_to_copy = bytes;
 }
 
 void
 G1CollectorPolicy::end_recording_regions() {
+  // The _predicted_pause_time_ms field is referenced in code
+  // not under PREDICTIONS_VERBOSE. Let's initialize it.
+  _predicted_pause_time_ms = -1.0;
+
 #if PREDICTIONS_VERBOSE
   _predicted_pending_cards = predict_pending_cards();
   _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff();
@@ -2133,8 +1782,6 @@
       predict_non_young_card_num(_predicted_rs_lengths);
   _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;
 
-  _predicted_scan_only_scan_time_ms =
-    predict_scan_only_time_ms(_recorded_scan_only_regions);
   _predicted_rs_update_time_ms =
     predict_rs_update_time_ms(_g1->pending_card_num());
   _predicted_rs_scan_time_ms =
@@ -2149,7 +1796,6 @@
     predict_non_young_other_time_ms(_recorded_non_young_regions);
 
   _predicted_pause_time_ms =
-    _predicted_scan_only_scan_time_ms +
     _predicted_rs_update_time_ms +
     _predicted_rs_scan_time_ms +
     _predicted_object_copy_time_ms +
@@ -2166,7 +1812,13 @@
   if (predicted_time_ms > _expensive_region_limit_ms) {
     if (!in_young_gc_mode()) {
         set_full_young_gcs(true);
-      _should_initiate_conc_mark = true;
+        // We might want to do something different here. However,
+        // right now we don't support the non-generational G1 mode
+        // (and in fact we are planning to remove the associated code,
+        // see CR 6814390). So, let's leave it as is and this will be
+        // removed some time in the future
+        ShouldNotReachHere();
+        set_during_initial_mark_pause();
     } else
       // no point in doing another partial one
       _should_revert_to_full_young_gcs = true;
@@ -2288,7 +1940,7 @@
 }
 
 size_t G1CollectorPolicy::expansion_amount() {
-  if ((int)(recent_avg_pause_time_ratio() * 100.0) > G1GCPercent) {
+  if ((recent_avg_pause_time_ratio() * 100.0) > _gc_overhead_perc) {
     // We will double the existing space, or take
     // G1ExpandByPercentOfAvailable % of the available expansion
     // space, whichever is smaller, bounded below by a minimum
@@ -2433,8 +2085,6 @@
                       body_summary->get_ext_root_scan_seq());
         print_summary(2, "Mark Stack Scanning",
                       body_summary->get_mark_stack_scan_seq());
-        print_summary(2, "Scan-Only Scanning",
-                      body_summary->get_scan_only_seq());
         print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
         print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
         print_summary(2, "Termination", body_summary->get_termination_seq());
@@ -2444,7 +2094,6 @@
             body_summary->get_update_rs_seq(),
             body_summary->get_ext_root_scan_seq(),
             body_summary->get_mark_stack_scan_seq(),
-            body_summary->get_scan_only_seq(),
             body_summary->get_scan_rs_seq(),
             body_summary->get_obj_copy_seq(),
             body_summary->get_termination_seq()
@@ -2462,8 +2111,6 @@
                       body_summary->get_ext_root_scan_seq());
         print_summary(1, "Mark Stack Scanning",
                       body_summary->get_mark_stack_scan_seq());
-        print_summary(1, "Scan-Only Scanning",
-                      body_summary->get_scan_only_seq());
         print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
         print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
       }
@@ -2489,7 +2136,6 @@
             body_summary->get_update_rs_seq(),
             body_summary->get_ext_root_scan_seq(),
             body_summary->get_mark_stack_scan_seq(),
-            body_summary->get_scan_only_seq(),
             body_summary->get_scan_rs_seq(),
             body_summary->get_obj_copy_seq()
           };
@@ -2583,7 +2229,7 @@
 G1CollectorPolicy::should_add_next_region_to_young_list() {
   assert(in_young_gc_mode(), "should be in young GC mode");
   bool ret;
-  size_t young_list_length = _g1->young_list_length();
+  size_t young_list_length = _g1->young_list()->length();
   size_t young_list_max_length = _young_list_target_length;
   if (G1FixedEdenSize) {
     young_list_max_length -= _max_survivor_regions;
@@ -2646,7 +2292,7 @@
   assert(_g1->regions_accounted_for(), "Region leakage!");
   double max_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
 
-  size_t young_list_length = _g1->young_list_length();
+  size_t young_list_length = _g1->young_list()->length();
   size_t young_list_max_length = _young_list_target_length;
   if (G1FixedEdenSize) {
     young_list_max_length -= _max_survivor_regions;
@@ -2655,7 +2301,7 @@
 
   if (in_young_gc_mode()) {
     if (reached_target_length) {
-      assert( young_list_length > 0 && _g1->young_list_length() > 0,
+      assert( young_list_length > 0 && _g1->young_list()->length() > 0,
               "invariant" );
       _target_pause_time_ms = max_pause_time_ms;
       return true;
@@ -2690,6 +2336,50 @@
 #endif
 
 void
+G1CollectorPolicy::decide_on_conc_mark_initiation() {
+  // We are about to decide on whether this pause will be an
+  // initial-mark pause.
+
+  // First, during_initial_mark_pause() should not be already set. We
+  // will set it here if we have to. However, it should be cleared by
+  // the end of the pause (it's only set for the duration of an
+  // initial-mark pause).
+  assert(!during_initial_mark_pause(), "pre-condition");
+
+  if (initiate_conc_mark_if_possible()) {
+    // We had noticed on a previous pause that the heap occupancy has
+    // gone over the initiating threshold and we should start a
+    // concurrent marking cycle. So we might initiate one.
+
+    bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
+    if (!during_cycle) {
+      // The concurrent marking thread is not "during a cycle", i.e.,
+      // it has completed the last one. So we can go ahead and
+      // initiate a new cycle.
+
+      set_during_initial_mark_pause();
+
+      // And we can now clear initiate_conc_mark_if_possible() as
+      // we've already acted on it.
+      clear_initiate_conc_mark_if_possible();
+    } else {
+      // The concurrent marking thread is still finishing up the
+      // previous cycle. If we start one right now the two cycles
+      // overlap. In particular, the concurrent marking thread might
+      // be in the process of clearing the next marking bitmap (which
+      // we will use for the next cycle if we start one). Starting a
+      // cycle now will be bad given that parts of the marking
+      // information might get cleared by the marking thread. And we
+      // cannot wait for the marking thread to finish the cycle as it
+      // periodically yields while clearing the next marking bitmap
+      // and, if it's in a yield point, it's waiting for us to
+      // finish. So, at this point we will not start a cycle and we'll
+      // let the concurrent marking thread complete the last one.
+    }
+  }
+}
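
A sketch of how the two flags are intended to interact across a pause. The
skeleton below is hypothetical; the changeset itself only specifies that
decide_on_conc_mark_initiation() runs as the very first thing in an
evacuation pause:

    // Hypothetical pause skeleton, for illustration only.
    void evacuation_pause(G1CollectorPolicy* policy) {
      // May convert a pending request (initiate_conc_mark_if_possible())
      // into an actual initial-mark pause, or postpone it if the
      // concurrent marking thread is still finishing the previous cycle.
      policy->decide_on_conc_mark_initiation();

      if (policy->during_initial_mark_pause()) {
        // ... do the initial-mark work alongside the evacuation ...
      }
      // ... evacuate the collection set ...

      // The policy clears during_initial_mark_pause() again by the end of
      // the pause (see record_concurrent_mark_init_end_pre() above), so
      // the flag spans exactly one initial-mark pause.
    }
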
+
+void
 G1CollectorPolicy_BestRegionsFirst::
 record_collection_pause_start(double start_time_sec, size_t start_used) {
   G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
@@ -2872,22 +2562,24 @@
   }
 }
 
-// Add the heap region to the collection set and return the conservative
-// estimate of the number of live bytes.
+// Add the heap region at the head of the non-incremental collection set
 void G1CollectorPolicy::
 add_to_collection_set(HeapRegion* hr) {
+  assert(_inc_cset_build_state == Active, "Precondition");
+  assert(!hr->is_young(), "non-incremental add of young region");
+
   if (G1PrintHeapRegions) {
-    gclog_or_tty->print_cr("added region to cset %d:["PTR_FORMAT", "PTR_FORMAT"], "
-                  "top "PTR_FORMAT", young %s",
-                  hr->hrs_index(), hr->bottom(), hr->end(),
-                  hr->top(), (hr->is_young()) ? "YES" : "NO");
+    gclog_or_tty->print_cr("added region to cset "
+                           "%d:["PTR_FORMAT", "PTR_FORMAT"], "
+                           "top "PTR_FORMAT", %s",
+                           hr->hrs_index(), hr->bottom(), hr->end(),
+                           hr->top(), hr->is_young() ? "YOUNG" : "NOT_YOUNG");
   }
 
   if (_g1->mark_in_progress())
     _g1->concurrent_mark()->registerCSetRegion(hr);
 
-  assert(!hr->in_collection_set(),
-              "should not already be in the CSet");
+  assert(!hr->in_collection_set(), "should not already be in the CSet");
   hr->set_in_collection_set(true);
   hr->set_next_in_collection_set(_collection_set);
   _collection_set = hr;
@@ -2896,10 +2588,230 @@
   _g1->register_region_with_in_cset_fast_test(hr);
 }
 
-void
-G1CollectorPolicy_BestRegionsFirst::
-choose_collection_set() {
-  double non_young_start_time_sec;
+// Initialize the per-collection-set information
+void G1CollectorPolicy::start_incremental_cset_building() {
+  assert(_inc_cset_build_state == Inactive, "Precondition");
+
+  _inc_cset_head = NULL;
+  _inc_cset_tail = NULL;
+  _inc_cset_size = 0;
+  _inc_cset_bytes_used_before = 0;
+
+  if (in_young_gc_mode()) {
+    _inc_cset_young_index = 0;
+  }
+
+  _inc_cset_max_finger = 0;
+  _inc_cset_recorded_young_bytes = 0;
+  _inc_cset_recorded_rs_lengths = 0;
+  _inc_cset_predicted_elapsed_time_ms = 0;
+  _inc_cset_predicted_bytes_to_copy = 0;
+  _inc_cset_build_state = Active;
+}
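
The build state and the various entry points combine into a simple
lifecycle; the outline below is inferred from the comments in this change,
and not every call site is visible in this diff:

    // Lifecycle sketch, illustrative only:
    //   start_incremental_cset_building();     // state: Inactive -> Active
    //   ... a mutator retires the current allocation region:
    //         add_region_to_incremental_cset_lhs(hr);
    //   ... survivors are retained at the end of an evacuation pause:
    //         add_region_to_incremental_cset_rhs(hr);
    //   ... concurrent RSet sampling refreshes a young region's info:
    //         update_incremental_cset_info(hr, new_rs_length);
    //   choose_collection_set();               // consumes the list and calls
    //   stop_incremental_cset_building();      // state: Active -> Inactive
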
+
+void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
+  // This routine is used when:
+  // * adding survivor regions to the incremental cset at the end of an
+  //   evacuation pause,
+  // * adding the current allocation region to the incremental cset
+  //   when it is retired, and
+  // * updating existing policy information for a region in the
+  //   incremental cset via young list RSet sampling.
+  // Therefore this routine may be called at a safepoint by the
+  // VM thread, or in-between safepoints by mutator threads (when
+  // retiring the current allocation region) or a concurrent
+  // refine thread (RSet sampling).
+
+  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
+  size_t used_bytes = hr->used();
+
+  _inc_cset_recorded_rs_lengths += rs_length;
+  _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
+
+  _inc_cset_bytes_used_before += used_bytes;
+
+  // Cache the values we have added to the aggregated information
+  // in the heap region in case we have to remove this region from
+  // the incremental collection set, or it is updated by the
+  // rset sampling code
+  hr->set_recorded_rs_length(rs_length);
+  hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
+
+#if PREDICTIONS_VERBOSE
+  size_t bytes_to_copy = predict_bytes_to_copy(hr);
+  _inc_cset_predicted_bytes_to_copy += bytes_to_copy;
+
+  // Record the number of bytes used in this region
+  _inc_cset_recorded_young_bytes += used_bytes;
+
+  // Cache the values we have added to the aggregated information
+  // in the heap region in case we have to remove this region from
+  // the incremental collection set, or it is updated by the
+  // rset sampling code
+  hr->set_predicted_bytes_to_copy(bytes_to_copy);
+#endif // PREDICTIONS_VERBOSE
+}
+
+void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
+  // This routine is currently only called as part of the updating of
+  // existing policy information for regions in the incremental cset that
+  // is performed by the concurrent refine thread(s) as part of young list
+  // RSet sampling. Therefore we should not be at a safepoint.
+
+  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
+  assert(hr->is_young(), "it should be");
+
+  size_t used_bytes = hr->used();
+  size_t old_rs_length = hr->recorded_rs_length();
+  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
+
+  // Subtract the old recorded/predicted policy information for
+  // the given heap region from the collection set info.
+  _inc_cset_recorded_rs_lengths -= old_rs_length;
+  _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;
+
+  _inc_cset_bytes_used_before -= used_bytes;
+
+  // Clear the values cached in the heap region
+  hr->set_recorded_rs_length(0);
+  hr->set_predicted_elapsed_time_ms(0);
+
+#if PREDICTIONS_VERBOSE
+  size_t old_predicted_bytes_to_copy = hr->predicted_bytes_to_copy();
+  _inc_cset_predicted_bytes_to_copy -= old_predicted_bytes_to_copy;
+
+  // Subtract the number of bytes used in this region
+  _inc_cset_recorded_young_bytes -= used_bytes;
+
+  // Clear the values cached in the heap region
+  hr->set_predicted_bytes_to_copy(0);
+#endif // PREDICTIONS_VERBOSE
+}
+
+void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
+  // Update the collection set information that is dependent on the new RS length
+  assert(hr->is_young(), "Precondition");
+
+  remove_from_incremental_cset_info(hr);
+  add_to_incremental_cset_info(hr, new_rs_length);
+}
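
update_incremental_cset_info() is deliberately just remove-then-add:
because every region caches exactly the values it contributed
(recorded_rs_length, predicted_elapsed_time_ms), the subtraction in
remove_from_incremental_cset_info() undoes the earlier addition precisely
and the aggregates never drift. A minimal sketch of the sampling call site;
the wrapper function name here is assumed, only the policy entry point is
defined by this change:

    // Illustrative sampling step, e.g. from a concurrent refine thread.
    void sample_young_region_rs_length(G1CollectorPolicy* policy,
                                       HeapRegion* hr) {
      // Re-read the region's current remembered set size ...
      size_t new_rs_length = hr->rem_set()->occupied();
      // ... and swap the region's cached contribution for a fresh one.
      policy->update_incremental_cset_info(hr, new_rs_length);
    }
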
+
+void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
+  assert( hr->is_young(), "invariant");
+  assert( hr->young_index_in_cset() == -1, "invariant" );
+  assert(_inc_cset_build_state == Active, "Precondition");
+
+  // We need to clear and set the cached recorded/predicted collection set
+  // information in the heap region here (before the region gets added
+  // to the collection set). An individual heap region's cached values
+  // are calculated, aggregated with the policy collection set info,
+  // and cached in the heap region here (initially) and (subsequently)
+  // by the Young List sampling code.
+
+  size_t rs_length = hr->rem_set()->occupied();
+  add_to_incremental_cset_info(hr, rs_length);
+
+  HeapWord* hr_end = hr->end();
+  _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
+
+  assert(!hr->in_collection_set(), "invariant");
+  hr->set_in_collection_set(true);
+  assert( hr->next_in_collection_set() == NULL, "invariant");
+
+  _inc_cset_size++;
+  _g1->register_region_with_in_cset_fast_test(hr);
+
+  hr->set_young_index_in_cset((int) _inc_cset_young_index);
+  ++_inc_cset_young_index;
+}
+
+// Add the region at the RHS of the incremental cset
+void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
+  // We should only ever be appending survivors at the end of a pause
+  assert( hr->is_survivor(), "Logic");
+
+  // Do the 'common' stuff
+  add_region_to_incremental_cset_common(hr);
+
+  // Now add the region at the right hand side
+  if (_inc_cset_tail == NULL) {
+    assert(_inc_cset_head == NULL, "invariant");
+    _inc_cset_head = hr;
+  } else {
+    _inc_cset_tail->set_next_in_collection_set(hr);
+  }
+  _inc_cset_tail = hr;
+
+  if (G1PrintHeapRegions) {
+    gclog_or_tty->print_cr(" added region to incremental cset (RHS) "
+                  "%d:["PTR_FORMAT", "PTR_FORMAT"], "
+                  "top "PTR_FORMAT", young %s",
+                  hr->hrs_index(), hr->bottom(), hr->end(),
+                  hr->top(), (hr->is_young()) ? "YES" : "NO");
+  }
+}
+
+// Add the region to the LHS of the incremental cset
+void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
+  // Survivors should be added to the RHS at the end of a pause
+  assert(!hr->is_survivor(), "Logic");
+
+  // Do the 'common' stuff
+  add_region_to_incremental_cset_common(hr);
+
+  // Add the region at the left hand side
+  hr->set_next_in_collection_set(_inc_cset_head);
+  if (_inc_cset_head == NULL) {
+    assert(_inc_cset_tail == NULL, "Invariant");
+    _inc_cset_tail = hr;
+  }
+  _inc_cset_head = hr;
+
+  if (G1PrintHeapRegions) {
+    gclog_or_tty->print_cr(" added region to incremental cset (LHS) "
+                  "%d:["PTR_FORMAT", "PTR_FORMAT"], "
+                  "top "PTR_FORMAT", young %s",
+                  hr->hrs_index(), hr->bottom(), hr->end(),
+                  hr->top(), (hr->is_young()) ? "YES" : "NO");
+  }
+}
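
Taken together, the two adders produce exactly the ordering that
choose_collection_set() relies on below: regions retired during mutator
time are pushed onto the left-hand side, survivors are appended on the
right-hand side at the end of each pause, so from head to tail the list
always reads [Newly Young Regions ++ Survivors from last pause].
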
+
+#ifndef PRODUCT
+void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
+  assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
+
+  st->print_cr("\nCollection_set:");
+  HeapRegion* csr = list_head;
+  while (csr != NULL) {
+    HeapRegion* next = csr->next_in_collection_set();
+    assert(csr->in_collection_set(), "bad CS");
+    st->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
+                 "age: %4d, y: %d, surv: %d",
+                        csr->bottom(), csr->end(),
+                        csr->top(),
+                        csr->prev_top_at_mark_start(),
+                        csr->next_top_at_mark_start(),
+                        csr->top_at_conc_mark_count(),
+                        csr->age_in_surv_rate_group_cond(),
+                        csr->is_young(),
+                        csr->is_survivor());
+    csr = next;
+  }
+}
+#endif // !PRODUCT
+
+bool
+G1CollectorPolicy_BestRegionsFirst::choose_collection_set() {
+  // Set this here - in case we're not doing young collections.
+  double non_young_start_time_sec = os::elapsedTime();
+
+  // The result that this routine will return. This will be set to
+  // false if:
+  // * we're doing a young or partially young collection and we
+  //   have added the youg regions to collection set, or
+  // * we add old regions to the collection set.
+  bool abandon_collection = true;
+
   start_recording_regions();
 
   guarantee(_target_pause_time_ms > -1.0
@@ -2952,47 +2864,79 @@
 
     if (G1PolicyVerbose > 0) {
       gclog_or_tty->print_cr("Adding %d young regions to the CSet",
-                    _g1->young_list_length());
+                    _g1->young_list()->length());
     }
+
     _young_cset_length  = 0;
     _last_young_gc_full = full_young_gcs() ? true : false;
+
     if (_last_young_gc_full)
       ++_full_young_pause_num;
     else
       ++_partial_young_pause_num;
-    hr = _g1->pop_region_from_young_list();
+
+    // The young list is laid out with the survivor regions from the
+    // previous pause appended to the RHS of the young list, i.e.
+    //   [Newly Young Regions ++ Survivors from last pause].
+
+    hr = _g1->young_list()->first_survivor_region();
     while (hr != NULL) {
-
-      assert( hr->young_index_in_cset() == -1, "invariant" );
-      assert( hr->age_in_surv_rate_group() != -1, "invariant" );
-      hr->set_young_index_in_cset((int) _young_cset_length);
-
-      ++_young_cset_length;
-      double predicted_time_ms = predict_region_elapsed_time_ms(hr, true);
-      time_remaining_ms -= predicted_time_ms;
-      predicted_pause_time_ms += predicted_time_ms;
-      assert(!hr->in_collection_set(), "invariant");
-      add_to_collection_set(hr);
-      record_cset_region(hr, true);
-      max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes);
-      if (G1PolicyVerbose > 0) {
-        gclog_or_tty->print_cr("  Added [" PTR_FORMAT ", " PTR_FORMAT") to CS.",
-                      hr->bottom(), hr->end());
-        gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
-                      max_live_bytes/K);
-      }
-      hr = _g1->pop_region_from_young_list();
+      assert(hr->is_survivor(), "badly formed young list");
+      hr->set_young();
+      hr = hr->get_next_young_region();
     }
 
-    record_scan_only_regions(_g1->young_list_scan_only_length());
+    // Clear the fields that point to the survivor list - they are
+    // all young now.
+    _g1->young_list()->clear_survivors();
+
+    if (_g1->mark_in_progress())
+      _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);
+
+    _young_cset_length = _inc_cset_young_index;
+    _collection_set = _inc_cset_head;
+    _collection_set_size = _inc_cset_size;
+    _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
+
+    // For young regions in the collection set, we assume the worst
+    // case of complete survival
+    max_live_bytes -= _inc_cset_size * HeapRegion::GrainBytes;
+
+    time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
+    predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
+
+    // The number of recorded young regions is the incremental
+    // collection set's current size
+    set_recorded_young_regions(_inc_cset_size);
+    set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
+    set_recorded_young_bytes(_inc_cset_recorded_young_bytes);
+#if PREDICTIONS_VERBOSE
+    set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
+#endif // PREDICTIONS_VERBOSE
+
+    if (G1PolicyVerbose > 0) {
+      gclog_or_tty->print_cr("  Added " PTR_FORMAT " Young Regions to CS.",
+                             _inc_cset_size);
+      gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
+                            max_live_bytes/K);
+    }
+
+    assert(_inc_cset_size == _g1->young_list()->length(), "Invariant");
+    if (_inc_cset_size > 0) {
+      assert(_collection_set != NULL, "Invariant");
+      abandon_collection = false;
+    }
 
     double young_end_time_sec = os::elapsedTime();
     _recorded_young_cset_choice_time_ms =
       (young_end_time_sec - young_start_time_sec) * 1000.0;
 
-    non_young_start_time_sec = os::elapsedTime();
-
-    if (_young_cset_length > 0 && _last_young_gc_full) {
+    // We are doing young collections so reset this.
+    non_young_start_time_sec = young_end_time_sec;
+
+    // Note we can use either _collection_set_size or
+    // _young_cset_length here
+    if (_collection_set_size > 0 && _last_young_gc_full) {
       // don't bother adding more regions...
       goto choose_collection_set_end;
     }
@@ -3002,6 +2946,11 @@
     bool should_continue = true;
     NumberSeq seq;
     double avg_prediction = 100000000000000000.0; // something very large
+
+    // Save the current size of the collection set to detect
+    // if we actually added any old regions.
+    size_t n_young_regions = _collection_set_size;
+
     do {
       hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
                                                       avg_prediction);
@@ -3010,7 +2959,7 @@
         time_remaining_ms -= predicted_time_ms;
         predicted_pause_time_ms += predicted_time_ms;
         add_to_collection_set(hr);
-        record_cset_region(hr, false);
+        record_non_young_cset_region(hr);
         max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes);
         if (G1PolicyVerbose > 0) {
           gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
@@ -3028,9 +2977,17 @@
     if (!adaptive_young_list_length() &&
         _collection_set_size < _young_list_fixed_length)
       _should_revert_to_full_young_gcs  = true;
+
+    if (_collection_set_size > n_young_regions) {
+      // We actually added old regions to the collection set
+      // so we are not abandoning this collection.
+      abandon_collection = false;
+    }
   }
 
 choose_collection_set_end:
+  stop_incremental_cset_building();
+
   count_CS_bytes_used();
 
   end_recording_regions();
@@ -3038,6 +2995,8 @@
   double non_young_end_time_sec = os::elapsedTime();
   _recorded_non_young_cset_choice_time_ms =
     (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
+
+  return abandon_collection;
 }
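
The new boolean result lets the caller skip an evacuation pause whose
collection set turned out to be empty. A hedged sketch of the caller's
side; the surrounding pause code is not part of this hunk and the names
are assumed:

    // Illustrative caller, not the actual pause code.
    void do_pause(G1CollectorPolicy* policy) {
      bool abandoned = policy->choose_collection_set();
      if (!abandoned) {
        // young and/or old regions were added: evacuate the collection set
      } else {
        // nothing was added: abandon (skip) this collection
      }
    }
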
 
 void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,7 +61,6 @@
   define_num_seq(parallel) // parallel only
     define_num_seq(ext_root_scan)
     define_num_seq(mark_stack_scan)
-    define_num_seq(scan_only)
     define_num_seq(update_rs)
     define_num_seq(scan_rs)
     define_num_seq(scan_new_refs) // Only for temp use; added to
@@ -174,8 +173,6 @@
 
   double* _par_last_ext_root_scan_times_ms;
   double* _par_last_mark_stack_scan_times_ms;
-  double* _par_last_scan_only_times_ms;
-  double* _par_last_scan_only_regions_scanned;
   double* _par_last_update_rs_start_times_ms;
   double* _par_last_update_rs_times_ms;
   double* _par_last_update_rs_processed_buffers;
@@ -196,7 +193,6 @@
   bool _adaptive_young_list_length;
   size_t _young_list_min_length;
   size_t _young_list_target_length;
-  size_t _young_list_so_prefix_length;
   size_t _young_list_fixed_length;
 
   size_t _young_cset_length;
@@ -215,6 +211,8 @@
   SurvRateGroup*        _survivor_surv_rate_group;
   // add here any more surv rate groups
 
+  double                _gc_overhead_perc;
+
   bool during_marking() {
     return _during_marking;
   }
@@ -232,7 +230,6 @@
   TruncatedSeq* _pending_card_diff_seq;
   TruncatedSeq* _rs_length_diff_seq;
   TruncatedSeq* _cost_per_card_ms_seq;
-  TruncatedSeq* _cost_per_scan_only_region_ms_seq;
   TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
   TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
   TruncatedSeq* _cost_per_entry_ms_seq;
@@ -247,19 +244,16 @@
   TruncatedSeq* _rs_lengths_seq;
 
   TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
-  TruncatedSeq* _cost_per_scan_only_region_ms_during_cm_seq;
 
   TruncatedSeq* _young_gc_eff_seq;
 
   TruncatedSeq* _max_conc_overhead_seq;
 
   size_t _recorded_young_regions;
-  size_t _recorded_scan_only_regions;
   size_t _recorded_non_young_regions;
   size_t _recorded_region_num;
 
   size_t _free_regions_at_end_of_collection;
-  size_t _scan_only_regions_at_end_of_collection;
 
   size_t _recorded_rs_lengths;
   size_t _max_rs_lengths;
@@ -275,7 +269,6 @@
   double _predicted_survival_ratio;
   double _predicted_rs_update_time_ms;
   double _predicted_rs_scan_time_ms;
-  double _predicted_scan_only_scan_time_ms;
   double _predicted_object_copy_time_ms;
   double _predicted_constant_other_time_ms;
   double _predicted_young_other_time_ms;
@@ -342,8 +335,6 @@
   bool verify_young_ages();
 #endif // PRODUCT
 
-  void tag_scan_only(size_t short_lived_scan_only_length);
-
   double get_new_prediction(TruncatedSeq* seq) {
     return MAX2(seq->davg() + sigma() * seq->dsd(),
                 seq->davg() * confidence_factor(seq->num()));
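
For a sense of scale in get_new_prediction(): with davg() = 10.0 ms,
dsd() = 2.0 ms and sigma() = 0.5, the first operand of MAX2 is
10.0 + 0.5 * 2.0 = 11.0 ms. The second operand scales the plain average by
confidence_factor(seq->num()), which, judging by its use here, presumably
exceeds 1.0 while the sequence has few samples; early predictions are thus
padded and later ones converge to average plus sigma-weighted deviation.
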
@@ -429,23 +420,6 @@
         get_new_prediction(_partially_young_cost_per_entry_ms_seq);
   }
 
-  double predict_scan_only_time_ms_during_cm(size_t scan_only_region_num) {
-    if (_cost_per_scan_only_region_ms_during_cm_seq->num() < 3)
-      return 1.5 * (double) scan_only_region_num *
-        get_new_prediction(_cost_per_scan_only_region_ms_seq);
-    else
-      return (double) scan_only_region_num *
-        get_new_prediction(_cost_per_scan_only_region_ms_during_cm_seq);
-  }
-
-  double predict_scan_only_time_ms(size_t scan_only_region_num) {
-    if (_in_marking_window_im)
-      return predict_scan_only_time_ms_during_cm(scan_only_region_num);
-    else
-      return (double) scan_only_region_num *
-        get_new_prediction(_cost_per_scan_only_region_ms_seq);
-  }
-
   double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
     if (_cost_per_byte_ms_during_cm_seq->num() < 3)
       return 1.1 * (double) bytes_to_copy *
@@ -488,24 +462,21 @@
   size_t predict_bytes_to_copy(HeapRegion* hr);
   double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
 
-  // for use by: calculate_optimal_so_length(length)
-  void predict_gc_eff(size_t young_region_num,
-                      size_t so_length,
-                      double base_time_ms,
-                      double *gc_eff,
-                      double *pause_time_ms);
-
-  // for use by: calculate_young_list_target_config(rs_length)
-  bool predict_gc_eff(size_t young_region_num,
-                      size_t so_length,
-                      double base_time_with_so_ms,
-                      size_t init_free_regions,
-                      double target_pause_time_ms,
-                      double* gc_eff);
+  // for use by: calculate_young_list_target_length(rs_length)
+  bool predict_will_fit(size_t young_region_num,
+                        double base_time_ms,
+                        size_t init_free_regions,
+                        double target_pause_time_ms);
 
   void start_recording_regions();
-  void record_cset_region(HeapRegion* hr, bool young);
-  void record_scan_only_regions(size_t scan_only_length);
+  void record_cset_region_info(HeapRegion* hr, bool young);
+  void record_non_young_cset_region(HeapRegion* hr);
+
+  void set_recorded_young_regions(size_t n_regions);
+  void set_recorded_young_bytes(size_t bytes);
+  void set_recorded_rs_lengths(size_t rs_lengths);
+  void set_predicted_bytes_to_copy(size_t bytes);
+
   void end_recording_regions();
 
   void record_vtime_diff_ms(double vtime_diff_ms) {
@@ -636,11 +607,74 @@
   void update_recent_gc_times(double end_time_sec, double elapsed_ms);
 
   // The head of the list (via "next_in_collection_set()") representing the
-  // current collection set.
+  // current collection set. Set from the incrementally built collection
+  // set at the start of the pause.
   HeapRegion* _collection_set;
+
+  // The number of regions in the collection set. Set from the incrementally
+  // built collection set at the start of an evacuation pause.
   size_t _collection_set_size;
+
+  // The number of bytes in the collection set before the pause. Set from
+  // the incrementally built collection set at the start of an evacuation
+  // pause.
   size_t _collection_set_bytes_used_before;
 
+  // The associated information that is maintained while the incremental
+  // collection set is being built with young regions. Used to populate
+  // the recorded info for the evacuation pause.
+
+  enum CSetBuildType {
+    Active,             // We are actively building the collection set
+    Inactive            // We are not actively building the collection set
+  };
+
+  CSetBuildType _inc_cset_build_state;
+
+  // The head of the incrementally built collection set.
+  HeapRegion* _inc_cset_head;
+
+  // The tail of the incrementally built collection set.
+  HeapRegion* _inc_cset_tail;
+
+  // The number of regions in the incrementally built collection set.
+  // Used to set _collection_set_size at the start of an evacuation
+  // pause.
+  size_t _inc_cset_size;
+
+  // Used as the index in the surviving young words structure
+  // which tracks the amount of space, for each young region,
+  // that survives the pause.
+  size_t _inc_cset_young_index;
+
+  // The number of bytes in the incrementally built collection set.
+  // Used to set _collection_set_bytes_used_before at the start of
+  // an evacuation pause.
+  size_t _inc_cset_bytes_used_before;
+
+  // Used to record the highest end of any heap region in the collection set
+  HeapWord* _inc_cset_max_finger;
+
+  // The number of recorded used bytes in the young regions
+  // of the collection set. This is the sum of the used() bytes
+  // of retired young regions in the collection set.
+  size_t _inc_cset_recorded_young_bytes;
+
+  // The RSet lengths recorded for regions in the collection set
+  // (updated by the periodic sampling of the regions in the
+  // young list/collection set).
+  size_t _inc_cset_recorded_rs_lengths;
+
+  // The predicted elapsed time it will take to collect the regions
+  // in the collection set (updated by the periodic sampling of the
+  // regions in the young list/collection set).
+  double _inc_cset_predicted_elapsed_time_ms;
+
+  // The predicted bytes to copy for the regions in the collection
+  // set (updated by the periodic sampling of the regions in the
+  // young list/collection set).
+  size_t _inc_cset_predicted_bytes_to_copy;
+
   // Info about marking.
   int _n_marks; // Sticky at 2, so we know when we've done at least 2.
 
@@ -722,11 +756,31 @@
 
   size_t _n_marks_since_last_pause;
 
-  // True iff CM has been initiated.
-  bool _conc_mark_initiated;
+  // At the end of a pause we check the heap occupancy and we decide
+  // whether we will start a marking cycle during the next pause. If
+  // we decide that we want to do that, we will set this parameter to
+  // true. So, this parameter will stay true between the end of a
+  // pause and the beginning of a subsequent pause (not necessarily
+  // the next one, see the comments on the next field) when we decide
+  // that we will indeed start a marking cycle and do the initial-mark
+  // work.
+  volatile bool _initiate_conc_mark_if_possible;
 
-  // True iff CM should be initiated
-  bool _should_initiate_conc_mark;
+  // If initiate_conc_mark_if_possible() is set at the beginning of a
+  // pause, it is a suggestion that the pause should start a marking
+  // cycle by doing the initial-mark work. However, it is possible
+  // that the concurrent marking thread is still finishing up the
+  // previous marking cycle (e.g., clearing the next marking
+  // bitmap). If that is the case we cannot start a new cycle and
+  // we'll have to wait for the concurrent marking thread to finish
+  // what it is doing. In this case we will postpone the marking cycle
+  // initiation decision for the next pause. When we eventually decide
+  // to start a cycle, we will set _during_initial_mark_pause which
+  // will stay true until the end of the initial-mark pause and it's
+  // the condition that indicates that a pause is doing the
+  // initial-mark work.
+  volatile bool _during_initial_mark_pause;
+
   bool _should_revert_to_full_young_gcs;
   bool _last_full_young_gc;
 
@@ -739,9 +793,8 @@
   double _mark_closure_time_ms;
 
   void   calculate_young_list_min_length();
-  void   calculate_young_list_target_config();
-  void   calculate_young_list_target_config(size_t rs_lengths);
-  size_t calculate_optimal_so_length(size_t young_list_length);
+  void   calculate_young_list_target_length();
+  void   calculate_young_list_target_length(size_t rs_lengths);
 
 public:
 
@@ -846,11 +899,6 @@
     _par_last_mark_stack_scan_times_ms[worker_i] = ms;
   }
 
-  void record_scan_only_time(int worker_i, double ms, int n) {
-    _par_last_scan_only_times_ms[worker_i] = ms;
-    _par_last_scan_only_regions_scanned[worker_i] = (double) n;
-  }
-
   void record_satb_drain_time(double ms) {
     _cur_satb_drain_time_ms = ms;
     _satb_drain_time_set    = true;
@@ -965,23 +1013,82 @@
   // Choose a new collection set.  Marks the chosen regions as being
   // "in_collection_set", and links them together.  The head and number of
   // the collection set are available via access methods.
-  virtual void choose_collection_set() = 0;
-
-  void clear_collection_set() { _collection_set = NULL; }
+  virtual bool choose_collection_set() = 0;
 
   // The head of the list (via "next_in_collection_set()") representing the
   // current collection set.
   HeapRegion* collection_set() { return _collection_set; }
 
+  void clear_collection_set() { _collection_set = NULL; }
+
   // The number of elements in the current collection set.
   size_t collection_set_size() { return _collection_set_size; }
 
   // Add "hr" to the CS.
   void add_to_collection_set(HeapRegion* hr);
 
-  bool should_initiate_conc_mark()      { return _should_initiate_conc_mark; }
-  void set_should_initiate_conc_mark()  { _should_initiate_conc_mark = true; }
-  void unset_should_initiate_conc_mark(){ _should_initiate_conc_mark = false; }
+  // Incremental CSet Support
+
+  // The head of the incrementally built collection set.
+  HeapRegion* inc_cset_head() { return _inc_cset_head; }
+
+  // The tail of the incrementally built collection set.
+  HeapRegion* inc_set_tail() { return _inc_cset_tail; }
+
+  // The number of elements in the incrementally built collection set.
+  size_t inc_cset_size() { return _inc_cset_size; }
+
+  // Initialize incremental collection set info.
+  void start_incremental_cset_building();
+
+  void clear_incremental_cset() {
+    _inc_cset_head = NULL;
+    _inc_cset_tail = NULL;
+  }
+
+  // Stop adding regions to the incremental collection set
+  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }
+
+  // Add/remove information about hr to the aggregated information
+  // for the incrementally built collection set.
+  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
+  void remove_from_incremental_cset_info(HeapRegion* hr);
+
+  // Update information about hr in the aggregated information for
+  // the incrementally built collection set.
+  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);
+
+private:
+  // Update the incremental cset information when adding a region
+  // (should not be called directly).
+  void add_region_to_incremental_cset_common(HeapRegion* hr);
+
+public:
+  // Add hr to the LHS of the incremental collection set.
+  void add_region_to_incremental_cset_lhs(HeapRegion* hr);
+
+  // Add hr to the RHS of the incremental collection set.
+  void add_region_to_incremental_cset_rhs(HeapRegion* hr);
+
+#ifndef PRODUCT
+  void print_collection_set(HeapRegion* list_head, outputStream* st);
+#endif // !PRODUCT
+
+  bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible;  }
+  void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true;  }
+  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }
+
+  bool during_initial_mark_pause()      { return _during_initial_mark_pause;  }
+  void set_during_initial_mark_pause()  { _during_initial_mark_pause = true;  }
+  void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }
+
+  // This is called at the very beginning of an evacuation pause (it
+  // has to be the first thing that the pause does). If
+  // initiate_conc_mark_if_possible() is true, and the concurrent
+  // marking thread has completed its work during the previous cycle,
+  // it will set during_initial_mark_pause() so that the pause does
+  // the initial-mark work and starts a marking cycle.
+  void decide_on_conc_mark_initiation();
 
   // If an expansion would be appropriate, because recent GC overhead had
   // exceeded the desired limit, return an amount to expand by.
@@ -1157,7 +1264,7 @@
   // If the estimated is less then desirable, resize if possible.
   void expand_if_possible(size_t numRegions);
 
-  virtual void choose_collection_set();
+  virtual bool choose_collection_set();
   virtual void record_collection_pause_start(double start_time_sec,
                                              size_t start_used);
   virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,12 @@
                                       bool clear_all_softrefs) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 
+  SharedHeap* sh = SharedHeap::heap();
+#ifdef ASSERT
+  if (sh->collector_policy()->should_clear_all_soft_refs()) {
+    assert(clear_all_softrefs, "Policy should have been checked earlier");
+  }
+#endif
   // hook up weak ref data so it can be used during Mark-Sweep
   assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
   assert(rp != NULL, "should be non-NULL");
@@ -44,7 +50,6 @@
 
   // Increment the invocation count for the permanent generation, since it is
   // implicitly collected whenever we do a full mark sweep collection.
-  SharedHeap* sh = SharedHeap::heap();
   sh->perm_gen()->stat_record()->invocations++;
 
   bool marked_for_unloading = false;
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,9 +28,6 @@
 
 #define G1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw) \
                                                                             \
-  product(intx, G1ParallelGCAllocBufferSize, 8*K,                           \
-          "Size of parallel G1 allocation buffers in to-space.")            \
-                                                                            \
   product(intx, G1ConfidencePercent, 50,                                    \
           "Confidence level for MMU/pause predictions")                     \
                                                                             \
@@ -40,9 +37,6 @@
   develop(bool, G1Gen, true,                                                \
           "If true, it will enable the generational G1")                    \
                                                                             \
-  develop(intx, G1GCPercent, 10,                                            \
-          "The desired percent time spent on GC")                           \
-                                                                            \
   develop(intx, G1PolicyVerbose, 0,                                         \
           "The verbosity level on G1 policy decisions")                     \
                                                                             \
@@ -232,10 +226,6 @@
           "the number of regions for which we'll print a surv rate "        \
           "summary.")                                                       \
                                                                             \
-  develop(bool, G1UseScanOnlyPrefix, false,                                 \
-          "It determines whether the system will calculate an optimum "     \
-          "scan-only set.")                                                 \
-                                                                            \
   product(intx, G1ReservePercent, 10,                                       \
           "It determines the minimum reserve we should have in the heap "   \
           "to minimize the probability of promotion failure.")              \
@@ -270,11 +260,11 @@
   product(uintx, G1HeapRegionSize, 0,                                       \
           "Size of the G1 regions.")                                        \
                                                                             \
-  experimental(bool, G1UseParallelRSetUpdating, false,                      \
+  experimental(bool, G1UseParallelRSetUpdating, true,                       \
           "Enables the parallelization of remembered set updating "         \
           "during evacuation pauses")                                       \
                                                                             \
-  experimental(bool, G1UseParallelRSetScanning, false,                      \
+  experimental(bool, G1UseParallelRSetScanning, true,                       \
           "Enables the parallelization of remembered set scanning "         \
           "during evacuation pauses")                                       \
                                                                             \
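Both flags are declared experimental, so flipping them back off requires unlocking experimental options on the command line first; an illustrative invocation (the application name is hypothetical):

    java -XX:+UseG1GC -XX:+UnlockExperimentalVMOptions \
         -XX:-G1UseParallelRSetUpdating -XX:-G1UseParallelRSetScanning MyApp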
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,6 +75,16 @@
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
   virtual void do_oop(      oop* p) { do_oop_work(p); }
 
+  void print_object(outputStream* out, oop obj) {
+#ifdef PRODUCT
+    klassOop k = obj->klass();
+    const char* class_name = instanceKlass::cast(k)->external_name();
+    out->print_cr("class name %s", class_name);
+#else // PRODUCT
+    obj->print_on(out);
+#endif // PRODUCT
+  }
+
   template <class T> void do_oop_work(T* p) {
     assert(_containing_obj != NULL, "Precondition");
     assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
@@ -90,21 +100,29 @@
           gclog_or_tty->print_cr("----------");
         }
         if (!_g1h->is_in_closed_subset(obj)) {
-          gclog_or_tty->print_cr("Field "PTR_FORMAT
-                        " of live obj "PTR_FORMAT
-                        " points to obj "PTR_FORMAT
-                        " not in the heap.",
-                        p, (void*) _containing_obj, (void*) obj);
-        } else {
+          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
           gclog_or_tty->print_cr("Field "PTR_FORMAT
-                        " of live obj "PTR_FORMAT
-                        " points to dead obj "PTR_FORMAT".",
-                        p, (void*) _containing_obj, (void*) obj);
+                                 " of live obj "PTR_FORMAT" in region "
+                                 "["PTR_FORMAT", "PTR_FORMAT")",
+                                 p, (void*) _containing_obj,
+                                 from->bottom(), from->end());
+          print_object(gclog_or_tty, _containing_obj);
+          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
+                                 (void*) obj);
+        } else {
+          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
+          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
+          gclog_or_tty->print_cr("Field "PTR_FORMAT
+                                 " of live obj "PTR_FORMAT" in region "
+                                 "["PTR_FORMAT", "PTR_FORMAT")",
+                                 p, (void*) _containing_obj,
+                                 from->bottom(), from->end());
+          print_object(gclog_or_tty, _containing_obj);
+          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
+                                 "["PTR_FORMAT", "PTR_FORMAT")",
+                                 (void*) obj, to->bottom(), to->end());
+          print_object(gclog_or_tty, obj);
         }
-        gclog_or_tty->print_cr("Live obj:");
-        _containing_obj->print_on(gclog_or_tty);
-        gclog_or_tty->print_cr("Bad referent:");
-        obj->print_on(gclog_or_tty);
         gclog_or_tty->print_cr("----------");
         _failures = true;
         failed = true;
@@ -432,7 +450,9 @@
     _young_type(NotYoung), _next_young_region(NULL),
     _next_dirty_cards_region(NULL),
     _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
-    _rem_set(NULL), _zfs(NotZeroFilled)
+    _rem_set(NULL), _zfs(NotZeroFilled),
+    _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
+    _predicted_bytes_to_copy(0)
 {
   _orig_end = mr.end();
   // Note that initialize() will set the start of the unmarked area of the
@@ -715,7 +735,7 @@
   else
     st->print("   ");
   if (is_young())
-    st->print(is_scan_only() ? " SO" : (is_survivor() ? " SU" : " Y "));
+    st->print(is_survivor() ? " SU" : " Y ");
   else
     st->print("   ");
   if (is_empty())
@@ -723,6 +743,8 @@
   else
     st->print("  ");
   st->print(" %5d", _gc_time_stamp);
+  st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
+            prev_top_at_mark_start(), next_top_at_mark_start());
   G1OffsetTableContigSpace::print_on(st);
 }
 
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -247,7 +247,6 @@
 
   enum YoungType {
     NotYoung,                   // a region is not young
-    ScanOnly,                   // a region is young and scan-only
     Young,                      // a region is young
     Survivor                    // a region is young and it contains
                                 // survivor
@@ -292,6 +291,20 @@
     _young_type = new_type;
   }
 
+  // Cached attributes used in the collection set policy information
+
+  // The RSet length that was added to the total value
+  // for the collection set.
+  size_t _recorded_rs_length;
+
+  // The predicted elapsed time that was added to total value
+  // for the collection set.
+  double _predicted_elapsed_time_ms;
+
+  // The predicted number of bytes to copy that was added to
+  // the total value for the collection set.
+  size_t _predicted_bytes_to_copy;
+
  public:
   // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
   HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
@@ -614,7 +627,6 @@
   // </PREDICTION>
 
   bool is_young() const     { return _young_type != NotYoung; }
-  bool is_scan_only() const { return _young_type == ScanOnly; }
   bool is_survivor() const  { return _young_type == Survivor; }
 
   int  young_index_in_cset() const { return _young_index_in_cset; }
@@ -629,12 +641,6 @@
     return _surv_rate_group->age_in_group(_age_index);
   }
 
-  void recalculate_age_in_surv_rate_group() {
-    assert( _surv_rate_group != NULL, "pre-condition" );
-    assert( _age_index > -1, "pre-condition" );
-    _age_index = _surv_rate_group->recalculate_age_index(_age_index);
-  }
-
   void record_surv_words_in_group(size_t words_survived) {
     assert( _surv_rate_group != NULL, "pre-condition" );
     assert( _age_index > -1, "pre-condition" );
@@ -676,8 +682,6 @@
 
   void set_young() { set_young_type(Young); }
 
-  void set_scan_only() { set_young_type(ScanOnly); }
-
   void set_survivor() { set_young_type(Survivor); }
 
   void set_not_young() { set_young_type(NotYoung); }
@@ -775,6 +779,22 @@
     _zero_filler = NULL;
   }
 
+  size_t recorded_rs_length() const        { return _recorded_rs_length; }
+  double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
+  size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }
+
+  void set_recorded_rs_length(size_t rs_length) {
+    _recorded_rs_length = rs_length;
+  }
+
+  void set_predicted_elapsed_time_ms(double ms) {
+    _predicted_elapsed_time_ms = ms;
+  }
+
+  void set_predicted_bytes_to_copy(size_t bytes) {
+    _predicted_bytes_to_copy = bytes;
+  }
+
 #define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
   virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
   SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)
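The new setters are presumably paired with the incremental-cset accounting declared earlier in this change; below is a sketch of how the cached values might be recorded when a region joins the set. The helper names (add_to_incremental_cset_info, predict_region_elapsed_time_ms, predict_bytes_to_copy) are assumptions for illustration.

    // Illustrative only: the per-region values are cached so they can be
    // subtracted again if the region later leaves the incremental cset.
    void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr) {
      size_t rs_length = hr->rem_set()->occupied();
      hr->set_recorded_rs_length(rs_length);
      hr->set_predicted_elapsed_time_ms(
          predict_region_elapsed_time_ms(hr, true /* young */));
      hr->set_predicted_bytes_to_copy(predict_bytes_to_copy(hr));
      // ...and the same three quantities are added to the running totals.
    }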
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Wed May 19 10:22:39 2010 -0700
@@ -662,8 +662,6 @@
         prt = PosParPRT::alloc(from_hr);
       }
       prt->init(from_hr);
-      // Record the outgoing pointer in the from_region's outgoing bitmap.
-      from_hr->rem_set()->add_outgoing_reference(hr());
 
       PosParPRT* first_prt = _fine_grain_regions[ind];
       prt->set_next(first_prt);  // XXX Maybe move to init?
@@ -1073,11 +1071,7 @@
 
 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                    HeapRegion* hr)
-    : _bosa(bosa), _other_regions(hr),
-      _outgoing_region_map(G1CollectedHeap::heap()->max_regions(),
-                           false /* in-resource-area */),
-      _iter_state(Unclaimed)
-{}
+  : _bosa(bosa), _other_regions(hr), _iter_state(Unclaimed) { }
 
 
 void HeapRegionRemSet::setup_remset_size() {
@@ -1148,30 +1142,11 @@
   PosParPRT::par_contract_all();
 }
 
-void HeapRegionRemSet::add_outgoing_reference(HeapRegion* to_hr) {
-  _outgoing_region_map.par_at_put(to_hr->hrs_index(), 1);
-}
-
 void HeapRegionRemSet::clear() {
-  clear_outgoing_entries();
-  _outgoing_region_map.clear();
   _other_regions.clear();
   assert(occupied() == 0, "Should be clear.");
 }
 
-void HeapRegionRemSet::clear_outgoing_entries() {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  size_t i = _outgoing_region_map.get_next_one_offset(0);
-  while (i < _outgoing_region_map.size()) {
-    HeapRegion* to_region = g1h->region_at(i);
-    if (!to_region->in_collection_set()) {
-      to_region->rem_set()->clear_incoming_entry(hr());
-    }
-    i = _outgoing_region_map.get_next_one_offset(i+1);
-  }
-}
-
-
 void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
                              BitMap* region_bm, BitMap* card_bm) {
   _other_regions.scrub(ctbs, region_bm, card_bm);
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Wed May 19 10:22:39 2010 -0700
@@ -179,13 +179,6 @@
 
   OtherRegionsTable _other_regions;
 
-  // One set bit for every region that has an entry for this one.
-  BitMap _outgoing_region_map;
-
-  // Clear entries for the current region in any rem sets named in
-  // the _outgoing_region_map.
-  void clear_outgoing_entries();
-
   enum ParIterState { Unclaimed, Claimed, Complete };
   volatile ParIterState _iter_state;
   volatile jlong _iter_claimed;
@@ -243,10 +236,6 @@
     _other_regions.add_reference(from, tid);
   }
 
-  // Records the fact that the current region contains an outgoing
-  // reference into "to_hr".
-  void add_outgoing_reference(HeapRegion* to_hr);
-
   // Removes any entries shown by the given bitmaps to contain only dead
   // objects.
   void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
--- a/src/share/vm/gc_implementation/g1/survRateGroup.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/survRateGroup.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,7 +55,6 @@
 void SurvRateGroup::reset()
 {
   _all_regions_allocated = 0;
-  _scan_only_prefix      = 0;
   _setup_seq_num         = 0;
   _stats_arrays_length   = 0;
   _accum_surv_rate       = 0.0;
@@ -74,7 +73,7 @@
 void
 SurvRateGroup::start_adding_regions() {
   _setup_seq_num   = _stats_arrays_length;
-  _region_num      = _scan_only_prefix;
+  _region_num      = 0;
   _accum_surv_rate = 0.0;
 
 #if 0
@@ -164,12 +163,6 @@
 }
 
 void
-SurvRateGroup::record_scan_only_prefix(size_t scan_only_prefix) {
-  guarantee( scan_only_prefix <= _region_num, "pre-condition" );
-  _scan_only_prefix = scan_only_prefix;
-}
-
-void
 SurvRateGroup::record_surviving_words(int age_in_group, size_t surv_words) {
   guarantee( 0 <= age_in_group && (size_t) age_in_group < _region_num,
              "pre-condition" );
@@ -218,13 +211,12 @@
 #ifndef PRODUCT
 void
 SurvRateGroup::print() {
-  gclog_or_tty->print_cr("Surv Rate Group: %s (%d entries, %d scan-only)",
-                _name, _region_num, _scan_only_prefix);
+  gclog_or_tty->print_cr("Surv Rate Group: %s (%d entries)",
+                _name, _region_num);
   for (size_t i = 0; i < _region_num; ++i) {
-    gclog_or_tty->print_cr("    age %4d   surv rate %6.2lf %%   pred %6.2lf %%%s",
+    gclog_or_tty->print_cr("    age %4d   surv rate %6.2lf %%   pred %6.2lf %%",
                   i, _surv_rate[i] * 100.0,
-                  _g1p->get_new_prediction(_surv_rate_pred[i]) * 100.0,
-                  (i < _scan_only_prefix) ? " S-O" : "    ");
+                  _g1p->get_new_prediction(_surv_rate_pred[i]) * 100.0);
   }
 }
 
--- a/src/share/vm/gc_implementation/g1/survRateGroup.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/survRateGroup.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,6 @@
 
   int _all_regions_allocated;
   size_t _region_num;
-  size_t _scan_only_prefix;
   size_t _setup_seq_num;
 
 public:
@@ -51,13 +50,11 @@
   void reset();
   void start_adding_regions();
   void stop_adding_regions();
-  void record_scan_only_prefix(size_t scan_only_prefix);
   void record_surviving_words(int age_in_group, size_t surv_words);
   void all_surviving_words_recorded(bool propagate);
   const char* name() { return _name; }
 
   size_t region_num() { return _region_num; }
-  size_t scan_only_length() { return _scan_only_prefix; }
   double accum_surv_rate_pred(int age) {
     assert(age >= 0, "must be");
     if ((size_t)age < _stats_arrays_length)
@@ -82,17 +79,12 @@
 
   int next_age_index();
   int age_in_group(int age_index) {
-    int ret = (int) (_all_regions_allocated -  age_index);
+    int ret = (int) (_all_regions_allocated - age_index);
     assert( ret >= 0, "invariant" );
     return ret;
   }
-  int recalculate_age_index(int age_index) {
-    int new_age_index = (int) _scan_only_prefix - age_in_group(age_index);
-    guarantee( new_age_index >= 0, "invariant" );
-    return new_age_index;
-  }
   void finished_recalculating_age_indexes() {
-    _all_regions_allocated = (int) _scan_only_prefix;
+    _all_regions_allocated = 0;
   }
 
 #ifndef PRODUCT
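With the scan-only prefix gone, a region's age is a plain offset from the allocation count; a short worked example with illustrative numbers:

    // If 10 regions have been allocated in this group
    // (_all_regions_allocated == 10) and a region received age_index 7
    // when it was added, then:
    //   age_in_group = _all_regions_allocated - age_index = 10 - 7 = 3
    // i.e., three regions joined the group after it did.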
--- a/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 //
-// Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+// Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -161,8 +161,10 @@
 parMarkBitMap.hpp			bitMap.inline.hpp
 parMarkBitMap.hpp			psVirtualspace.hpp
 
+psAdaptiveSizePolicy.cpp                collectorPolicy.hpp
 psAdaptiveSizePolicy.cpp                gcPolicyCounters.hpp
 psAdaptiveSizePolicy.cpp		gcCause.hpp
+psAdaptiveSizePolicy.cpp                generationSizer.hpp
 psAdaptiveSizePolicy.cpp                psAdaptiveSizePolicy.hpp
 psAdaptiveSizePolicy.cpp                psGCAdaptivePolicyCounters.hpp
 psAdaptiveSizePolicy.cpp                psScavenge.hpp
@@ -215,6 +217,7 @@
 psMarkSweep.cpp                         fprofiler.hpp
 psMarkSweep.cpp                         gcCause.hpp
 psMarkSweep.cpp                         gcLocker.inline.hpp
+psMarkSweep.cpp                         generationSizer.hpp
 psMarkSweep.cpp                         isGCActiveMark.hpp
 psMarkSweep.cpp                         oop.inline.hpp
 psMarkSweep.cpp                         memoryService.hpp
@@ -256,6 +259,7 @@
 psParallelCompact.cpp			gcCause.hpp
 psParallelCompact.cpp			gcLocker.inline.hpp
 psParallelCompact.cpp                   gcTaskManager.hpp
+psParallelCompact.cpp                   generationSizer.hpp
 psParallelCompact.cpp			isGCActiveMark.hpp
 psParallelCompact.cpp			management.hpp
 psParallelCompact.cpp			memoryService.hpp
@@ -344,10 +348,12 @@
 psScavenge.cpp                          psAdaptiveSizePolicy.hpp
 psScavenge.cpp                          biasedLocking.hpp
 psScavenge.cpp                          cardTableExtension.hpp
+psScavenge.cpp                          collectorPolicy.hpp
 psScavenge.cpp                          fprofiler.hpp
 psScavenge.cpp                          gcCause.hpp
 psScavenge.cpp                          gcLocker.inline.hpp
 psScavenge.cpp                          gcTaskManager.hpp
+psScavenge.cpp                          generationSizer.hpp
 psScavenge.cpp                          handles.inline.hpp
 psScavenge.cpp                          isGCActiveMark.hpp
 psScavenge.cpp                          oop.inline.hpp
--- a/src/share/vm/gc_implementation/includeDB_gc_serial	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/includeDB_gc_serial	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+// Copyright 2007-2010 Sun Microsystems, Inc.  All Rights Reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //   
 // This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 adaptiveSizePolicy.hpp			universe.hpp
 
 adaptiveSizePolicy.cpp			adaptiveSizePolicy.hpp
+adaptiveSizePolicy.cpp			collectorPolicy.hpp
 adaptiveSizePolicy.cpp			gcCause.hpp
 adaptiveSizePolicy.cpp			ostream.hpp
 adaptiveSizePolicy.cpp			timer.hpp
--- a/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp	Wed May 19 10:22:39 2010 -0700
@@ -325,7 +325,7 @@
 
     eden_size = align_size_down(eden_size, alignment);
     eden_end = eden_start + eden_size;
-    assert(eden_end >= eden_start, "addition overflowed")
+    assert(eden_end >= eden_start, "addition overflowed");
 
     // To may resize into from space as long as it is clear of live data.
     // From space must remain page aligned, though, so we need to do some
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -892,6 +892,10 @@
     }
     swap_spaces();
 
+    // A successful scavenge should reset the GC overhead limit count, which
+    // is maintained for full GCs.
+    size_policy->reset_gc_overhead_limit_count();
+
     assert(to()->is_empty(), "to space should be empty now");
   } else {
     assert(HandlePromotionFailure,
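reset_gc_overhead_limit_count() zeroes the consecutive-failure counter that the overhead-limit machinery increments on expensive full GCs; the field name below comes from the AdaptiveSizePolicy changes later in this changeset, while the accessor shapes are assumptions.

    // Sketch: the limit only trips after several consecutive bad full GCs,
    // so a successful scavenge wipes the slate clean.
    void AdaptiveSizePolicy::inc_gc_overhead_limit_count() {
      _gc_overhead_limit_count++;
    }
    void AdaptiveSizePolicy::reset_gc_overhead_limit_count() {
      _gc_overhead_limit_count = 0;
    }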
--- a/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp	Wed May 19 10:22:39 2010 -0700
@@ -326,7 +326,7 @@
     }
 
     eden_end = eden_start + eden_size;
-    assert(eden_end >= eden_start, "addition overflowed")
+    assert(eden_end >= eden_start, "addition overflowed");
 
     // To may resize into from space as long as it is clear of live data.
     // From space must remain page aligned, though, so we need to do some
@@ -413,7 +413,7 @@
                        pointer_delta(to_start, eden_start, sizeof(char)));
     }
     eden_end = eden_start + eden_size;
-    assert(eden_end >= eden_start, "addition overflowed")
+    assert(eden_end >= eden_start, "addition overflowed");
 
     // Don't let eden shrink down to 0 or less.
     eden_end = MAX2(eden_end, eden_start + alignment);
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,15 +54,16 @@
   CollectedHeap::pre_initialize();
 
   // Cannot be initialized until after the flags are parsed
-  GenerationSizer flag_parser;
+  // GenerationSizer flag_parser;
+  _collector_policy = new GenerationSizer();
 
-  size_t yg_min_size = flag_parser.min_young_gen_size();
-  size_t yg_max_size = flag_parser.max_young_gen_size();
-  size_t og_min_size = flag_parser.min_old_gen_size();
-  size_t og_max_size = flag_parser.max_old_gen_size();
+  size_t yg_min_size = _collector_policy->min_young_gen_size();
+  size_t yg_max_size = _collector_policy->max_young_gen_size();
+  size_t og_min_size = _collector_policy->min_old_gen_size();
+  size_t og_max_size = _collector_policy->max_old_gen_size();
   // Why isn't there a min_perm_gen_size()?
-  size_t pg_min_size = flag_parser.perm_gen_size();
-  size_t pg_max_size = flag_parser.max_perm_gen_size();
+  size_t pg_min_size = _collector_policy->perm_gen_size();
+  size_t pg_max_size = _collector_policy->max_perm_gen_size();
 
   trace_gen_sizes("ps heap raw",
                   pg_min_size, pg_max_size,
@@ -89,12 +90,14 @@
   // move to the common code.
   yg_min_size = align_size_up(yg_min_size, yg_align);
   yg_max_size = align_size_up(yg_max_size, yg_align);
-  size_t yg_cur_size = align_size_up(flag_parser.young_gen_size(), yg_align);
+  size_t yg_cur_size =
+    align_size_up(_collector_policy->young_gen_size(), yg_align);
   yg_cur_size = MAX2(yg_cur_size, yg_min_size);
 
   og_min_size = align_size_up(og_min_size, og_align);
   og_max_size = align_size_up(og_max_size, og_align);
-  size_t og_cur_size = align_size_up(flag_parser.old_gen_size(), og_align);
+  size_t og_cur_size =
+    align_size_up(_collector_policy->old_gen_size(), og_align);
   og_cur_size = MAX2(og_cur_size, og_min_size);
 
   pg_min_size = align_size_up(pg_min_size, pg_align);
@@ -355,6 +358,11 @@
   assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
 
+  // In general gc_overhead_limit_was_exceeded should be false so
+  // set it so here and reset it to true only if the gc time
+  // limit is being exceeded as checked below.
+  *gc_overhead_limit_was_exceeded = false;
+
   HeapWord* result = young_gen()->allocate(size, is_tlab);
 
   uint loop_count = 0;
@@ -428,24 +436,6 @@
 
     if (result == NULL) {
 
-      // Exit the loop if if the gc time limit has been exceeded.
-      // The allocation must have failed above (result must be NULL),
-      // and the most recent collection must have exceeded the
-      // gc time limit.  Exit the loop so that an out-of-memory
-      // will be thrown (returning a NULL will do that), but
-      // clear gc_time_limit_exceeded so that the next collection
-      // will succeeded if the applications decides to handle the
-      // out-of-memory and tries to go on.
-      *gc_overhead_limit_was_exceeded = size_policy()->gc_time_limit_exceeded();
-      if (size_policy()->gc_time_limit_exceeded()) {
-        size_policy()->set_gc_time_limit_exceeded(false);
-        if (PrintGCDetails && Verbose) {
-        gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
-          "return NULL because gc_time_limit_exceeded is set");
-        }
-        return NULL;
-      }
-
       // Generate a VM operation
       VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
       VMThread::execute(&op);
@@ -463,16 +453,34 @@
           assert(op.result() == NULL, "must be NULL if gc_locked() is true");
           continue;  // retry and/or stall as necessary
         }
-        // If a NULL result is being returned, an out-of-memory
-        // will be thrown now.  Clear the gc_time_limit_exceeded
-        // flag to avoid the following situation.
-        //      gc_time_limit_exceeded is set during a collection
-        //      the collection fails to return enough space and an OOM is thrown
-        //      the next GC is skipped because the gc_time_limit_exceeded
-        //        flag is set and another OOM is thrown
-        if (op.result() == NULL) {
-          size_policy()->set_gc_time_limit_exceeded(false);
+
+        // Exit the loop if the gc time limit has been exceeded.
+        // The allocation must have failed above ("result" guarding
+        // this path is NULL) and the most recent collection has exceeded the
+        // gc overhead limit (although enough may have been collected to
+        // satisfy the allocation).  Exit the loop so that an out-of-memory
+        // will be thrown (return a NULL ignoring the contents of
+        // op.result()),
+        // but clear gc_overhead_limit_exceeded so that the next collection
+        // starts with a clean slate (i.e., forgets about previous overhead
+        // excesses).  Fill op.result() with a filler object so that the
+        // heap remains parsable.
+        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
+        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
+        assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
+        if (limit_exceeded && softrefs_clear) {
+          *gc_overhead_limit_was_exceeded = true;
+          size_policy()->set_gc_overhead_limit_exceeded(false);
+          if (PrintGCDetails && Verbose) {
+            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
+              "return NULL because gc_overhead_limit_exceeded is set");
+          }
+          if (op.result() != NULL) {
+            CollectedHeap::fill_with_object(op.result(), size);
+          }
+          return NULL;
         }
+
         return op.result();
       }
     }
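Filling op.result() matters because HotSpot walks a space object by object, advancing by each object's size; a gap without an object header would break the walk. Illustrative pseudologic only, not an actual HotSpot routine:

    // Why the filler is needed: every unused range must masquerade as a
    // (dead) object so that iteration can step over it.
    void object_iterate_sketch(ContiguousSpace* space, ObjectClosure* cl) {
      HeapWord* p = space->bottom();
      while (p < space->top()) {
        oop obj = oop(p);
        cl->do_object(obj);   // fillers are visited like any other object
        p += obj->size();     // a gap without a size header breaks the walk
      }
    }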
@@ -613,14 +621,15 @@
       // and the most recent collection must have exceeded the
       // gc time limit.  Exit the loop so that an out-of-memory
       // will be thrown (returning a NULL will do that), but
-      // clear gc_time_limit_exceeded so that the next collection
+      // clear gc_overhead_limit_exceeded so that the next collection
       // will succeed if the application decides to handle the
       // out-of-memory and tries to go on.
-      if (size_policy()->gc_time_limit_exceeded()) {
-        size_policy()->set_gc_time_limit_exceeded(false);
+      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
+      if (limit_exceeded) {
+        size_policy()->set_gc_overhead_limit_exceeded(false);
         if (PrintGCDetails && Verbose) {
-        gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate: "
-          "return NULL because gc_time_limit_exceeded is set");
+          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate:"
+            " return NULL because gc_overhead_limit_exceeded is set");
         }
         assert(result == NULL, "Allocation did not fail");
         return NULL;
@@ -643,14 +652,15 @@
           continue;  // retry and/or stall as necessary
         }
         // If a NULL result is being returned, an out-of-memory
-        // will be thrown now.  Clear the gc_time_limit_exceeded
+        // will be thrown now.  Clear the gc_overhead_limit_exceeded
         // flag to avoid the following situation.
-        //      gc_time_limit_exceeded is set during a collection
+        //      gc_overhead_limit_exceeded is set during a collection
         //      the collection fails to return enough space and an OOM is thrown
-        //      the next GC is skipped because the gc_time_limit_exceeded
-        //        flag is set and another OOM is thrown
+        //      a subsequent GC prematurely throws an out-of-memory because
+        //        the gc_overhead_limit_exceeded counts did not start
+        //        again from 0.
         if (op.result() == NULL) {
-          size_policy()->set_gc_time_limit_exceeded(false);
+          size_policy()->reset_gc_overhead_limit_count();
         }
         return op.result();
       }
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,8 @@
 class AdjoiningGenerations;
 class GCTaskManager;
 class PSAdaptiveSizePolicy;
+class GenerationSizer;
+class CollectorPolicy;
 
 class ParallelScavengeHeap : public CollectedHeap {
   friend class VMStructs;
@@ -43,6 +45,8 @@
   size_t _young_gen_alignment;
   size_t _old_gen_alignment;
 
+  GenerationSizer* _collector_policy;
+
   inline size_t set_alignment(size_t& var, size_t val);
 
   // Collection of generations that are adjacent in the
@@ -72,6 +76,9 @@
     return CollectedHeap::ParallelScavengeHeap;
   }
 
+  CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }
+  // GenerationSizer* collector_policy() const { return _collector_policy; }
+
   static PSYoungGen* young_gen()     { return _young_gen; }
   static PSOldGen* old_gen()         { return _old_gen; }
   static PSPermGen* perm_gen()       { return _perm_gen; }
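Call sites elsewhere in this changeset reach the policy through the new accessor; a condensed usage pattern, assembled from the hunks below:

    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    CollectorPolicy* cp = heap->collector_policy();
    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();
    // ...then forwarded into the full collection, e.g.:
    //   PSMarkSweep::invoke_no_policy(clear_all_softrefs || maximum_heap_compaction);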
--- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -184,18 +184,19 @@
   set_change_young_gen_for_maj_pauses(0);
 }
 
-
 // If this is not a full GC, only test and modify the young generation.
 
-void PSAdaptiveSizePolicy::compute_generation_free_space(size_t young_live,
-                                               size_t eden_live,
-                                               size_t old_live,
-                                               size_t perm_live,
-                                               size_t cur_eden,
-                                               size_t max_old_gen_size,
-                                               size_t max_eden_size,
-                                               bool   is_full_gc,
-                                               GCCause::Cause gc_cause) {
+void PSAdaptiveSizePolicy::compute_generation_free_space(
+                                           size_t young_live,
+                                           size_t eden_live,
+                                           size_t old_live,
+                                           size_t perm_live,
+                                           size_t cur_eden,
+                                           size_t max_old_gen_size,
+                                           size_t max_eden_size,
+                                           bool   is_full_gc,
+                                           GCCause::Cause gc_cause,
+                                           CollectorPolicy* collector_policy) {
 
   // Update statistics
   // Time statistics are updated as we go, update footprint stats here
@@ -380,91 +381,16 @@
   // Is too much time being spent in GC?
   //   Is the heap trying to grow beyond its limits?
 
-  const size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
+  const size_t free_in_old_gen =
+    (size_t)(max_old_gen_size - avg_old_live()->average());
   if (desired_promo_size > free_in_old_gen && desired_eden_size > eden_limit) {
-
-    // eden_limit is the upper limit on the size of eden based on
-    // the maximum size of the young generation and the sizes
-    // of the survivor space.
-    // The question being asked is whether the gc costs are high
-    // and the space being recovered by a collection is low.
-    // free_in_young_gen is the free space in the young generation
-    // after a collection and promo_live is the free space in the old
-    // generation after a collection.
-    //
-    // Use the minimum of the current value of the live in the
-    // young gen or the average of the live in the young gen.
-    // If the current value drops quickly, that should be taken
-    // into account (i.e., don't trigger if the amount of free
-    // space has suddenly jumped up).  If the current is much
-    // higher than the average, use the average since it represents
-    // the longer term behavor.
-    const size_t live_in_eden = MIN2(eden_live, (size_t) avg_eden_live()->average());
-    const size_t free_in_eden = eden_limit > live_in_eden ?
-      eden_limit - live_in_eden : 0;
-    const size_t total_free_limit = free_in_old_gen + free_in_eden;
-    const size_t total_mem = max_old_gen_size + max_eden_size;
-    const double mem_free_limit = total_mem * (GCHeapFreeLimit/100.0);
-    if (PrintAdaptiveSizePolicy && (Verbose ||
-        (total_free_limit < (size_t) mem_free_limit))) {
-      gclog_or_tty->print_cr(
-            "PSAdaptiveSizePolicy::compute_generation_free_space limits:"
-            " promo_limit: " SIZE_FORMAT
-            " eden_limit: " SIZE_FORMAT
-            " total_free_limit: " SIZE_FORMAT
-            " max_old_gen_size: " SIZE_FORMAT
-            " max_eden_size: " SIZE_FORMAT
-            " mem_free_limit: " SIZE_FORMAT,
-            promo_limit, eden_limit, total_free_limit,
-            max_old_gen_size, max_eden_size,
-            (size_t) mem_free_limit);
-    }
-
-    if (is_full_gc) {
-      if (gc_cost() > gc_cost_limit &&
-        total_free_limit < (size_t) mem_free_limit) {
-        // Collections, on average, are taking too much time, and
-        //      gc_cost() > gc_cost_limit
-        // we have too little space available after a full gc.
-        //      total_free_limit < mem_free_limit
-        // where
-        //   total_free_limit is the free space available in
-        //     both generations
-        //   total_mem is the total space available for allocation
-        //     in both generations (survivor spaces are not included
-        //     just as they are not included in eden_limit).
-        //   mem_free_limit is a fraction of total_mem judged to be an
-        //     acceptable amount that is still unused.
-        // The heap can ask for the value of this variable when deciding
-        // whether to thrown an OutOfMemory error.
-        // Note that the gc time limit test only works for the collections
-        // of the young gen + tenured gen and not for collections of the
-        // permanent gen.  That is because the calculation of the space
-        // freed by the collection is the free space in the young gen +
-        // tenured gen.
-        // Ignore explicit GC's. Ignoring explicit GC's at this level
-        // is the equivalent of the GC did not happen as far as the
-        // overhead calculation is concerted (i.e., the flag is not set
-        // and the count is not affected).  Also the average will not
-        // have been updated unless UseAdaptiveSizePolicyWithSystemGC is on.
-        if (!GCCause::is_user_requested_gc(gc_cause) &&
-            !GCCause::is_serviceability_requested_gc(gc_cause)) {
-          inc_gc_time_limit_count();
-          if (UseGCOverheadLimit &&
-              (gc_time_limit_count() > AdaptiveSizePolicyGCTimeLimitThreshold)){
-            // All conditions have been met for throwing an out-of-memory
-            _gc_time_limit_exceeded = true;
-            // Avoid consecutive OOM due to the gc time limit by resetting
-            // the counter.
-            reset_gc_time_limit_count();
-          }
-          _print_gc_time_limit_would_be_exceeded = true;
-        }
-      } else {
-        // Did not exceed overhead limits
-        reset_gc_time_limit_count();
-      }
-    }
+    check_gc_overhead_limit(young_live,
+                            eden_live,
+                            max_old_gen_size,
+                            max_eden_size,
+                            is_full_gc,
+                            gc_cause,
+                            collector_policy);
   }
 
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,6 +45,7 @@
 
 // Forward decls
 class elapsedTimer;
+class GenerationSizer;
 
 class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
  friend class PSGCAdaptivePolicyCounters;
@@ -340,7 +341,8 @@
                                      size_t max_old_gen_size,
                                      size_t max_eden_size,
                                      bool   is_full_gc,
-                                     GCCause::Cause gc_cause);
+                                     GCCause::Cause gc_cause,
+                                     CollectorPolicy* collector_policy);
 
   // Calculates new survivor space size;  returns a new tenuring threshold
   // value. Stores new survivor size in _survivor_size.
--- a/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -117,11 +117,13 @@
       PerfData::U_Bytes, (jlong) ps_size_policy()->avg_base_footprint()->average(), CHECK);
 
     cname = PerfDataManager::counter_name(name_space(), "gcTimeLimitExceeded");
-    _gc_time_limit_exceeded = PerfDataManager::create_variable(SUN_GC, cname,
-      PerfData::U_Events, ps_size_policy()->gc_time_limit_exceeded(), CHECK);
+    _gc_overhead_limit_exceeded_counter =
+      PerfDataManager::create_variable(SUN_GC, cname,
+      PerfData::U_Events, ps_size_policy()->gc_overhead_limit_exceeded(), CHECK);
 
     cname = PerfDataManager::counter_name(name_space(), "liveAtLastFullGc");
-    _live_at_last_full_gc = PerfDataManager::create_variable(SUN_GC, cname,
+    _live_at_last_full_gc_counter =
+      PerfDataManager::create_variable(SUN_GC, cname,
       PerfData::U_Bytes, ps_size_policy()->live_at_last_full_gc(), CHECK);
 
     cname = PerfDataManager::counter_name(name_space(), "majorPauseOldSlope");
@@ -189,6 +191,8 @@
     update_minor_pause_old_slope();
     update_major_pause_young_slope();
     update_minor_collection_slope_counter();
+    update_gc_overhead_limit_exceeded_counter();
+    update_live_at_last_full_gc_counter();
   }
 }
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,8 +44,8 @@
   PerfVariable* _live_space;
   PerfVariable* _free_space;
   PerfVariable* _avg_base_footprint;
-  PerfVariable* _gc_time_limit_exceeded;
-  PerfVariable* _live_at_last_full_gc;
+  PerfVariable* _gc_overhead_limit_exceeded_counter;
+  PerfVariable* _live_at_last_full_gc_counter;
   PerfVariable* _old_capacity;
   PerfVariable* _boundary_moved;
 
@@ -169,6 +169,14 @@
       (jlong)(ps_size_policy()->major_pause_young_slope() * 1000)
     );
   }
+  inline void update_gc_overhead_limit_exceeded_counter() {
+    _gc_overhead_limit_exceeded_counter->set_value(
+      (jlong) ps_size_policy()->gc_overhead_limit_exceeded());
+  }
+  inline void update_live_at_last_full_gc_counter() {
+    _live_at_last_full_gc_counter->set_value(
+      (jlong)(ps_size_policy()->live_at_last_full_gc()));
+  }
 
   inline void update_scavenge_skipped(int cause) {
     _scavenge_skipped->set_value(cause);
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,6 +46,12 @@
 //
 // Note that this method should only be called from the vm_thread while
 // at a safepoint!
+//
+// Note that the all_soft_refs_clear flag in the collector policy
+// may be true because this method can be called without intervening
+// activity.  For example, when the heap space is tight and full measures
+// are being taken to free space.
+
 void PSMarkSweep::invoke(bool maximum_heap_compaction) {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
@@ -54,24 +60,18 @@
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   GCCause::Cause gc_cause = heap->gc_cause();
   PSAdaptiveSizePolicy* policy = heap->size_policy();
+  IsGCActiveMark mark;
 
-  // Before each allocation/collection attempt, find out from the
-  // policy object if GCs are, on the whole, taking too long. If so,
-  // bail out without attempting a collection.  The exceptions are
-  // for explicitly requested GC's.
-  if (!policy->gc_time_limit_exceeded() ||
-      GCCause::is_user_requested_gc(gc_cause) ||
-      GCCause::is_serviceability_requested_gc(gc_cause)) {
-    IsGCActiveMark mark;
+  if (ScavengeBeforeFullGC) {
+    PSScavenge::invoke_no_policy();
+  }
 
-    if (ScavengeBeforeFullGC) {
-      PSScavenge::invoke_no_policy();
-    }
+  const bool clear_all_soft_refs =
+    heap->collector_policy()->should_clear_all_soft_refs();
 
-    int count = (maximum_heap_compaction)?1:MarkSweepAlwaysCompactCount;
-    IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
-    PSMarkSweep::invoke_no_policy(maximum_heap_compaction);
-  }
+  int count = (maximum_heap_compaction)?1:MarkSweepAlwaysCompactCount;
+  IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
+  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
 }
 
 // This method contains no policy. You should probably
@@ -89,6 +89,10 @@
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
 
+  // The scope of casr should end after code that can change
+  // CollectorPolicy::_should_clear_all_soft_refs.
+  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());
+
   PSYoungGen* young_gen = heap->young_gen();
   PSOldGen* old_gen = heap->old_gen();
   PSPermGen* perm_gen = heap->perm_gen();
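ClearedAllSoftRefs acts as a scope guard over the collection; a plausible minimal shape is sketched below, assuming the guard simply reports back to the policy from its destructor. The real class may differ in detail.

    // Hypothetical sketch; the callback name is an assumption.
    class ClearedAllSoftRefs : public StackObj {
      bool             _clear_all_soft_refs;
      CollectorPolicy* _collector_policy;
     public:
      ClearedAllSoftRefs(bool clear_all_soft_refs, CollectorPolicy* policy)
        : _clear_all_soft_refs(clear_all_soft_refs), _collector_policy(policy) {}
      ~ClearedAllSoftRefs() {
        if (_clear_all_soft_refs) {
          // Record that this collection honored the clear-all request, so
          // the policy can stop asking on subsequent collections.
          _collector_policy->cleared_all_soft_refs();
        }
      }
    };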
@@ -275,7 +279,8 @@
                                  old_gen->max_gen_size(),
                                  max_eden_size,
                                  true /* full gc*/,
-                                 gc_cause);
+                                 gc_cause,
+                                 heap->collector_policy());
 
         heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());
 
@@ -326,19 +331,6 @@
     // Track memory usage and detect low memory
     MemoryService::track_memory_usage();
     heap->update_counters();
-
-    if (PrintGCDetails) {
-      if (size_policy->print_gc_time_limit_would_be_exceeded()) {
-        if (size_policy->gc_time_limit_exceeded()) {
-          gclog_or_tty->print_cr("      GC time is exceeding GCTimeLimit "
-            "of %d%%", GCTimeLimit);
-        } else {
-          gclog_or_tty->print_cr("      GC time would exceed GCTimeLimit "
-            "of %d%%", GCTimeLimit);
-        }
-      }
-      size_policy->set_print_gc_time_limit_would_be_exceeded(false);
-    }
   }
 
   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
--- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Wed May 19 10:22:39 2010 -0700
@@ -65,7 +65,7 @@
   // and releasing the heap lock, which is held during gc's anyway. This method is not
   // safe for use at the same time as allocate_noexpand()!
   HeapWord* cas_allocate_noexpand(size_t word_size) {
-    assert(SafepointSynchronize::is_at_safepoint(), "Must only be called at safepoint")
+    assert(SafepointSynchronize::is_at_safepoint(), "Must only be called at safepoint");
     HeapWord* res = object_space()->cas_allocate(word_size);
     if (res != NULL) {
       _start_array.allocate_block(res);
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2005-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1923,31 +1923,32 @@
 //
 // Note that this method should only be called from the vm_thread while at a
 // safepoint.
+//
+// Note that the all_soft_refs_clear flag in the collector policy
+// may be true because this method can be called without intervening
+// activity.  For example, when the heap space is tight and full measures
+// are being taken to free space.
 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
          "should be in vm thread");
+
   ParallelScavengeHeap* heap = gc_heap();
   GCCause::Cause gc_cause = heap->gc_cause();
   assert(!heap->is_gc_active(), "not reentrant");
 
   PSAdaptiveSizePolicy* policy = heap->size_policy();
-
-  // Before each allocation/collection attempt, find out from the
-  // policy object if GCs are, on the whole, taking too long. If so,
-  // bail out without attempting a collection.  The exceptions are
-  // for explicitly requested GC's.
-  if (!policy->gc_time_limit_exceeded() ||
-      GCCause::is_user_requested_gc(gc_cause) ||
-      GCCause::is_serviceability_requested_gc(gc_cause)) {
-    IsGCActiveMark mark;
-
-    if (ScavengeBeforeFullGC) {
-      PSScavenge::invoke_no_policy();
-    }
-
-    PSParallelCompact::invoke_no_policy(maximum_heap_compaction);
+  IsGCActiveMark mark;
+
+  if (ScavengeBeforeFullGC) {
+    PSScavenge::invoke_no_policy();
   }
+
+  const bool clear_all_soft_refs =
+    heap->collector_policy()->should_clear_all_soft_refs();
+
+  PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
+                                      maximum_heap_compaction);
 }
 
 bool ParallelCompactData::region_contains(size_t region_index, HeapWord* addr) {
@@ -1976,6 +1977,11 @@
   PSPermGen* perm_gen = heap->perm_gen();
   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
 
+  // The scope of casr should end after code that can change
+  // CollectorPolicy::_should_clear_all_soft_refs.
+  ClearedAllSoftRefs casr(maximum_heap_compaction,
+                          heap->collector_policy());
+
   if (ZapUnusedHeapArea) {
     // Save information needed to minimize mangling
     heap->record_gen_tops_before_GC();
@@ -2109,7 +2115,8 @@
                               old_gen->max_gen_size(),
                               max_eden_size,
                               true /* full gc*/,
-                              gc_cause);
+                              gc_cause,
+                              heap->collector_policy());
 
         heap->resize_old_gen(
           size_policy->calculated_old_free_size_in_bytes());
@@ -2157,19 +2164,6 @@
     // Track memory usage and detect low memory
     MemoryService::track_memory_usage();
     heap->update_counters();
-
-    if (PrintGCDetails) {
-      if (size_policy->print_gc_time_limit_would_be_exceeded()) {
-        if (size_policy->gc_time_limit_exceeded()) {
-          gclog_or_tty->print_cr("      GC time is exceeding GCTimeLimit "
-            "of %d%%", GCTimeLimit);
-        } else {
-          gclog_or_tty->print_cr("      GC time would exceed GCTimeLimit "
-            "of %d%%", GCTimeLimit);
-        }
-      }
-      size_policy->set_print_gc_time_limit_would_be_exceeded(false);
-    }
   }
 
   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
@@ -3283,7 +3277,7 @@
     if (status == ParMarkBitMap::incomplete) {
       // The last obj that starts in the source region does not end in the
       // region.
-      assert(closure.source() < end_addr, "sanity")
+      assert(closure.source() < end_addr, "sanity");
       HeapWord* const obj_beg = closure.source();
       HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(),
                                        src_space_top);
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -187,8 +187,7 @@
 //
 // Note that this method should only be called from the vm_thread while
 // at a safepoint!
-void PSScavenge::invoke()
-{
+void PSScavenge::invoke() {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
   assert(!Universe::heap()->is_gc_active(), "not reentrant");
@@ -197,29 +196,25 @@
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 
   PSAdaptiveSizePolicy* policy = heap->size_policy();
+  IsGCActiveMark mark;
 
-  // Before each allocation/collection attempt, find out from the
-  // policy object if GCs are, on the whole, taking too long. If so,
-  // bail out without attempting a collection.
-  if (!policy->gc_time_limit_exceeded()) {
-    IsGCActiveMark mark;
+  bool scavenge_was_done = PSScavenge::invoke_no_policy();
 
-    bool scavenge_was_done = PSScavenge::invoke_no_policy();
-
-    PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
+  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
+  if (UsePerfData)
+    counters->update_full_follows_scavenge(0);
+  if (!scavenge_was_done ||
+      policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
     if (UsePerfData)
-      counters->update_full_follows_scavenge(0);
-    if (!scavenge_was_done ||
-        policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
-      if (UsePerfData)
-        counters->update_full_follows_scavenge(full_follows_scavenge);
+      counters->update_full_follows_scavenge(full_follows_scavenge);
+    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
+    CollectorPolicy* cp = heap->collector_policy();
+    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();
 
-      GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
-      if (UseParallelOldGC) {
-        PSParallelCompact::invoke_no_policy(false);
-      } else {
-        PSMarkSweep::invoke_no_policy(false);
-      }
+    if (UseParallelOldGC) {
+      PSParallelCompact::invoke_no_policy(clear_all_softrefs);
+    } else {
+      PSMarkSweep::invoke_no_policy(clear_all_softrefs);
     }
   }
 }
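The escalation hinges on should_full_GC(), which asks whether the old generation can still absorb another scavenge's promotions; a hedged sketch of that predicate (the padded-average comparison is an assumption about its shape):

    // Illustrative sketch; accessor names are assumptions.
    bool PSAdaptiveSizePolicy::should_full_GC(size_t old_free_in_bytes) {
      // The next scavenge will promote roughly as much as recent ones did;
      // if that padded estimate does not fit in the old gen, escalate now.
      return (size_t) avg_promoted()->padded_average() > old_free_in_bytes;
    }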
@@ -447,6 +442,9 @@
       size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
       size_policy->update_averages(_survivor_overflow, survived, promoted);
 
+      // A successful scavenge should reset the GC overhead limit count, which
+      // is maintained for full GCs.
+      size_policy->reset_gc_overhead_limit_count();
       if (UseAdaptiveSizePolicy) {
         // Calculate the new survivor size and tenuring threshold
 
@@ -523,7 +521,8 @@
                                    old_gen->max_gen_size(),
                                    max_eden_size,
                                    false  /* full gc*/,
-                                   gc_cause);
+                                   gc_cause,
+                                   heap->collector_policy());
 
         }
         // Resize the young generation at every collection
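
The restructured PSScavenge::invoke() above no longer bails out up front when
gc_time_limit_exceeded() was set: the scavenge always runs, and the policy only
decides whether a full collection follows and whether that full collection
should clear all SoftReferences. Below is a minimal standalone C++ sketch of
the new control flow; PolicyStub and HeapStub are illustrative stand-ins, not
HotSpot's real PSAdaptiveSizePolicy or heap classes:

    #include <cstddef>
    #include <cstdio>

    // Stand-in for the size policy / collector policy pair.
    struct PolicyStub {
      bool clear_soft_refs = false;   // as set by check_gc_overhead_limit()
      bool should_full_gc(size_t old_free) const { return old_free < 1024; }
      bool should_clear_all_soft_refs() const { return clear_soft_refs; }
    };

    // Stand-in for the heap; scavenge() returns true when it completed.
    struct HeapStub {
      size_t old_gen_free = 512;      // pretend the old gen is nearly full
      bool scavenge() { return true; }
      void full_gc(bool clear_all_softrefs) {
        std::printf("full GC, clear_all_softrefs=%d\n", (int)clear_all_softrefs);
      }
    };

    // Mirrors the new invoke(): scavenge first, then decide on a full GC.
    void invoke(HeapStub& heap, PolicyStub& policy) {
      bool scavenge_was_done = heap.scavenge();
      if (!scavenge_was_done || policy.should_full_gc(heap.old_gen_free)) {
        // The soft-ref decision is sampled once and handed to the full GC.
        heap.full_gc(policy.should_clear_all_soft_refs());
      }
    }

    int main() {
      HeapStub heap;
      PolicyStub policy;
      policy.clear_soft_refs = true;  // as if the overhead limit were near
      invoke(heap, policy);
      return 0;
    }
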
--- a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp	Wed May 19 10:22:39 2010 -0700
@@ -521,7 +521,7 @@
     }
 
     eden_end = eden_start + eden_size;
-    assert(eden_end >= eden_start, "addition overflowed")
+    assert(eden_end >= eden_start, "addition overflowed");
 
     // To may resize into from space as long as it is clear of live data.
     // From space must remain page aligned, though, so we need to do some
@@ -605,7 +605,7 @@
                        pointer_delta(to_start, eden_start, sizeof(char)));
     }
     eden_end = eden_start + eden_size;
-    assert(eden_end >= eden_start, "addition overflowed")
+    assert(eden_end >= eden_start, "addition overflowed");
 
     // Could choose to not let eden shrink
     // to_start = MAX2(to_start, eden_end);
--- a/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2004-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2004-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,13 +44,15 @@
     _survivor_size(init_survivor_size),
     _gc_pause_goal_sec(gc_pause_goal_sec),
     _throughput_goal(1.0 - double(1.0 / (1.0 + (double) gc_cost_ratio))),
-    _gc_time_limit_exceeded(false),
-    _print_gc_time_limit_would_be_exceeded(false),
-    _gc_time_limit_count(0),
+    _gc_overhead_limit_exceeded(false),
+    _print_gc_overhead_limit_would_be_exceeded(false),
+    _gc_overhead_limit_count(0),
     _latest_minor_mutator_interval_seconds(0),
     _threshold_tolerance_percent(1.0 + ThresholdTolerance/100.0),
     _young_gen_change_for_minor_throughput(0),
     _old_gen_change_for_major_throughput(0) {
+  assert(AdaptiveSizePolicyGCTimeLimitThreshold > 0,
+    "No opportunity to clear SoftReferences before GC overhead limit");
   _avg_minor_pause    =
     new AdaptivePaddedAverage(AdaptiveTimeWeight, PausePadding);
   _avg_minor_interval = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
@@ -278,6 +280,147 @@
   set_decide_at_full_gc(0);
 }
 
+void AdaptiveSizePolicy::check_gc_overhead_limit(
+                                          size_t young_live,
+                                          size_t eden_live,
+                                          size_t max_old_gen_size,
+                                          size_t max_eden_size,
+                                          bool   is_full_gc,
+                                          GCCause::Cause gc_cause,
+                                          CollectorPolicy* collector_policy) {
+
+  // Ignore explicit GC's.  Exiting here does not set the flag and
+  // does not reset the count.  Updating of the averages for system
+  // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
+  if (GCCause::is_user_requested_gc(gc_cause) ||
+      GCCause::is_serviceability_requested_gc(gc_cause)) {
+    return;
+  }
+  // eden_limit is the upper limit on the size of eden based on
+  // the maximum size of the young generation and the sizes
+  // of the survivor spaces.
+  // The question being asked is whether the gc costs are high
+  // and the space being recovered by a collection is low.
+  // free_in_young_gen is the free space in the young generation
+  // after a collection and promo_live is the free space in the old
+  // generation after a collection.
+  //
+  // Use the minimum of the current value of the live in the
+  // young gen or the average of the live in the young gen.
+  // If the current value drops quickly, that should be taken
+  // into account (i.e., don't trigger if the amount of free
+  // space has suddenly jumped up).  If the current is much
+  // higher than the average, use the average since it represents
+  // the longer term behavior.
+  const size_t live_in_eden =
+    MIN2(eden_live, (size_t) avg_eden_live()->average());
+  const size_t free_in_eden = max_eden_size > live_in_eden ?
+    max_eden_size - live_in_eden : 0;
+  const size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
+  const size_t total_free_limit = free_in_old_gen + free_in_eden;
+  const size_t total_mem = max_old_gen_size + max_eden_size;
+  const double mem_free_limit = total_mem * (GCHeapFreeLimit/100.0);
+  const double mem_free_old_limit = max_old_gen_size * (GCHeapFreeLimit/100.0);
+  const double mem_free_eden_limit = max_eden_size * (GCHeapFreeLimit/100.0);
+  const double gc_cost_limit = GCTimeLimit/100.0;
+  size_t promo_limit = (size_t)(max_old_gen_size - avg_old_live()->average());
+  // But don't force a promo size below the current promo size. Otherwise,
+  // the promo size will shrink for no good reason.
+  promo_limit = MAX2(promo_limit, _promo_size);
+
+  if (PrintAdaptiveSizePolicy && (Verbose ||
+      (free_in_old_gen < (size_t) mem_free_old_limit &&
+       free_in_eden < (size_t) mem_free_eden_limit))) {
+    gclog_or_tty->print_cr(
+          "PSAdaptiveSizePolicy::compute_generation_free_space limits:"
+          " promo_limit: " SIZE_FORMAT
+          " max_eden_size: " SIZE_FORMAT
+          " total_free_limit: " SIZE_FORMAT
+          " max_old_gen_size: " SIZE_FORMAT
+          " max_eden_size: " SIZE_FORMAT
+          " mem_free_limit: " SIZE_FORMAT,
+          promo_limit, max_eden_size, total_free_limit,
+          max_old_gen_size, max_eden_size,
+          (size_t) mem_free_limit);
+  }
+
+  bool print_gc_overhead_limit_would_be_exceeded = false;
+  if (is_full_gc) {
+    if (gc_cost() > gc_cost_limit &&
+      free_in_old_gen < (size_t) mem_free_old_limit &&
+      free_in_eden < (size_t) mem_free_eden_limit) {
+      // Collections, on average, are taking too much time, and
+      //      gc_cost() > gc_cost_limit
+      // we have too little space available after a full gc.
+      //      total_free_limit < mem_free_limit
+      // where
+      //   total_free_limit is the free space available in
+      //     both generations
+      //   total_mem is the total space available for allocation
+      //     in both generations (survivor spaces are not included
+      //     just as they are not included in eden_limit).
+      //   mem_free_limit is a fraction of total_mem judged to be an
+      //     acceptable amount that is still unused.
+      // The heap can ask for the value of this variable when deciding
+      // whether to throw an OutOfMemoryError.
+      // Note that the gc time limit test only works for the collections
+      // of the young gen + tenured gen and not for collections of the
+      // permanent gen.  That is because the calculation of the space
+      // freed by the collection is the free space in the young gen +
+      // tenured gen.
+      // At this point the GC overhead limit is being exceeded.
+      inc_gc_overhead_limit_count();
+      if (UseGCOverheadLimit) {
+        if (gc_overhead_limit_count() >=
+            AdaptiveSizePolicyGCTimeLimitThreshold) {
+          // All conditions have been met for throwing an out-of-memory
+          set_gc_overhead_limit_exceeded(true);
+          // Avoid consecutive OOM due to the gc time limit by resetting
+          // the counter.
+          reset_gc_overhead_limit_count();
+        } else {
+          // The required number of consecutive collections exceeding
+          // the GC time limit may or may not have been reached. We
+          // are approaching that condition, so to avoid throwing an
+          // out-of-memory before all SoftRefs have been cleared,
+          // set _should_clear_all_soft_refs in CollectorPolicy.
+          // The clearing will be done on the next GC.
+          bool near_limit = gc_overhead_limit_near();
+          if (near_limit) {
+            collector_policy->set_should_clear_all_soft_refs(true);
+            if (PrintGCDetails && Verbose) {
+              gclog_or_tty->print_cr("  Nearing GC overhead limit, "
+                "will be clearing all SoftReference");
+            }
+          }
+        }
+      }
+      // Set this even when the overhead limit will not
+      // cause an out-of-memory.  A diagnostic message indicating
+      // that the overhead limit is being exceeded is sometimes
+      // printed.
+      print_gc_overhead_limit_would_be_exceeded = true;
+
+    } else {
+      // Did not exceed overhead limits
+      reset_gc_overhead_limit_count();
+    }
+  }
+
+  if (UseGCOverheadLimit && PrintGCDetails && Verbose) {
+    if (gc_overhead_limit_exceeded()) {
+      gclog_or_tty->print_cr("      GC is exceeding overhead limit "
+        "of %d%%", GCTimeLimit);
+      reset_gc_overhead_limit_count();
+    } else if (print_gc_overhead_limit_would_be_exceeded) {
+      assert(gc_overhead_limit_count() > 0, "Should not be printing");
+      gclog_or_tty->print_cr("      GC would exceed overhead limit "
+        "of %d%% %d consecutive time(s)",
+        GCTimeLimit, gc_overhead_limit_count());
+    }
+  }
+}
 // Printing
 
 bool AdaptiveSizePolicy::print_adaptive_size_policy_on(outputStream* st) const {
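
Stripped of its diagnostics, check_gc_overhead_limit() above implements one
counting rule: a full GC that both cost more than GCTimeLimit percent of total
time and left less than GCHeapFreeLimit percent free in each generation bumps a
consecutive-failure counter; one collection short of the threshold it requests
SoftReference clearing, and at the threshold it arms the out-of-memory flag and
resets. A simplified standalone sketch of just that rule; the names and driver
values are illustrative, and the real code also consults UseGCOverheadLimit and
ignores user-requested GCs:

    #include <cstddef>
    #include <cstdio>

    struct OverheadLimitState {
      unsigned count = 0;            // consecutive over-limit full GCs
      bool limit_exceeded = false;   // heap may throw OOME when true
      bool clear_soft_refs = false;  // request SoftReference clearing first
    };

    // threshold plays the role of AdaptiveSizePolicyGCTimeLimitThreshold.
    void check_overhead_limit(OverheadLimitState& s,
                              double gc_cost, double gc_cost_limit,
                              size_t free_old, size_t free_old_limit,
                              size_t free_eden, size_t free_eden_limit,
                              unsigned threshold) {
      bool over = gc_cost > gc_cost_limit &&
                  free_old < free_old_limit &&
                  free_eden < free_eden_limit;
      if (!over) { s.count = 0; return; }  // any good full GC resets the count
      if (++s.count >= threshold) {
        s.limit_exceeded = true;           // all conditions met: OOME-eligible
        s.count = 0;                       // avoid back-to-back OOMEs
      } else if (s.count >= threshold - 1) {
        s.clear_soft_refs = true;          // near the limit: clear SoftRefs first
      }
    }

    int main() {
      OverheadLimitState s;
      for (int i = 0; i < 5; i++)          // five bad full GCs in a row
        check_overhead_limit(s, 0.99, 0.98, 10, 100, 10, 100, 5);
      std::printf("exceeded=%d clear_soft_refs=%d\n",
                  (int)s.limit_exceeded, (int)s.clear_soft_refs);
      return 0;
    }

Note how the soft-ref request necessarily fires one collection before the
out-of-memory flag is armed, which is what lets mem_allocate() later in this
changeset pair limit_exceeded with softrefs_clear in an assert.
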
--- a/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2004-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2004-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 
 // Forward decls
 class elapsedTimer;
+class CollectorPolicy;
 
 class AdaptiveSizePolicy : public CHeapObj {
  friend class GCAdaptivePolicyCounters;
@@ -75,13 +76,16 @@
 
   // This is a hint for the heap:  we've detected that gc times
   // are taking longer than GCTimeLimit allows.
-  bool _gc_time_limit_exceeded;
-  // Use for diagnostics only.  If UseGCTimeLimit is false,
+  bool _gc_overhead_limit_exceeded;
+  // Used for diagnostics only.  If UseGCOverheadLimit is false,
   // this variable is still set.
-  bool _print_gc_time_limit_would_be_exceeded;
+  bool _print_gc_overhead_limit_would_be_exceeded;
   // Count of consecutive GC that have exceeded the
   // GC time limit criterion.
-  uint _gc_time_limit_count;
+  uint _gc_overhead_limit_count;
+  // This flag signals that GCTimeLimit is being exceeded
+  // but may not have done so for the required number of consecutive
+  // collections.
 
   // Minor collection timers used to determine both
   // pause and interval times for collections.
@@ -406,22 +410,21 @@
   // Most heaps will choose to throw an OutOfMemoryError when
   // this occurs but it is up to the heap to request this information
   // of the policy
-  bool gc_time_limit_exceeded() {
-    return _gc_time_limit_exceeded;
-  }
-  void set_gc_time_limit_exceeded(bool v) {
-    _gc_time_limit_exceeded = v;
+  bool gc_overhead_limit_exceeded() {
+    return _gc_overhead_limit_exceeded;
   }
-  bool print_gc_time_limit_would_be_exceeded() {
-    return _print_gc_time_limit_would_be_exceeded;
-  }
-  void set_print_gc_time_limit_would_be_exceeded(bool v) {
-    _print_gc_time_limit_would_be_exceeded = v;
+  void set_gc_overhead_limit_exceeded(bool v) {
+    _gc_overhead_limit_exceeded = v;
   }
 
-  uint gc_time_limit_count() { return _gc_time_limit_count; }
-  void reset_gc_time_limit_count() { _gc_time_limit_count = 0; }
-  void inc_gc_time_limit_count() { _gc_time_limit_count++; }
+  // Tests whether conditions indicate the GC overhead limit is being approached.
+  bool gc_overhead_limit_near() {
+    return gc_overhead_limit_count() >=
+        (AdaptiveSizePolicyGCTimeLimitThreshold - 1);
+  }
+  uint gc_overhead_limit_count() { return _gc_overhead_limit_count; }
+  void reset_gc_overhead_limit_count() { _gc_overhead_limit_count = 0; }
+  void inc_gc_overhead_limit_count() { _gc_overhead_limit_count++; }
   // accessors for flags recording the decisions to resize the
   // generations to meet the pause goal.
 
@@ -436,6 +439,16 @@
   int decide_at_full_gc() { return _decide_at_full_gc; }
   void set_decide_at_full_gc(int v) { _decide_at_full_gc = v; }
 
+  // Check the conditions for an out-of-memory due to excessive GC time.
+  // Set _gc_overhead_limit_exceeded if all the conditions have been met.
+  void check_gc_overhead_limit(size_t young_live,
+                               size_t eden_live,
+                               size_t max_old_gen_size,
+                               size_t max_eden_size,
+                               bool   is_full_gc,
+                               GCCause::Cause gc_cause,
+                               CollectorPolicy* collector_policy);
+
   // Printing support
   virtual bool print_adaptive_size_policy_on(outputStream* st) const;
   bool print_adaptive_size_policy_on(outputStream* st, int
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Wed May 19 10:22:39 2010 -0700
@@ -115,11 +115,25 @@
 void VM_GC_HeapInspection::doit() {
   HandleMark hm;
   CollectedHeap* ch = Universe::heap();
+  ch->ensure_parsability(false); // must happen, even if collection does
+                                 // not happen (e.g. due to GC_locker)
   if (_full_gc) {
-    ch->collect_as_vm_thread(GCCause::_heap_inspection);
-  } else {
-    // make the heap parsable (no need to retire TLABs)
-    ch->ensure_parsability(false);
+    // The collection attempt below would be skipped anyway if
+    // the gc locker is held. The following dump may then be a tad
+    // misleading to someone expecting only live objects to show
+    // up in the dump (see CR 6944195). Just issue a suitable warning
+    // in that case and do not attempt a collection.
+    // The latter is a subtle point: even a failed attempt
+    // to GC will, in fact, induce one in the future, which we
+    // want to avoid here because this GC holds value for us
+    // only if it happens now, not at some point in the
+    // eventual future.
+    if (GC_locker::is_active()) {
+      warning("GC locker is held; pre-dump GC was skipped");
+    } else {
+      ch->collect_as_vm_thread(GCCause::_heap_inspection);
+    }
   }
   HeapInspection::heap_inspection(_out, _need_prologue /* need_prologue */);
 }
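
The reworked VM_GC_HeapInspection::doit() above makes the heap parsable
unconditionally and attempts the pre-dump collection only when the GC locker is
free. A compact sketch of that ordering, with the locker state passed in as a
plain flag; GC_locker and the heap are stubbed, not HotSpot's classes:

    #include <cstdio>

    struct HeapStub {
      void ensure_parsability() { std::puts("heap made parsable"); }
      void collect()            { std::puts("pre-dump GC"); }
      void inspect()            { std::puts("dumping heap"); }
    };

    // Mirrors the new doit(): parsability is unconditional, the GC is not.
    void heap_inspection(HeapStub& heap, bool full_gc, bool gc_locker_active) {
      heap.ensure_parsability();   // must happen even if no collection does
      if (full_gc) {
        if (gc_locker_active) {
          std::puts("warning: GC locker is held; pre-dump GC was skipped");
        } else {
          heap.collect();
        }
      }
      heap.inspect();
    }

    int main() {
      HeapStub heap;
      heap_inspection(heap, /*full_gc=*/true, /*gc_locker_active=*/true);
      return 0;
    }
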
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2005-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -89,8 +89,19 @@
     if (full) {
       _full_gc_count_before = full_gc_count_before;
     }
+    // In ParallelScavengeHeap::mem_allocate() collections can be
+    // executed within a loop: one collection may set
+    // _all_soft_refs_clear and another collection may start before
+    // the flag is reset, so _all_soft_refs_clear can already be true
+    // when this collection starts.  Don't assert that
+    // _all_soft_refs_clear must be false here, even though
+    // mutators have run.  Soft refs will be cleared again in this
+    // collection.
   }
-  ~VM_GC_Operation() {}
+  ~VM_GC_Operation() {
+    CollectedHeap* ch = Universe::heap();
+    ch->collector_policy()->set_all_soft_refs_clear(false);
+  }
 
   // Acquire the reference synchronization lock
   virtual bool doit_prologue();
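
The new ~VM_GC_Operation() above turns the operation into a scope guard for
_all_soft_refs_clear: the flag is dropped when the operation ends, however it
exits, so it cannot leak into the next mutator phase. A tiny standalone sketch
of that RAII shape; PolicyStub stands in for the real CollectorPolicy:

    #include <cstdio>

    struct PolicyStub {
      bool all_soft_refs_clear = false;
    };

    // Mirrors VM_GC_Operation: the destructor unconditionally resets the flag.
    class GCOperation {
      PolicyStub& policy_;
     public:
      explicit GCOperation(PolicyStub& p) : policy_(p) {}
      ~GCOperation() { policy_.all_soft_refs_clear = false; }
      void collect() { policy_.all_soft_refs_clear = true; }  // GC cleared them
    };

    int main() {
      PolicyStub policy;
      {
        GCOperation op(policy);
        op.collect();
      }  // destructor runs here, clearing the flag
      std::printf("after op: %d\n", (int)policy.all_soft_refs_clear);  // 0
      return 0;
    }
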
--- a/src/share/vm/gc_interface/collectedHeap.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/gc_interface/collectedHeap.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
 class ThreadClosure;
 class AdaptiveSizePolicy;
 class Thread;
+class CollectorPolicy;
 
 //
 // CollectedHeap
@@ -506,6 +507,9 @@
   // Return the AdaptiveSizePolicy for the heap.
   virtual AdaptiveSizePolicy* size_policy() = 0;
 
+  // Return the CollectorPolicy for the heap
+  virtual CollectorPolicy* collector_policy() const = 0;
+
   // Iterate over all the ref-containing fields of all objects, calling
   // "cl.do_oop" on each. This includes objects in permanent memory.
   virtual void oop_iterate(OopClosure* cl) = 0;
--- a/src/share/vm/includeDB_core	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/includeDB_core	Wed May 19 10:22:39 2010 -0700
@@ -2867,6 +2867,7 @@
 methodHandles.hpp                       globals.hpp
 methodHandles.hpp                       interfaceSupport.hpp
 methodHandles.hpp                       javaClasses.hpp
+methodHandles.hpp                       no_precompiled_headers
 methodHandles.hpp                       vmSymbols.hpp
 
 methodHandles.cpp                       allocation.inline.hpp
@@ -2930,6 +2931,7 @@
 methodOop.cpp                           jvmtiExport.hpp
 methodOop.cpp                           klassOop.hpp
 methodOop.cpp                           methodDataOop.hpp
+methodOop.cpp                           methodHandleWalk.hpp
 methodOop.cpp                           methodOop.hpp
 methodOop.cpp                           nativeLookup.hpp
 methodOop.cpp                           oop.inline.hpp
@@ -4075,6 +4077,7 @@
 systemDictionary.cpp                    klass.inline.hpp
 systemDictionary.cpp                    loaderConstraints.hpp
 systemDictionary.cpp                    methodDataOop.hpp
+systemDictionary.cpp                    methodHandles.hpp
 systemDictionary.cpp                    mutexLocker.hpp
 systemDictionary.cpp                    objArrayKlass.hpp
 systemDictionary.cpp                    oop.inline.hpp
--- a/src/share/vm/includeDB_zero	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/includeDB_zero	Wed May 19 10:22:39 2010 -0700
@@ -1,6 +1,6 @@
 //
 // Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
-// Copyright 2009 Red Hat, Inc.
+// Copyright 2009, 2010 Red Hat, Inc.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,8 @@
 
 // NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps!
 
+cppInterpreter_<arch>.cpp               stack_<arch>.inline.hpp
+
 entryFrame_<arch>.hpp                   javaCalls.hpp
 entryFrame_<arch>.hpp                   stack_<arch>.hpp
 
@@ -47,9 +49,20 @@
 interpreterFrame_<arch>.hpp             stack_<arch>.hpp
 interpreterFrame_<arch>.hpp             thread.hpp
 
+interpreterRT_<arch>.cpp                stack_<arch>.inline.hpp
+
 sharkFrame_<arch>.hpp                   methodOop.hpp
 sharkFrame_<arch>.hpp                   stack_<arch>.hpp
 
 stack_<arch>.hpp                        sizes.hpp
 
+stack_<arch>.inline.hpp                 stack_<arch>.hpp
+stack_<arch>.inline.hpp                 thread.hpp
+
+stack_<arch>.cpp                        interpreterRuntime.hpp
+stack_<arch>.cpp                        stack_<arch>.hpp
+stack_<arch>.cpp                        stack_<arch>.inline.hpp
+
+stubGenerator_<arch>.cpp                stack_<arch>.inline.hpp
+
 thread.hpp                              stack_<arch>.hpp
--- a/src/share/vm/interpreter/abstractInterpreter.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/interpreter/abstractInterpreter.hpp	Wed May 19 10:22:39 2010 -0700
@@ -167,60 +167,15 @@
   // Debugging/printing
   static void       print();                                    // prints the interpreter code
 
-  // Support for Tagged Stacks
-  //
-  // Tags are stored on the Java Expression stack above the value:
-  //
-  //  tag
-  //  value
-  //
-  // For double values:
-  //
-  //  tag2
-  //  high word
-  //  tag1
-  //  low word
-
  public:
-  static int stackElementWords()   { return TaggedStackInterpreter ? 2 : 1; }
-  static int stackElementSize()    { return stackElementWords()*wordSize; }
-  static int logStackElementSize() { return
-                 TaggedStackInterpreter? LogBytesPerWord+1 : LogBytesPerWord; }
-
-  // Tag is at pointer, value is one below for a stack growing down
-  // (or above for stack growing up)
-  static int  value_offset_in_bytes()  {
-    return TaggedStackInterpreter ?
-      frame::interpreter_frame_expression_stack_direction() * wordSize : 0;
-  }
-  static int  tag_offset_in_bytes()    {
-    assert(TaggedStackInterpreter, "should not call this");
-    return 0;
-  }
-
-  // Tagged Locals
-  // Locals are stored relative to Llocals:
-  //
-  // tag    <- Llocals[n]
-  // value
-  //
-  // Category 2 types are indexed as:
-  //
-  // tag    <- Llocals[-n]
-  // high word
-  // tag    <- Llocals[-n+1]
-  // low word
-  //
+  // Interpreter helpers
+  const static int stackElementWords   = 1;
+  const static int stackElementSize    = stackElementWords * wordSize;
+  const static int logStackElementSize = LogBytesPerWord;
 
   // Local values relative to locals[n]
   static int  local_offset_in_bytes(int n) {
-    return ((frame::interpreter_frame_expression_stack_direction() * n) *
-            stackElementSize()) + value_offset_in_bytes();
-  }
-  static int  local_tag_offset_in_bytes(int n) {
-    assert(TaggedStackInterpreter, "should not call this");
-    return ((frame::interpreter_frame_expression_stack_direction() * n) *
-            stackElementSize()) + tag_offset_in_bytes();
+    return ((frame::interpreter_frame_expression_stack_direction() * n) * stackElementSize);
   }
 
   // access to stacked values according to type:
@@ -237,29 +192,15 @@
   static jlong long_in_slot(intptr_t* slot_addr) {
     if (sizeof(intptr_t) >= sizeof(jlong)) {
       return *(jlong*) slot_addr;
-    } else if (!TaggedStackInterpreter) {
+    } else {
       return Bytes::get_native_u8((address)slot_addr);
-    } else {
-      assert(sizeof(intptr_t) * 2 == sizeof(jlong), "ILP32");
-      // assemble the long in memory order (not arithmetic order)
-      union { jlong j; jint i[2]; } u;
-      u.i[0] = (jint) slot_addr[0*stackElementSize()];
-      u.i[1] = (jint) slot_addr[1*stackElementSize()];
-      return u.j;
     }
   }
   static void set_long_in_slot(intptr_t* slot_addr, jlong value) {
     if (sizeof(intptr_t) >= sizeof(jlong)) {
       *(jlong*) slot_addr = value;
-    } else if (!TaggedStackInterpreter) {
+    } else {
       Bytes::put_native_u8((address)slot_addr, value);
-    } else {
-      assert(sizeof(intptr_t) * 2 == sizeof(jlong), "ILP32");
-      // assemble the long in memory order (not arithmetic order)
-      union { jlong j; jint i[2]; } u;
-      u.j = value;
-      slot_addr[0*stackElementSize()] = (intptr_t) u.i[0];
-      slot_addr[1*stackElementSize()] = (intptr_t) u.i[1];
     }
   }
   static void get_jvalue_in_slot(intptr_t* slot_addr, BasicType type, jvalue* value) {
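
With the tagged stack gone, the slot geometry above collapses to compile-time
constants: one word per stack element, no tag word to skip, and local offsets
become plain multiples of the slot size. A standalone sketch of the resulting
arithmetic; wordSize and the stack-growth direction are illustrative values,
where HotSpot takes them from the platform:

    #include <cstdio>

    // Illustrative 64-bit platform values.
    const int wordSize        = 8;
    const int LogBytesPerWord = 3;
    const int stack_direction = -1;   // expression stack grows down

    // Matches the new AbstractInterpreter constants.
    const int stackElementWords   = 1;
    const int stackElementSize    = stackElementWords * wordSize;
    const int logStackElementSize = LogBytesPerWord;

    // Local n now sits at a plain n * slot-size offset.
    int local_offset_in_bytes(int n) {
      return (stack_direction * n) * stackElementSize;
    }

    int main() {
      std::printf("slot size: %d bytes\n", stackElementSize);        // 8
      std::printf("local 3 offset: %d\n", local_offset_in_bytes(3)); // -24
      return 0;
    }
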
--- a/src/share/vm/interpreter/bytecodeInterpreter.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -189,7 +189,7 @@
 
 // JavaStack Implementation
 #define MORE_STACK(count)  \
-    (topOfStack -= ((count) * Interpreter::stackElementWords()))
+    (topOfStack -= ((count) * Interpreter::stackElementWords))
 
 
 #define UPDATE_PC(opsize) {pc += opsize; }
@@ -1950,8 +1950,8 @@
         jint size = STACK_INT(-1);
         // stack grows down, dimensions are up!
         jint *dimarray =
-                   (jint*)&topOfStack[dims * Interpreter::stackElementWords()+
-                                      Interpreter::stackElementWords()-1];
+                   (jint*)&topOfStack[dims * Interpreter::stackElementWords+
+                                      Interpreter::stackElementWords-1];
         //adjust pointer to start of stack element
         CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray),
                 handle_exception);
@@ -2339,8 +2339,8 @@
               goto opcode_switch;
           }
 #endif
-          fatal2("\t*** Unimplemented opcode: %d = %s\n",
-                 opcode, Bytecodes::name((Bytecodes::Code)opcode));
+          fatal(err_msg("Unimplemented opcode %d = %s", opcode,
+                        Bytecodes::name((Bytecodes::Code)opcode)));
           goto finish;
 
       } /* switch(opc) */
@@ -2375,7 +2375,7 @@
     assert(except_oop(), "No exception to process");
     intptr_t continuation_bci;
     // expression stack is emptied
-    topOfStack = istate->stack_base() - Interpreter::stackElementWords();
+    topOfStack = istate->stack_base() - Interpreter::stackElementWords;
     CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()),
             handle_exception);
 
@@ -2692,219 +2692,141 @@
 // The implementations are platform dependent. We have to worry about alignment
 // issues on some machines which can change on the same platform depending on
 // whether it is an LP64 machine also.
-#ifdef ASSERT
-void BytecodeInterpreter::verify_stack_tag(intptr_t *tos, frame::Tag tag, int offset) {
-  if (TaggedStackInterpreter) {
-    frame::Tag t = (frame::Tag)tos[Interpreter::expr_tag_index_at(-offset)];
-    assert(t == tag, "stack tag mismatch");
-  }
-}
-#endif // ASSERT
-
 address BytecodeInterpreter::stack_slot(intptr_t *tos, int offset) {
-  debug_only(verify_stack_tag(tos, frame::TagValue, offset));
   return (address) tos[Interpreter::expr_index_at(-offset)];
 }
 
 jint BytecodeInterpreter::stack_int(intptr_t *tos, int offset) {
-  debug_only(verify_stack_tag(tos, frame::TagValue, offset));
   return *((jint*) &tos[Interpreter::expr_index_at(-offset)]);
 }
 
 jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) {
-  debug_only(verify_stack_tag(tos, frame::TagValue, offset));
   return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]);
 }
 
 oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) {
-  debug_only(verify_stack_tag(tos, frame::TagReference, offset));
   return (oop)tos [Interpreter::expr_index_at(-offset)];
 }
 
 jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) {
-  debug_only(verify_stack_tag(tos, frame::TagValue, offset));
-  debug_only(verify_stack_tag(tos, frame::TagValue, offset-1));
   return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d;
 }
 
 jlong BytecodeInterpreter::stack_long(intptr_t *tos, int offset) {
-  debug_only(verify_stack_tag(tos, frame::TagValue, offset));
-  debug_only(verify_stack_tag(tos, frame::TagValue, offset-1));
   return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l;
 }
 
-void BytecodeInterpreter::tag_stack(intptr_t *tos, frame::Tag tag, int offset) {
-  if (TaggedStackInterpreter)
-    tos[Interpreter::expr_tag_index_at(-offset)] = (intptr_t)tag;
-}
-
 // only used for value types
 void BytecodeInterpreter::set_stack_slot(intptr_t *tos, address value,
                                                         int offset) {
-  tag_stack(tos, frame::TagValue, offset);
   *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value;
 }
 
 void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value,
                                                        int offset) {
-  tag_stack(tos, frame::TagValue, offset);
   *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value;
 }
 
 void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value,
                                                          int offset) {
-  tag_stack(tos, frame::TagValue, offset);
   *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value;
 }
 
 void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value,
                                                           int offset) {
-  tag_stack(tos, frame::TagReference, offset);
   *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value;
 }
 
 // needs to be platform dep for the 32 bit platforms.
 void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value,
                                                           int offset) {
-  tag_stack(tos, frame::TagValue, offset);
-  tag_stack(tos, frame::TagValue, offset-1);
   ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value;
 }
 
 void BytecodeInterpreter::set_stack_double_from_addr(intptr_t *tos,
                                               address addr, int offset) {
-  tag_stack(tos, frame::TagValue, offset);
-  tag_stack(tos, frame::TagValue, offset-1);
   (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d =
                         ((VMJavaVal64*)addr)->d);
 }
 
 void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value,
                                                         int offset) {
-  tag_stack(tos, frame::TagValue, offset);
   ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb;
-  tag_stack(tos, frame::TagValue, offset-1);
   ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value;
 }
 
 void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos,
                                             address addr, int offset) {
-  tag_stack(tos, frame::TagValue, offset);
   ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb;
-  tag_stack(tos, frame::TagValue, offset-1);
   ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l =
                         ((VMJavaVal64*)addr)->l;
 }
 
 // Locals
 
-#ifdef ASSERT
-void BytecodeInterpreter::verify_locals_tag(intptr_t *locals, frame::Tag tag,
-                                     int offset) {
-  if (TaggedStackInterpreter) {
-    frame::Tag t = (frame::Tag)locals[Interpreter::local_tag_index_at(-offset)];
-    assert(t == tag, "locals tag mismatch");
-  }
-}
-#endif // ASSERT
 address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) {
-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
   return (address)locals[Interpreter::local_index_at(-offset)];
 }
 jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) {
-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
   return (jint)locals[Interpreter::local_index_at(-offset)];
 }
 jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) {
-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
   return (jfloat)locals[Interpreter::local_index_at(-offset)];
 }
 oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) {
-  debug_only(verify_locals_tag(locals, frame::TagReference, offset));
   return (oop)locals[Interpreter::local_index_at(-offset)];
 }
 jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) {
-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
   return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d;
 }
 jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) {
-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
-  debug_only(verify_locals_tag(locals, frame::TagValue, offset+1));
   return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l;
 }
 
 // Returns the address of locals value.
 address BytecodeInterpreter::locals_long_at(intptr_t* locals, int offset) {
-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
-  debug_only(verify_locals_tag(locals, frame::TagValue, offset+1));
   return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
 }
 address BytecodeInterpreter::locals_double_at(intptr_t* locals, int offset) {
-  debug_only(verify_locals_tag(locals, frame::TagValue, offset));
-  debug_only(verify_locals_tag(locals, frame::TagValue, offset+1));
   return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
 }
 
-void BytecodeInterpreter::tag_locals(intptr_t *locals, frame::Tag tag, int offset) {
-  if (TaggedStackInterpreter)
-    locals[Interpreter::local_tag_index_at(-offset)] = (intptr_t)tag;
-}
-
 // Used for local value or returnAddress
 void BytecodeInterpreter::set_locals_slot(intptr_t *locals,
                                    address value, int offset) {
-  tag_locals(locals, frame::TagValue, offset);
   *((address*)&locals[Interpreter::local_index_at(-offset)]) = value;
 }
 void BytecodeInterpreter::set_locals_int(intptr_t *locals,
                                    jint value, int offset) {
-  tag_locals(locals, frame::TagValue, offset);
   *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value;
 }
 void BytecodeInterpreter::set_locals_float(intptr_t *locals,
                                    jfloat value, int offset) {
-  tag_locals(locals, frame::TagValue, offset);
   *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value;
 }
 void BytecodeInterpreter::set_locals_object(intptr_t *locals,
                                    oop value, int offset) {
-  tag_locals(locals, frame::TagReference, offset);
   *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value;
 }
 void BytecodeInterpreter::set_locals_double(intptr_t *locals,
                                    jdouble value, int offset) {
-  tag_locals(locals, frame::TagValue, offset);
-  tag_locals(locals, frame::TagValue, offset+1);
   ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value;
 }
 void BytecodeInterpreter::set_locals_long(intptr_t *locals,
                                    jlong value, int offset) {
-  tag_locals(locals, frame::TagValue, offset);
-  tag_locals(locals, frame::TagValue, offset+1);
   ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value;
 }
 void BytecodeInterpreter::set_locals_double_from_addr(intptr_t *locals,
                                    address addr, int offset) {
-  tag_locals(locals, frame::TagValue, offset);
-  tag_locals(locals, frame::TagValue, offset+1);
   ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d;
 }
 void BytecodeInterpreter::set_locals_long_from_addr(intptr_t *locals,
                                    address addr, int offset) {
-  tag_locals(locals, frame::TagValue, offset);
-  tag_locals(locals, frame::TagValue, offset+1);
   ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l;
 }
 
 void BytecodeInterpreter::astore(intptr_t* tos,    int stack_offset,
                           intptr_t* locals, int locals_offset) {
-  // Copy tag from stack to locals.  astore's operand can be returnAddress
-  // and may not be TagReference
-  if (TaggedStackInterpreter) {
-    frame::Tag t = (frame::Tag) tos[Interpreter::expr_tag_index_at(-stack_offset)];
-    locals[Interpreter::local_tag_index_at(-locals_offset)] = (intptr_t)t;
-  }
   intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)];
   locals[Interpreter::local_index_at(-locals_offset)] = value;
 }
@@ -2912,10 +2834,6 @@
 
 void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
                                    int to_offset) {
-  if (TaggedStackInterpreter) {
-    tos[Interpreter::expr_tag_index_at(-to_offset)] =
-                      (intptr_t)tos[Interpreter::expr_tag_index_at(-from_offset)];
-  }
   tos[Interpreter::expr_index_at(-to_offset)] =
                       (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
 }
@@ -2964,16 +2882,9 @@
 void BytecodeInterpreter::swap(intptr_t *tos) {
   // swap top two elements
   intptr_t val = tos[Interpreter::expr_index_at(1)];
-  frame::Tag t;
-  if (TaggedStackInterpreter) {
-    t = (frame::Tag) tos[Interpreter::expr_tag_index_at(1)];
-  }
   // Copy -2 entry to -1
   copy_stack_slot(tos, -2, -1);
   // Store saved -1 entry into -2
-  if (TaggedStackInterpreter) {
-    tos[Interpreter::expr_tag_index_at(2)] = (intptr_t)t;
-  }
   tos[Interpreter::expr_index_at(2)] = val;
 }
 // --------------------------------------------------------------------------------
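
After the tag removals above, every stack accessor is a single indexed load or
store; the only remaining subtlety is the two-slot convention for longs, where
set_stack_long() poisons the unused slot with the 0xdeedbeeb sentinel. A
simplified sketch of that convention, assuming 64-bit slots; expr_index_at is
an illustrative helper, not the real Interpreter class:

    #include <cstdint>
    #include <cstdio>

    // Expression stack grows down: slot i lives at tos[-i].
    inline int expr_index_at(int i) { return -i; }

    // Longs occupy two slots; the unused one is poisoned so stale reads
    // are easy to spot, mirroring the 0xdeedbeeb store above.
    void set_stack_long(intptr_t* tos, int64_t value, int offset) {
      tos[expr_index_at(-offset + 1)] = (intptr_t)0xdeedbeeb;
      *(int64_t*)&tos[expr_index_at(-offset)] = value;
    }

    int64_t stack_long(intptr_t* tos, int offset) {
      return *(int64_t*)&tos[expr_index_at(-offset)];
    }

    int main() {
      intptr_t stack[8] = {0};
      intptr_t* tos = &stack[4];   // pretend top-of-stack pointer
      set_stack_long(tos, 42, -1);
      std::printf("%lld\n", (long long)stack_long(tos, -1));  // 42
      return 0;
    }
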
--- a/src/share/vm/interpreter/bytecodeInterpreter.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/interpreter/bytecodeInterpreter.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -510,8 +510,6 @@
 static jdouble stack_double(intptr_t *tos, int offset);
 static jlong stack_long(intptr_t *tos, int offset);
 
-static void tag_stack(intptr_t *tos, frame::Tag tag, int offset);
-
 // only used for value types
 static void set_stack_slot(intptr_t *tos, address value, int offset);
 static void set_stack_int(intptr_t *tos, int value, int offset);
@@ -537,8 +535,6 @@
 static address locals_long_at(intptr_t* locals, int offset);
 static address locals_double_at(intptr_t* locals, int offset);
 
-static void tag_locals(intptr_t *locals, frame::Tag tag, int offset);
-
 static void set_locals_slot(intptr_t *locals, address value, int offset);
 static void set_locals_int(intptr_t *locals, jint value, int offset);
 static void set_locals_float(intptr_t *locals, jfloat value, int offset);
@@ -557,8 +553,6 @@
 static void copy_stack_slot(intptr_t *tos, int from_offset, int to_offset);
 
 #ifndef PRODUCT
-static void verify_locals_tag(intptr_t *locals, frame::Tag tag, int offset);
-static void verify_stack_tag(intptr_t *tos, frame::Tag tag, int offset);
 static const char* C_msg(BytecodeInterpreter::messages msg);
 void print();
 #endif // PRODUCT
--- a/src/share/vm/interpreter/bytecodes.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/interpreter/bytecodes.cpp	Wed May 19 10:22:39 2010 -0700
@@ -426,7 +426,9 @@
         if (is_defined(i)) {
           Code code = cast(i);
           Code java = java_code(code);
-          if (can_trap(code) && !can_trap(java)) fatal2("%s can trap => %s can trap, too", name(code), name(java));
+          if (can_trap(code) && !can_trap(java))
+            fatal(err_msg("%s can trap => %s can trap, too", name(code),
+                          name(java)));
         }
       }
     }
--- a/src/share/vm/interpreter/interpreterRuntime.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Wed May 19 10:22:39 2010 -0700
@@ -691,24 +691,21 @@
 
   methodHandle caller_method(thread, method(thread));
 
-  // first determine if there is a bootstrap method
-  {
-    KlassHandle caller_klass(thread, caller_method->method_holder());
-    Handle bootm = SystemDictionary::find_bootstrap_method(caller_klass, KlassHandle(), CHECK);
-    if (bootm.is_null()) {
-      // If there is no bootstrap method, throw IncompatibleClassChangeError.
-      // This is a valid generic error type for resolution (JLS 12.3.3).
-      char buf[200];
-      jio_snprintf(buf, sizeof(buf), "Class %s has not declared a bootstrap method for invokedynamic",
-                   (Klass::cast(caller_klass()))->external_name());
-      THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
-    }
-  }
+  // first find the bootstrap method
+  KlassHandle caller_klass(thread, caller_method->method_holder());
+  Handle bootm = SystemDictionary::find_bootstrap_method(caller_klass, CHECK);
 
   constantPoolHandle pool(thread, caller_method->constants());
   pool->set_invokedynamic();    // mark header to flag active call sites
 
-  int site_index = four_byte_index(thread);
+  int caller_bci = 0;
+  int site_index = 0;
+  { address caller_bcp = bcp(thread);
+    caller_bci = caller_method->bci_from(caller_bcp);
+    site_index = Bytes::get_native_u4(caller_bcp+1);
+  }
+  assert(site_index == four_byte_index(thread), "");
+  assert(constantPoolCacheOopDesc::is_secondary_index(site_index), "proper format");
   // there is a second CPC entry that is of interest; it caches signature info:
   int main_index = pool->cache()->secondary_entry_at(site_index)->main_entry_index();
 
@@ -732,23 +729,32 @@
   // The method (f2 entry) of the main entry is the MH.invoke for the
   // invokedynamic target call signature.
   intptr_t f2_value = pool->cache()->entry_at(main_index)->f2();
-  methodHandle mh_invdyn(THREAD, (methodOop) f2_value);
-  assert(mh_invdyn.not_null() && mh_invdyn->is_method() && mh_invdyn->is_method_handle_invoke(),
+  methodHandle signature_invoker(THREAD, (methodOop) f2_value);
+  assert(signature_invoker.not_null() && signature_invoker->is_method() && signature_invoker->is_method_handle_invoke(),
          "correct result from LinkResolver::resolve_invokedynamic");
 
   symbolHandle call_site_name(THREAD, pool->name_ref_at(site_index));
+
+  Handle info;  // NYI: Other metadata from a new kind of CP entry.  (Annotations?)
+
+  // this is the index which gets stored on the CallSite object (as "callerPosition"):
+  int call_site_position = constantPoolCacheOopDesc::decode_secondary_index(site_index);
+
   Handle call_site
-    = SystemDictionary::make_dynamic_call_site(caller_method->method_holder(),
-                                               caller_method->method_idnum(),
-                                               caller_method->bci_from(bcp(thread)),
+    = SystemDictionary::make_dynamic_call_site(bootm,
+                                               // Callee information:
                                                call_site_name,
-                                               mh_invdyn,
+                                               signature_invoker,
+                                               info,
+                                               // Caller information:
+                                               caller_method,
+                                               caller_bci,
                                                CHECK);
 
   // In the secondary entry, the f1 field is the call site, and the f2 (index)
-  // field is some data about the invoke site.
-  int extra_data = 0;
-  pool->cache()->secondary_entry_at(site_index)->set_dynamic_call(call_site(), extra_data);
+  // field is some data about the invoke site.  Currently, it is just the BCI.
+  // Later, it might be changed to help manage inlining dependencies.
+  pool->cache()->secondary_entry_at(site_index)->set_dynamic_call(call_site, signature_invoker);
 }
 IRT_END
 
@@ -1067,7 +1073,7 @@
   jlong_accessor u;
   jint* newval = (jint*)value;
   u.words[0] = newval[0];
-  u.words[1] = newval[Interpreter::stackElementWords()]; // skip if tag
+  u.words[1] = newval[Interpreter::stackElementWords]; // the next stack slot
   fvalue.j = u.long_value;
 #endif // _LP64
 
@@ -1252,6 +1258,6 @@
   ArgumentSizeComputer asc(invoke->signature());
   int size_of_arguments = (asc.size() + (invoke->has_receiver() ? 1 : 0)); // receiver
   Copy::conjoint_bytes(src_address, dest_address,
-                       size_of_arguments * Interpreter::stackElementSize());
+                       size_of_arguments * Interpreter::stackElementSize);
 IRT_END
 #endif
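
The invokedynamic rewrite above derives the call-site index straight from the
bytecode pointer: the instruction is the opcode byte followed by a native-order
u4 operand, so site_index is just the four bytes at caller_bcp+1. A standalone
sketch of that decoding, with memcpy standing in for Bytes::get_native_u4 and
an assumed buffer layout:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Reads a 4-byte native-order value, like Bytes::get_native_u4.
    uint32_t get_native_u4(const uint8_t* p) {
      uint32_t v;
      std::memcpy(&v, p, sizeof(v));
      return v;
    }

    int main() {
      // Opcode byte followed by a 4-byte operand, as at caller_bcp above.
      uint8_t bcp[5] = {0xba, 0, 0, 0, 0};   // 0xba == invokedynamic
      uint32_t site_index = 0x00010002;      // made-up secondary index
      std::memcpy(bcp + 1, &site_index, sizeof(site_index));

      std::printf("site index: 0x%08x\n", get_native_u4(bcp + 1));
      return 0;
    }
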
--- a/src/share/vm/interpreter/linkResolver.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/interpreter/linkResolver.cpp	Wed May 19 10:22:39 2010 -0700
@@ -138,6 +138,15 @@
 
 void LinkResolver::lookup_method_in_klasses(methodHandle& result, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS) {
   methodOop result_oop = klass->uncached_lookup_method(name(), signature());
+  if (EnableMethodHandles && result_oop != NULL) {
+    switch (result_oop->intrinsic_id()) {
+    case vmIntrinsics::_invokeExact:
+    case vmIntrinsics::_invokeGeneric:
+    case vmIntrinsics::_invokeDynamic:
+      // Do not link directly to these.  The VM must produce a synthetic one using lookup_implicit_method.
+      return;
+    }
+  }
   result = methodHandle(THREAD, result_oop);
 }
 
@@ -163,12 +172,16 @@
   result = methodHandle(THREAD, ik->lookup_method_in_all_interfaces(name(), signature()));
 }
 
-void LinkResolver::lookup_implicit_method(methodHandle& result, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS) {
+void LinkResolver::lookup_implicit_method(methodHandle& result,
+                                          KlassHandle klass, symbolHandle name, symbolHandle signature,
+                                          KlassHandle current_klass,
+                                          TRAPS) {
   if (EnableMethodHandles && MethodHandles::enabled() &&
-      name == vmSymbolHandles::invoke_name() && klass() == SystemDictionary::MethodHandle_klass()) {
-    methodOop result_oop = SystemDictionary::find_method_handle_invoke(signature,
-                                                                       Handle(),
-                                                                       Handle(),
+      klass() == SystemDictionary::MethodHandle_klass() &&
+      methodOopDesc::is_method_handle_invoke_name(name())) {
+    methodOop result_oop = SystemDictionary::find_method_handle_invoke(name,
+                                                                       signature,
+                                                                       current_klass,
                                                                        CHECK);
     if (result_oop != NULL) {
       assert(result_oop->is_method_handle_invoke() && result_oop->signature() == signature(), "consistent");
@@ -239,7 +252,7 @@
   // The class is java.dyn.MethodHandle
   resolved_klass = SystemDictionaryHandles::MethodHandle_klass();
 
-  symbolHandle method_name = vmSymbolHandles::invoke_name();
+  symbolHandle method_name = vmSymbolHandles::invokeExact_name();
 
   symbolHandle method_signature(THREAD, pool->signature_ref_at(index));
   KlassHandle  current_klass   (THREAD, pool->pool_holder());
@@ -279,7 +292,7 @@
 
     if (resolved_method.is_null()) {
       // JSR 292:  see if this is an implicitly generated method MethodHandle.invoke(*...)
-      lookup_implicit_method(resolved_method, resolved_klass, method_name, method_signature, CHECK);
+      lookup_implicit_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, CHECK);
     }
 
     if (resolved_method.is_null()) {
@@ -1041,13 +1054,14 @@
 
   // At this point, we only need the signature, and can ignore the name.
   symbolHandle method_signature(THREAD, pool->signature_ref_at(raw_index));  // raw_index works directly
-  symbolHandle method_name = vmSymbolHandles::invoke_name();
+  symbolHandle method_name = vmSymbolHandles::invokeExact_name();
   KlassHandle resolved_klass = SystemDictionaryHandles::MethodHandle_klass();
 
-  // JSR 292:  this must be an implicitly generated method MethodHandle.invoke(*...)
+  // JSR 292:  this must be an implicitly generated method MethodHandle.invokeExact(*...)
   // The extra MH receiver will be inserted into the stack on every call.
   methodHandle resolved_method;
-  lookup_implicit_method(resolved_method, resolved_klass, method_name, method_signature, CHECK);
+  KlassHandle current_klass(THREAD, pool->pool_holder());
+  lookup_implicit_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, CHECK);
   if (resolved_method.is_null()) {
     THROW(vmSymbols::java_lang_InternalError());
   }
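
The new guard in lookup_method_in_klasses() above refuses to link directly
against the MethodHandle invoke intrinsics, so resolution falls through to the
synthetic lookup_implicit_method() path instead. A small sketch of that
filtering step; the enum and MethodStub are stand-ins for vmIntrinsics and
methodOop:

    #include <cstdio>

    enum IntrinsicId { NoIntrinsic, InvokeExact, InvokeGeneric, InvokeDynamic };

    struct MethodStub {
      IntrinsicId id;
      const char* name;
    };

    // Returns nullptr for the polymorphic invokers so the caller falls
    // through to the synthetic-method path, mirroring the early return above.
    const MethodStub* filter_lookup(const MethodStub* found, bool mh_enabled) {
      if (mh_enabled && found != nullptr) {
        switch (found->id) {
          case InvokeExact:
          case InvokeGeneric:
          case InvokeDynamic:
            return nullptr;   // must be produced via lookup_implicit_method
          default:
            break;
        }
      }
      return found;
    }

    int main() {
      MethodStub m = {InvokeExact, "invokeExact"};
      const MethodStub* r = filter_lookup(&m, /*mh_enabled=*/true);
      std::printf("linked directly: %s\n", r ? r->name : "(no)");
      return 0;
    }
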
--- a/src/share/vm/interpreter/linkResolver.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/interpreter/linkResolver.hpp	Wed May 19 10:22:39 2010 -0700
@@ -103,7 +103,8 @@
   static void lookup_method_in_klasses          (methodHandle& result, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS);
   static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS);
   static void lookup_method_in_interfaces       (methodHandle& result, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS);
-  static void lookup_implicit_method            (methodHandle& result, KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS);
+  static void lookup_implicit_method            (methodHandle& result, KlassHandle klass, symbolHandle name, symbolHandle signature,
+                                                 KlassHandle current_klass, TRAPS);
 
   static int vtable_index_of_miranda_method(KlassHandle klass, symbolHandle name, symbolHandle signature, TRAPS);
 
--- a/src/share/vm/interpreter/oopMapCache.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/interpreter/oopMapCache.cpp	Wed May 19 10:22:39 2010 -0700
@@ -224,8 +224,8 @@
   // If we are doing mark sweep _method may not have a valid header
   // $$$ This used to happen only for m/s collections; we might want to
   // think of an appropriate generalization of this distinction.
-  guarantee(Universe::heap()->is_gc_active() ||
-            _method->is_oop_or_null(), "invalid oop in oopMapCache")
+  guarantee(Universe::heap()->is_gc_active() || _method->is_oop_or_null(),
+            "invalid oop in oopMapCache");
 }
 
 #ifdef ENABLE_ZAP_DEAD_LOCALS
--- a/src/share/vm/interpreter/templateInterpreter.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/interpreter/templateInterpreter.cpp	Wed May 19 10:22:39 2010 -0700
@@ -457,7 +457,7 @@
 
 void TemplateInterpreterGenerator::set_wide_entry_point(Template* t, address& wep) {
   assert(t->is_valid(), "template must exist");
-  assert(t->tos_in() == vtos, "only vtos tos_in supported for wide instructions")
+  assert(t->tos_in() == vtos, "only vtos tos_in supported for wide instructions");
   wep = __ pc(); generate_and_dispatch(t);
 }
 
--- a/src/share/vm/memory/blockOffsetTable.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/memory/blockOffsetTable.cpp	Wed May 19 10:22:39 2010 -0700
@@ -689,7 +689,7 @@
   assert(blk_end > _next_offset_threshold,
          "should be past threshold");
   assert(blk_start <= _next_offset_threshold,
-         "blk_start should be at or before threshold")
+         "blk_start should be at or before threshold");
   assert(pointer_delta(_next_offset_threshold, blk_start) <= N_words,
          "offset should be <= BlockOffsetSharedArray::N");
   assert(Universe::heap()->is_in_reserved(blk_start),
--- a/src/share/vm/memory/collectorPolicy.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/memory/collectorPolicy.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -112,6 +112,11 @@
   }
 }
 
+bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
+  bool result = _should_clear_all_soft_refs;
+  set_should_clear_all_soft_refs(false);
+  return result;
+}
 
 GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
                                            int max_covered_regions) {
@@ -126,6 +131,17 @@
   }
 }
 
+void CollectorPolicy::cleared_all_soft_refs() {
+  // If near the gc overhead limit, continue to clear SoftRefs.  SoftRefs may
+  // have been cleared in the last collection but if the gc overhead
+  // limit continues to be near, SoftRefs should still be cleared.
+  if (size_policy() != NULL) {
+    _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
+  }
+  _all_soft_refs_clear = true;
+}
+
+
 // GenCollectorPolicy methods.
 
 size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
@@ -489,6 +505,12 @@
 
   debug_only(gch->check_for_valid_allocation_state());
   assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");
+
+  // In general gc_overhead_limit_was_exceeded should be false, so
+  // set it to false here; it is reset to true below only if the
+  // gc time limit is being exceeded.
+  *gc_overhead_limit_was_exceeded = false;
+
   HeapWord* result = NULL;
 
   // Loop until the allocation is satisfied,
@@ -524,12 +546,6 @@
         return result;
       }
 
-      // There are NULL's returned for different circumstances below.
-      // In general gc_overhead_limit_was_exceeded should be false so
-      // set it so here and reset it to true only if the gc time
-      // limit is being exceeded as checked below.
-      *gc_overhead_limit_was_exceeded = false;
-
       if (GC_locker::is_active_and_needs_gc()) {
         if (is_tlab) {
           return NULL;  // Caller will retry allocating individual object
@@ -568,18 +584,6 @@
       gc_count_before = Universe::heap()->total_collections();
     }
 
-    // Allocation has failed and a collection is about
-    // to be done.  If the gc time limit was exceeded the
-    // last time a collection was done, return NULL so
-    // that an out-of-memory will be thrown.  Clear
-    // gc_time_limit_exceeded so that subsequent attempts
-    // at a collection will be made.
-    if (size_policy()->gc_time_limit_exceeded()) {
-      *gc_overhead_limit_was_exceeded = true;
-      size_policy()->set_gc_time_limit_exceeded(false);
-      return NULL;
-    }
-
     VM_GenCollectForAllocation op(size,
                                   is_tlab,
                                   gc_count_before);
@@ -590,6 +594,24 @@
          assert(result == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
       }
+
+      // Allocation has failed and a collection
+      // has been done.  If the gc time limit was exceeded
+      // this time, return NULL so that an out-of-memory
+      // will be thrown.  Clear gc_overhead_limit_exceeded
+      // so that the exceeded state does not persist.
+
+      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
+      const bool softrefs_clear = all_soft_refs_clear();
+      assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
+      if (limit_exceeded && softrefs_clear) {
+        *gc_overhead_limit_was_exceeded = true;
+        size_policy()->set_gc_overhead_limit_exceeded(false);
+        if (op.result() != NULL) {
+          CollectedHeap::fill_with_object(op.result(), size);
+        }
+        return NULL;
+      }
       assert(result == NULL || gch->is_in_reserved(result),
              "result not in heap");
       return result;
@@ -688,6 +710,9 @@
     return result;
   }
 
+  assert(!should_clear_all_soft_refs(),
+    "Flag should have been handled and cleared prior to this point");
+
   // What else?  We might try synchronous finalization later.  If the total
   // space available is large enough for the allocation, then a more
   // complete compaction phase than we've tried so far might be
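
The relocated check above now runs after the collection rather than before it,
and it pairs the overhead-limit flag with all_soft_refs_clear so an
out-of-memory is only surfaced once SoftReferences have actually been cleared.
A condensed sketch of that post-GC gate with stub types; the
fill_with_object() step on op.result() is elided:

    #include <cstddef>

    struct SizePolicyStub {
      bool overhead_limit_exceeded = false;
    };

    // Returns the allocation result, or nullptr when the caller should
    // throw an out-of-memory, mirroring the new block in mem_allocate().
    void* post_gc_gate(void* result, SizePolicyStub& policy,
                       bool all_soft_refs_clear) {
      if (policy.overhead_limit_exceeded && all_soft_refs_clear) {
        policy.overhead_limit_exceeded = false;  // don't let the state persist
        return nullptr;                          // caller throws OOME
      }
      return result;
    }

    int main() {
      SizePolicyStub policy;
      policy.overhead_limit_exceeded = true;
      int word = 0;
      void* r = post_gc_gate(&word, policy, /*all_soft_refs_clear=*/true);
      return r == nullptr ? 0 : 1;   // expect the out-of-memory path
    }
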
--- a/src/share/vm/memory/collectorPolicy.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/memory/collectorPolicy.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -69,12 +69,28 @@
   size_t _min_alignment;
   size_t _max_alignment;
 
+  // The sizing of the heap is controlled by a sizing policy.
+  AdaptiveSizePolicy* _size_policy;
+
+  // Set to true when policy wants soft refs cleared.
+  // Reset to false by gc after it clears all soft refs.
+  bool _should_clear_all_soft_refs;
+  // Set to true by the GC whenever the just-completed gc cleared all
+  // softrefs, and set back to false each time the gc returns to the
+  // mutator.  For example, in the ParallelScavengeHeap case the latter
+  // is done toward the end of mem_allocate(), where it returns
+  // op.result()
+  bool _all_soft_refs_clear;
+
   CollectorPolicy() :
     _min_alignment(1),
     _max_alignment(1),
     _initial_heap_byte_size(0),
     _max_heap_byte_size(0),
-    _min_heap_byte_size(0)
+    _min_heap_byte_size(0),
+    _size_policy(NULL),
+    _should_clear_all_soft_refs(false),
+    _all_soft_refs_clear(false)
   {}
 
  public:
@@ -98,6 +114,19 @@
     G1CollectorPolicyKind
   };
 
+  AdaptiveSizePolicy* size_policy() { return _size_policy; }
+  bool should_clear_all_soft_refs() { return _should_clear_all_soft_refs; }
+  void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; }
+  // Returns the current value of _should_clear_all_soft_refs.
+  // _should_clear_all_soft_refs is set to false as a side effect.
+  bool use_should_clear_all_soft_refs(bool v);
+  bool all_soft_refs_clear() { return _all_soft_refs_clear; }
+  void set_all_soft_refs_clear(bool v) { _all_soft_refs_clear = v; }
+
+  // Called by the GC after Soft Refs have been cleared to indicate
+  // that the request in _should_clear_all_soft_refs has been fulfilled.
+  void cleared_all_soft_refs();
+
   // Identification methods.
   virtual GenCollectorPolicy*           as_generation_policy()            { return NULL; }
   virtual TwoGenerationCollectorPolicy* as_two_generation_policy()        { return NULL; }
@@ -165,6 +194,22 @@
 
 };
 
+class ClearedAllSoftRefs : public StackObj {
+  bool _clear_all_soft_refs;
+  CollectorPolicy* _collector_policy;
+ public:
+  ClearedAllSoftRefs(bool clear_all_soft_refs,
+                     CollectorPolicy* collector_policy) :
+    _clear_all_soft_refs(clear_all_soft_refs),
+    _collector_policy(collector_policy) {}
+
+  ~ClearedAllSoftRefs() {
+    if (_clear_all_soft_refs) {
+      _collector_policy->cleared_all_soft_refs();
+    }
+  }
+};
+
 class GenCollectorPolicy : public CollectorPolicy {
  protected:
   size_t _min_gen0_size;
@@ -173,10 +218,6 @@
 
   GenerationSpec **_generations;
 
-  // The sizing of the different generations in the heap are controlled
-  // by a sizing policy.
-  AdaptiveSizePolicy* _size_policy;
-
   // Return true if an allocation should be attempted in the older
   // generation if it fails in the younger generation.  Return
   // false, otherwise.
@@ -236,14 +277,11 @@
   virtual size_t large_typearray_limit();
 
   // Adaptive size policy
-  AdaptiveSizePolicy* size_policy() { return _size_policy; }
   virtual void initialize_size_policy(size_t init_eden_size,
                                       size_t init_promo_size,
                                       size_t init_survivor_size);
-
 };
 
-
 // All of hotspot's current collectors are subtypes of this
 // class. Currently, these collectors all use the same gen[0],
 // but have different gen[1] types. If we add another subtype
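
The ClearedAllSoftRefs helper above is a stack-allocated guard: its destructor runs on every exit path of a collection, so the policy's pending clear request cannot be left set by an early return. A compilable sketch of the same shape, using stand-in types rather than the real CollectorPolicy:

    // PolicySketch and collect_sketch are illustrative stand-ins only.
    struct PolicySketch {
      bool should_clear_all_soft_refs;
      void cleared_all_soft_refs() { should_clear_all_soft_refs = false; }
    };

    class ClearedAllSoftRefsSketch {
      bool          _clear_all_soft_refs;
      PolicySketch* _policy;
     public:
      ClearedAllSoftRefsSketch(bool clear, PolicySketch* p)
        : _clear_all_soft_refs(clear), _policy(p) {}
      // Runs even on early returns from the enclosing scope.
      ~ClearedAllSoftRefsSketch() {
        if (_clear_all_soft_refs) _policy->cleared_all_soft_refs();
      }
    };

    void collect_sketch(PolicySketch& policy, bool clear_all_soft_refs) {
      // Same escalation as in genCollectedHeap.cpp below: a pending policy
      // request upgrades this collection to clear soft refs.
      bool do_clear = clear_all_soft_refs || policy.should_clear_all_soft_refs;
      ClearedAllSoftRefsSketch casr(do_clear, &policy);
      // ... perform the collection; any return path resets the flag ...
    }
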
--- a/src/share/vm/memory/defNewGeneration.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/memory/defNewGeneration.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -594,6 +594,10 @@
     _tenuring_threshold =
       age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
 
+    // A successful scavenge should restart the GC time limit count,
+    // which applies to full GCs.
+    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
+    size_policy->reset_gc_overhead_limit_count();
     if (PrintGC && !PrintGCDetails) {
       gch->print_heap_change(gch_prev_used);
     }
--- a/src/share/vm/memory/genCollectedHeap.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -428,7 +428,8 @@
   assert(my_thread->is_VM_thread() ||
          my_thread->is_ConcurrentGC_thread(),
          "incorrect thread type capability");
-  assert(Heap_lock->is_locked(), "the requesting thread should have the Heap_lock");
+  assert(Heap_lock->is_locked(),
+         "the requesting thread should have the Heap_lock");
   guarantee(!is_gc_active(), "collection is not reentrant");
   assert(max_level < n_gens(), "sanity check");
 
@@ -436,6 +437,11 @@
     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
   }
 
+  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
+                          collector_policy()->should_clear_all_soft_refs();
+
+  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
+
   const size_t perm_prev_used = perm_gen()->used();
 
   if (PrintHeapAtGC) {
@@ -560,11 +566,11 @@
           if (rp->discovery_is_atomic()) {
             rp->verify_no_references_recorded();
             rp->enable_discovery();
-            rp->setup_policy(clear_all_soft_refs);
+            rp->setup_policy(do_clear_all_soft_refs);
           } else {
             // collect() below will enable discovery as appropriate
           }
-          _gens[i]->collect(full, clear_all_soft_refs, size, is_tlab);
+          _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
           if (!rp->enqueuing_is_done()) {
             rp->enqueue_discovered_references();
           } else {
--- a/src/share/vm/memory/genMarkSweep.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/memory/genMarkSweep.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,13 @@
   bool clear_all_softrefs) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+#ifdef ASSERT
+  if (gch->collector_policy()->should_clear_all_soft_refs()) {
+    assert(clear_all_softrefs, "Policy should have been checked earlier");
+  }
+#endif
+
   // hook up weak ref data so it can be used during Mark-Sweep
   assert(ref_processor() == NULL, "no stomping");
   assert(rp != NULL, "should be non-NULL");
@@ -44,7 +51,6 @@
 
   // Increment the invocation count for the permanent generation, since it is
   // implicitly collected whenever we do a full mark sweep collection.
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
   gch->perm_gen()->stat_record()->invocations++;
 
   // Capture heap size before collection for printing.
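
The #ifdef ASSERT block added above is a debug-only cross-check: by the time the mark-sweep runs, any pending policy request must already have been folded into clear_all_softrefs by the caller. The shape of that check with stand-in types (HotSpot's ASSERT roughly corresponds to a !NDEBUG build here):

    #include <cassert>

    struct PolicySketch { bool should_clear_all_soft_refs; };

    void invoke_at_safepoint_sketch(const PolicySketch& policy,
                                    bool clear_all_softrefs) {
    #ifndef NDEBUG
      if (policy.should_clear_all_soft_refs) {
        assert(clear_all_softrefs && "Policy should have been checked earlier");
      }
    #endif
      // ... mark-sweep proper ...
      (void)policy; (void)clear_all_softrefs;
    }
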
--- a/src/share/vm/memory/heapInspection.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/memory/heapInspection.cpp	Wed May 19 10:22:39 2010 -0700
@@ -315,7 +315,7 @@
 
 void HeapInspection::find_instances_at_safepoint(klassOop k, GrowableArray<oop>* result) {
   assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
-  assert(Heap_lock->is_locked(), "should have the Heap_lock")
+  assert(Heap_lock->is_locked(), "should have the Heap_lock");
 
   // Ensure that the heap is parsable
   Universe::heap()->ensure_parsability(false);  // no need to retire TLABs
--- a/src/share/vm/memory/threadLocalAllocBuffer.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/memory/threadLocalAllocBuffer.hpp	Wed May 19 10:22:39 2010 -0700
@@ -111,7 +111,22 @@
 
   // Allocate size HeapWords. The memory is NOT initialized to zero.
   inline HeapWord* allocate(size_t size);
-  static size_t alignment_reserve()              { return align_object_size(typeArrayOopDesc::header_size(T_INT)); }
+
+  // Reserve space at the end of TLAB
+  static size_t end_reserve() {
+    int reserve_size = typeArrayOopDesc::header_size(T_INT);
+    if (AllocatePrefetchStyle == 3) {
+      // BIS is used to prefetch - we need space for it.
+      // +1 for rounding up to the next cache line, +1 to be safe
+      int lines = AllocatePrefetchLines + 2;
+      int step_size = AllocatePrefetchStepSize;
+      int distance = AllocatePrefetchDistance;
+      int prefetch_end = (distance + step_size*lines)/(int)HeapWordSize;
+      reserve_size = MAX2(reserve_size, prefetch_end);
+    }
+    return reserve_size;
+  }
+  static size_t alignment_reserve()              { return align_object_size(end_reserve()); }
   static size_t alignment_reserve_in_bytes()     { return alignment_reserve() * HeapWordSize; }
 
   // Return tlab size or remaining space in eden such that the
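
To make end_reserve() concrete: assuming, purely for illustration, AllocatePrefetchLines = 4, AllocatePrefetchStepSize = 64, AllocatePrefetchDistance = 512, and an 8-byte HeapWord, the BIS path reserves (512 + 64*6)/8 = 112 words, which dominates the T_INT header size:

    #include <algorithm>
    #include <cstdio>

    int main() {
      // All values below are assumptions for illustration; the real ones
      // come from the AllocatePrefetch* flags and the platform HeapWordSize.
      const int header_words   = 4;    // stand-in for typeArrayOopDesc::header_size(T_INT)
      const int prefetch_lines = 4;    // AllocatePrefetchLines (assumed)
      const int step_size      = 64;   // AllocatePrefetchStepSize, bytes (assumed)
      const int distance       = 512;  // AllocatePrefetchDistance, bytes (assumed)
      const int heap_word_size = 8;    // 64-bit HeapWord

      int lines = prefetch_lines + 2;  // +1 rounding to next cache line, +1 to be safe
      int prefetch_end = (distance + step_size * lines) / heap_word_size;
      int reserve = std::max(header_words, prefetch_end);
      std::printf("end_reserve = %d HeapWords\n", reserve);  // prints 112 here
      return 0;
    }
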
--- a/src/share/vm/memory/universe.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/memory/universe.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1045,7 +1045,7 @@
   k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_reflect_Method(), true, CHECK_false);
   k_h = instanceKlassHandle(THREAD, k);
   k_h->link_class(CHECK_false);
-  m = k_h->find_method(vmSymbols::invoke_name(), vmSymbols::object_array_object_object_signature());
+  m = k_h->find_method(vmSymbols::invoke_name(), vmSymbols::object_object_array_object_signature());
   if (m == NULL || m->is_static()) {
     THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
       "java.lang.reflect.Method.invoke", false);
--- a/src/share/vm/oops/cpCacheOop.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/oops/cpCacheOop.cpp	Wed May 19 10:22:39 2010 -0700
@@ -218,18 +218,19 @@
 }
 
 
-void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site, int extra_data) {
-  methodOop method = (methodOop) java_dyn_CallSite::vmmethod(call_site());
-  assert(method->is_method(), "must be initialized properly");
-  int param_size = method->size_of_parameters();
+void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site,
+                                              methodHandle signature_invoker) {
+  int param_size = signature_invoker->size_of_parameters();
   assert(param_size >= 1, "method argument size must include MH.this");
   param_size -= 1;              // do not count MH.this; it is not stacked for invokedynamic
   if (Atomic::cmpxchg_ptr(call_site(), &_f1, NULL) == NULL) {
     // racing threads might be trying to install their own favorites
     set_f1(call_site());
   }
-  set_f2(extra_data);
-  set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(), false, false, false, true) | param_size);
+  //set_f2(0);
+  bool is_final = true;
+  assert(signature_invoker->is_final_method(), "is_final");
+  set_flags(as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size);
   // do not do set_bytecode on a secondary CP cache entry
   //set_bytecode_1(Bytecodes::_invokedynamic);
 }
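
The cmpxchg on _f1 above is the usual install-once idiom: racing threads may each build a candidate, but only the first compare-and-swap publishes one, and every thread then converges on the winner. A standalone sketch with std::atomic (the real code uses Atomic::cmpxchg_ptr on the cache-entry field):

    #include <atomic>

    static std::atomic<void*> slot{nullptr};  // stand-in for the _f1 field

    // Returns whichever value ended up installed, ours or a rival's.
    void* install_once(void* candidate) {
      void* expected = nullptr;
      if (slot.compare_exchange_strong(expected, candidate)) {
        return candidate;   // we won the race and published our value
      }
      return expected;      // another thread already installed its value
    }
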
--- a/src/share/vm/oops/cpCacheOop.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/oops/cpCacheOop.hpp	Wed May 19 10:22:39 2010 -0700
@@ -181,7 +181,7 @@
 
   void set_dynamic_call(
     Handle call_site,                            // Resolved java.dyn.CallSite (f1)
-    int extra_data                               // (f2)
+    methodHandle signature_invoker               // determines signature information
   );
 
   void set_parameter_size(int value) {
--- a/src/share/vm/oops/generateOopMap.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/oops/generateOopMap.cpp	Wed May 19 10:22:39 2010 -0700
@@ -807,7 +807,7 @@
 }
 
 CellTypeState GenerateOopMap::get_var(int localNo) {
-  assert(localNo < _max_locals + _nof_refval_conflicts, "variable read error")
+  assert(localNo < _max_locals + _nof_refval_conflicts, "variable read error");
   if (localNo < 0 || localNo > _max_locals) {
     verify_error("variable read error: r%d", localNo);
     return valCTS; // just to pick something;
--- a/src/share/vm/oops/instanceKlass.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/oops/instanceKlass.cpp	Wed May 19 10:22:39 2010 -0700
@@ -966,7 +966,7 @@
       // not found
 #ifdef ASSERT
       int index = linear_search(methods, name, signature);
-      if (index != -1) fatal1("binary search bug: should have found entry %d", index);
+      assert(index == -1, err_msg("binary search should have found entry %d", index));
 #endif
       return NULL;
     } else if (res < 0) {
@@ -977,7 +977,7 @@
   }
 #ifdef ASSERT
   int index = linear_search(methods, name, signature);
-  if (index != -1) fatal1("binary search bug: should have found entry %d", index);
+  assert(index == -1, err_msg("binary search should have found entry %d", index));
 #endif
   return NULL;
 }
--- a/src/share/vm/oops/instanceKlassKlass.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/oops/instanceKlassKlass.cpp	Wed May 19 10:22:39 2010 -0700
@@ -712,10 +712,10 @@
     int sib_count = 0;
     while (sib != NULL) {
       if (sib == ik) {
-        fatal1("subclass cycle of length %d", sib_count);
+        fatal(err_msg("subclass cycle of length %d", sib_count));
       }
       if (sib_count >= 100000) {
-        fatal1("suspiciously long subclass list %d", sib_count);
+        fatal(err_msg("suspiciously long subclass list %d", sib_count));
       }
       guarantee(sib->as_klassOop()->is_klass(), "should be klass");
       guarantee(sib->as_klassOop()->is_perm(),  "should be in permspace");
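
This hunk and several others in the set (instanceKlass.cpp, klassVtable.cpp, idealGraphPrinter.cpp) retire the fixed-arity fatal1/fatal2 in favor of fatal(err_msg(...)): one varargs formatter feeding one entry point. A standalone sketch of that shape, with stand-in names rather than the real debug.hpp utilities:

    #include <cstdarg>
    #include <cstdio>
    #include <cstdlib>

    // Stand-in for err_msg: formats into a static buffer (not thread-safe,
    // which is tolerable for a process that is about to abort).
    static const char* err_msg_sketch(const char* fmt, ...) {
      static char buf[256];
      va_list ap;
      va_start(ap, fmt);
      vsnprintf(buf, sizeof(buf), fmt, ap);
      va_end(ap);
      return buf;
    }

    static void fatal_sketch(const char* msg) {
      std::fprintf(stderr, "fatal error: %s\n", msg);
      std::abort();
    }

    int main() {
      int sib_count = 100000;
      // old style: fatal1("suspiciously long subclass list %d", sib_count);
      fatal_sketch(err_msg_sketch("suspiciously long subclass list %d", sib_count));
    }
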
--- a/src/share/vm/oops/klassVtable.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/oops/klassVtable.cpp	Wed May 19 10:22:39 2010 -0700
@@ -899,7 +899,7 @@
   int nof_methods = methods()->length();
   HandleMark hm;
   KlassHandle klass = _klass;
-  assert(nof_methods > 0, "at least one method must exist for interface to be in vtable")
+  assert(nof_methods > 0, "at least one method must exist for interface to be in vtable");
   Handle interface_loader (THREAD, instanceKlass::cast(interf_h())->class_loader());
   int ime_num = 0;
 
@@ -1180,8 +1180,8 @@
   oop* end_of_obj = (oop*)_klass() + _klass()->size();
   oop* end_of_vtable = (oop *)&table()[_length];
   if (end_of_vtable > end_of_obj) {
-    fatal1("klass %s: klass object too short (vtable extends beyond end)",
-          _klass->internal_name());
+    fatal(err_msg("klass %s: klass object too short (vtable extends beyond "
+                  "end)", _klass->internal_name()));
   }
 
   for (int i = 0; i < _length; i++) table()[i].verify(this, st);
@@ -1224,7 +1224,7 @@
 #ifndef PRODUCT
     print();
 #endif
-    fatal1("vtableEntry %#lx: method is from subclass", this);
+    fatal(err_msg("vtableEntry " PTR_FORMAT ": method is from subclass", this));
   }
 }
 
--- a/src/share/vm/oops/methodKlass.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/oops/methodKlass.cpp	Wed May 19 10:22:39 2010 -0700
@@ -236,8 +236,10 @@
   assert(obj->is_method(), "must be method");
   Klass::oop_print_on(obj, st);
   methodOop m = methodOop(obj);
+  // get the effect of PrintOopAddress, always, for methods:
+  st->print   (" - this oop:          "INTPTR_FORMAT, (intptr_t)m);
   st->print   (" - method holder:     ");    m->method_holder()->print_value_on(st); st->cr();
-  st->print   (" - constants:         " INTPTR_FORMAT, " ", (address)m->constants());
+  st->print   (" - constants:         "INTPTR_FORMAT" ", (address)m->constants());
   m->constants()->print_value_on(st); st->cr();
   st->print   (" - access:            0x%x  ", m->access_flags().as_int()); m->access_flags().print_on(st); st->cr();
   st->print   (" - name:              ");    m->name()->print_value_on(st); st->cr();
@@ -246,6 +248,10 @@
   st->print_cr(" - max locals:        %d",   m->max_locals());
   st->print_cr(" - size of params:    %d",   m->size_of_parameters());
   st->print_cr(" - method size:       %d",   m->method_size());
+  if (m->intrinsic_id() != vmIntrinsics::_none)
+    st->print_cr(" - intrinsic id:      %d %s", m->intrinsic_id(), vmIntrinsics::name_at(m->intrinsic_id()));
+  if (m->highest_tier_compile() != CompLevel_none)
+    st->print_cr(" - highest tier:      %d", m->highest_tier_compile());
   st->print_cr(" - vtable index:      %d",   m->_vtable_index);
   st->print_cr(" - i2i entry:         " INTPTR_FORMAT, m->interpreter_entry());
   st->print_cr(" - adapter:           " INTPTR_FORMAT, m->adapter());
--- a/src/share/vm/oops/methodOop.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/oops/methodOop.cpp	Wed May 19 10:22:39 2010 -0700
@@ -306,7 +306,7 @@
 
 int methodOopDesc::extra_stack_words() {
   // not an inline function, to avoid a header dependency on Interpreter
-  return extra_stack_entries() * Interpreter::stackElementSize();
+  return extra_stack_entries() * Interpreter::stackElementSize;
 }
 
 
@@ -807,9 +807,19 @@
   return false;
 }
 
+bool methodOopDesc::is_method_handle_invoke_name(vmSymbols::SID name_sid) {
+  switch (name_sid) {
+  case vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name):  // FIXME: remove this transitional form
+  case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name):
+  case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name):
+    return true;
+  }
+  return false;
+}
+
 // Constant pool structure for invoke methods:
 enum {
-  _imcp_invoke_name = 1,        // utf8: 'invoke'
+  _imcp_invoke_name = 1,        // utf8: 'invokeExact' or 'invokeGeneric'
   _imcp_invoke_signature,       // utf8: (variable symbolOop)
   _imcp_method_type_value,      // string: (variable java/dyn/MethodType, sic)
   _imcp_limit
@@ -839,14 +849,15 @@
 //
 // Tests if this method is an internal adapter frame from the
 // MethodHandleCompiler.
+// Must be consistent with MethodHandleCompiler::get_method_oop().
 bool methodOopDesc::is_method_handle_adapter() const {
-  return ((name() == vmSymbols::invoke_name() &&
-           method_holder() == SystemDictionary::MethodHandle_klass())
-          ||
-          method_holder() == SystemDictionary::InvokeDynamic_klass());
+  return (is_method_handle_invoke_name(name()) &&
+          is_synthetic() &&
+          MethodHandleCompiler::klass_is_method_handle_adapter_holder(method_holder()));
 }
 
 methodHandle methodOopDesc::make_invoke_method(KlassHandle holder,
+                                               symbolHandle name,
                                                symbolHandle signature,
                                                Handle method_type, TRAPS) {
   methodHandle empty;
@@ -865,7 +876,7 @@
     constantPoolOop cp_oop = oopFactory::new_constantPool(_imcp_limit, IsSafeConc, CHECK_(empty));
     cp = constantPoolHandle(THREAD, cp_oop);
   }
-  cp->symbol_at_put(_imcp_invoke_name,       vmSymbols::invoke_name());
+  cp->symbol_at_put(_imcp_invoke_name,       name());
   cp->symbol_at_put(_imcp_invoke_signature,  signature());
   cp->string_at_put(_imcp_method_type_value, vmSymbols::void_signature());
   cp->set_pool_holder(holder());
@@ -882,7 +893,7 @@
   m->set_constants(cp());
   m->set_name_index(_imcp_invoke_name);
   m->set_signature_index(_imcp_invoke_signature);
-  assert(m->name() == vmSymbols::invoke_name(), "");
+  assert(is_method_handle_invoke_name(m->name()), "");
   assert(m->signature() == signature(), "");
 #ifdef CC_INTERP
   ResultTypeFinder rtf(signature());
@@ -1033,6 +1044,24 @@
       id = vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
       break;
     }
+    break;
+
+  // Signature-polymorphic methods: MethodHandle.invoke*, InvokeDynamic.*.
+  case vmSymbols::VM_SYMBOL_ENUM_NAME(java_dyn_MethodHandle):
+    if (is_static() || !is_native())  break;
+    switch (name_id) {
+    case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name):
+      id = vmIntrinsics::_invokeGeneric; break;
+    default:
+      if (is_method_handle_invoke_name(name()))
+        id = vmIntrinsics::_invokeExact;
+      break;
+    }
+    break;
+  case vmSymbols::VM_SYMBOL_ENUM_NAME(java_dyn_InvokeDynamic):
+    if (!is_static() || !is_native())  break;
+    id = vmIntrinsics::_invokeDynamic;
+    break;
   }
 
   if (id != vmIntrinsics::_none) {
@@ -1114,6 +1143,20 @@
     return ( a < b ? -1 : (a == b ? 0 : 1));
   }
 
+  // We implement special compare versions for narrow oops to avoid
+  // testing for UseCompressedOops on every comparison.
+  static int method_compare_narrow(narrowOop* a, narrowOop* b) {
+    methodOop m = (methodOop)oopDesc::load_decode_heap_oop(a);
+    methodOop n = (methodOop)oopDesc::load_decode_heap_oop(b);
+    return m->name()->fast_compare(n->name());
+  }
+
+  static int method_compare_narrow_idempotent(narrowOop* a, narrowOop* b) {
+    int i = method_compare_narrow(a, b);
+    if (i != 0) return i;
+    return ( a < b ? -1 : (a == b ? 0 : 1));
+  }
+
   typedef int (*compareFn)(const void*, const void*);
 }
 
@@ -1166,7 +1209,7 @@
 
     // Use a simple bubble sort for a small number of methods since
     // qsort requires a function pointer call for each comparison.
-    if (UseCompressedOops || length < 8) {
+    if (length < 8) {
       bool sorted = true;
       for (int i=length-1; i>0; i--) {
         for (int j=0; j<i; j++) {
@@ -1182,10 +1225,10 @@
           sorted = true;
       }
     } else {
-      // XXX This doesn't work for UseCompressedOops because the compare fn
-      // will have to decode the methodOop anyway making it not much faster
-      // than above.
-      compareFn compare = (compareFn) (idempotent ? method_compare_idempotent : method_compare);
+      compareFn compare =
+        (UseCompressedOops ?
+         (compareFn) (idempotent ? method_compare_narrow_idempotent : method_compare_narrow):
+         (compareFn) (idempotent ? method_compare_idempotent : method_compare));
       qsort(methods->base(), length, heapOopSize, compare);
     }
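
The sort rewrite above picks the comparison function once, outside qsort, so the hot per-element callback never re-tests UseCompressedOops. The same selection pattern in miniature, with plain integer payloads standing in for methodOops and narrowOops:

    #include <cstdlib>
    #include <cstdint>

    typedef int (*compareFn)(const void*, const void*);

    static int cmp_wide(const void* a, const void* b) {
      int64_t x = *(const int64_t*)a, y = *(const int64_t*)b;
      return (x < y) ? -1 : (x == y ? 0 : 1);
    }
    static int cmp_narrow(const void* a, const void* b) {
      int32_t x = *(const int32_t*)a, y = *(const int32_t*)b;
      return (x < y) ? -1 : (x == y ? 0 : 1);
    }

    // 'narrow' plays the role of UseCompressedOops: tested once, not per element.
    void sort_elems(void* base, size_t n, size_t elem_size, bool narrow) {
      compareFn compare = narrow ? cmp_narrow : cmp_wide;
      qsort(base, n, elem_size, compare);
    }
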
 
--- a/src/share/vm/oops/methodOop.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/oops/methodOop.hpp	Wed May 19 10:22:39 2010 -0700
@@ -525,11 +525,16 @@
 
   // JSR 292 support
   bool is_method_handle_invoke() const              { return access_flags().is_method_handle_invoke(); }
+  static bool is_method_handle_invoke_name(vmSymbols::SID name_sid);
+  static bool is_method_handle_invoke_name(symbolOop name) {
+    return is_method_handle_invoke_name(vmSymbols::find_sid(name));
+  }
   // Tests if this method is an internal adapter frame from the
   // MethodHandleCompiler.
   bool is_method_handle_adapter() const;
   static methodHandle make_invoke_method(KlassHandle holder,
-                                         symbolHandle signature,
+                                         symbolHandle name, // invokeExact or invokeGeneric
+                                         symbolHandle signature, // anything at all
                                          Handle method_type,
                                          TRAPS);
   // these operate only on invoke methods:
--- a/src/share/vm/oops/typeArrayKlass.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/oops/typeArrayKlass.cpp	Wed May 19 10:22:39 2010 -0700
@@ -123,16 +123,16 @@
      || (((unsigned int) length + (unsigned int) dst_pos) > (unsigned int) d->length()) ) {
     THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException());
   }
+  // Nothing to copy for a zero-length request
+  if (length == 0)
+    return;
 
   // This is an attempt to make the copy_array fast.
-  // NB: memmove takes care of overlapping memory segments.
-  // Potential problem: memmove is not guaranteed to be word atomic
-  // Revisit in Merlin
   int l2es = log2_element_size();
   int ihs = array_header_in_bytes() / wordSize;
-  char* src = (char*) ((oop*)s + ihs) + (src_pos << l2es);
-  char* dst = (char*) ((oop*)d + ihs) + (dst_pos << l2es);
-  memmove(dst, src, length << l2es);
+  char* src = (char*) ((oop*)s + ihs) + ((size_t)src_pos << l2es);
+  char* dst = (char*) ((oop*)d + ihs) + ((size_t)dst_pos << l2es);
+  Copy::conjoint_memory_atomic(src, dst, (size_t)length << l2es);
 }
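
Two things changed in copy_array above: positions are widened to size_t before shifting, and memmove is replaced by an element-atomic copy. The widening matters because a 32-bit position shifted by log2(element size) can wrap before it reaches pointer width; a small demonstration (the index is chosen only to force the wrap, and assumes a 64-bit heap large enough to hold such an array):

    #include <cstddef>
    #include <cstdio>

    int main() {
      unsigned int dst_pos = 600u * 1000 * 1000;  // index into a huge long[]
      int l2es = 3;                               // log2 element size for T_LONG

      size_t wrapped = dst_pos << l2es;           // shift done in 32 bits: wraps
      size_t correct = (size_t)dst_pos << l2es;   // widened first: 4800000000

      std::printf("wrapped=%zu correct=%zu\n", wrapped, correct);
      return 0;
    }
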
 
 
--- a/src/share/vm/opto/addnode.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/opto/addnode.cpp	Wed May 19 10:22:39 2010 -0700
@@ -714,71 +714,6 @@
   return idx > Base;
 }
 
-//---------------------------mach_bottom_type----------------------------------
-// Utility function for use by ADLC.  Implements bottom_type for matched AddP.
-const Type *AddPNode::mach_bottom_type( const MachNode* n) {
-  Node* base = n->in(Base);
-  const Type *t = base->bottom_type();
-  if ( t == Type::TOP ) {
-    // an untyped pointer
-    return TypeRawPtr::BOTTOM;
-  }
-  const TypePtr* tp = t->isa_oopptr();
-  if ( tp == NULL )  return t;
-  if ( tp->_offset == TypePtr::OffsetBot )  return tp;
-
-  // We must carefully add up the various offsets...
-  intptr_t offset = 0;
-  const TypePtr* tptr = NULL;
-
-  uint numopnds = n->num_opnds();
-  uint index = n->oper_input_base();
-  for ( uint i = 1; i < numopnds; i++ ) {
-    MachOper *opnd = n->_opnds[i];
-    // Check for any interesting operand info.
-    // In particular, check for both memory and non-memory operands.
-    // %%%%% Clean this up: use xadd_offset
-    intptr_t con = opnd->constant();
-    if ( con == TypePtr::OffsetBot )  goto bottom_out;
-    offset += con;
-    con = opnd->constant_disp();
-    if ( con == TypePtr::OffsetBot )  goto bottom_out;
-    offset += con;
-    if( opnd->scale() != 0 ) goto bottom_out;
-
-    // Check each operand input edge.  Find the 1 allowed pointer
-    // edge.  Other edges must be index edges; track exact constant
-    // inputs and otherwise assume the worst.
-    for ( uint j = opnd->num_edges(); j > 0; j-- ) {
-      Node* edge = n->in(index++);
-      const Type*    et  = edge->bottom_type();
-      const TypeX*   eti = et->isa_intptr_t();
-      if ( eti == NULL ) {
-        // there must be one pointer among the operands
-        guarantee(tptr == NULL, "must be only one pointer operand");
-        if (UseCompressedOops && Universe::narrow_oop_shift() == 0) {
-          // 32-bits narrow oop can be the base of address expressions
-          tptr = et->make_ptr()->isa_oopptr();
-        } else {
-          // only regular oops are expected here
-          tptr = et->isa_oopptr();
-        }
-        guarantee(tptr != NULL, "non-int operand must be pointer");
-        if (tptr->higher_equal(tp->add_offset(tptr->offset())))
-          tp = tptr; // Set more precise type for bailout
-        continue;
-      }
-      if ( eti->_hi != eti->_lo )  goto bottom_out;
-      offset += eti->_lo;
-    }
-  }
-  guarantee(tptr != NULL, "must be exactly one pointer operand");
-  return tptr->add_offset(offset);
-
- bottom_out:
-  return tp->add_offset(TypePtr::OffsetBot);
-}
-
 //=============================================================================
 //------------------------------Identity---------------------------------------
 Node *OrINode::Identity( PhaseTransform *phase ) {
--- a/src/share/vm/opto/addnode.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/opto/addnode.hpp	Wed May 19 10:22:39 2010 -0700
@@ -151,7 +151,6 @@
 
   // Do not match base-ptr edge
   virtual uint match_edge(uint idx) const;
-  static const Type *mach_bottom_type(const MachNode* n);  // used by ad_<arch>.hpp
 };
 
 //------------------------------OrINode----------------------------------------
--- a/src/share/vm/opto/bytecodeInfo.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/opto/bytecodeInfo.cpp	Wed May 19 10:22:39 2010 -0700
@@ -477,12 +477,7 @@
   }
   int new_depth_adjust = 0;
   if (caller_jvms->method() != NULL) {
-    if ((caller_jvms->method()->name() == ciSymbol::invoke_name() &&
-         caller_jvms->method()->holder()->name() == ciSymbol::java_dyn_MethodHandle())
-        || caller_jvms->method()->holder()->name() == ciSymbol::java_dyn_InvokeDynamic())
-      /* @@@ FIXME:
     if (caller_jvms->method()->is_method_handle_adapter())
-      */
       new_depth_adjust -= 1;  // don't count actions in MH or indy adapter frames
     else if (callee_method->is_method_handle_invoke()) {
       new_depth_adjust -= 1;  // don't count method handle calls from java.dyn implem
--- a/src/share/vm/opto/c2_globals.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/opto/c2_globals.hpp	Wed May 19 10:22:39 2010 -0700
@@ -52,9 +52,6 @@
           "Code alignment for interior entry points "                       \
           "in generated code (in bytes)")                                   \
                                                                             \
-  product_pd(intx, OptoLoopAlignment,                                       \
-          "Align inner loops to zero relative to this modulus")             \
-                                                                            \
   product(intx, MaxLoopPad, (OptoLoopAlignment-1),                          \
           "Align a loop if padding size in bytes is less or equal to this value") \
                                                                             \
--- a/src/share/vm/opto/c2compiler.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/opto/c2compiler.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -105,8 +105,7 @@
   }
   bool subsume_loads = true;
   bool do_escape_analysis = DoEscapeAnalysis &&
-                            !(env->jvmti_can_hotswap_or_post_breakpoint() ||
-                              env->jvmti_can_examine_or_deopt_anywhere());
+    !env->jvmti_can_access_local_variables();
   while (!env->failing()) {
     // Attempt to compile while subsuming loads into machine instructions.
     Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis);
--- a/src/share/vm/opto/cfgnode.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/opto/cfgnode.cpp	Wed May 19 10:22:39 2010 -0700
@@ -956,6 +956,7 @@
     }
     if( jtkp && ttkp ) {
       if( jtkp->is_loaded() &&  jtkp->klass()->is_interface() &&
+          !jtkp->klass_is_exact() && // Keep exact interface klass (6894807)
           ttkp->is_loaded() && !ttkp->klass()->is_interface() ) {
         assert(ft == ttkp->cast_to_ptr_type(jtkp->ptr()) ||
                ft->isa_narrowoop() && ft->make_ptr() == ttkp->cast_to_ptr_type(jtkp->ptr()), "");
--- a/src/share/vm/opto/classes.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/opto/classes.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,8 @@
 macro(BoxLock)
 macro(ReverseBytesI)
 macro(ReverseBytesL)
+macro(ReverseBytesUS)
+macro(ReverseBytesS)
 macro(CProj)
 macro(CallDynamicJava)
 macro(CallJava)
--- a/src/share/vm/opto/compile.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/opto/compile.cpp	Wed May 19 10:22:39 2010 -0700
@@ -871,7 +871,6 @@
   set_has_split_ifs(false);
   set_has_loops(has_method() && method()->has_loops()); // first approximation
   set_has_stringbuilder(false);
-  _deopt_happens = true;  // start out assuming the worst
   _trap_can_recompile = false;  // no traps emitted yet
   _major_progress = true; // start out assuming good things will happen
   set_has_unsafe_access(false);
--- a/src/share/vm/opto/compile.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/opto/compile.hpp	Wed May 19 10:22:39 2010 -0700
@@ -146,7 +146,6 @@
   int                   _orig_pc_slot_offset_in_bytes;
 
   int                   _major_progress;        // Count of something big happening
-  bool                  _deopt_happens;         // TRUE if de-optimization CAN happen
   bool                  _has_loops;             // True if the method _may_ have some loops
   bool                  _has_split_ifs;         // True if the method _may_ have some split-if
   bool                  _has_unsafe_access;     // True if the method _may_ produce faults in unsafe loads or stores.
@@ -300,7 +299,6 @@
   void          set_freq_inline_size(int n)     { _freq_inline_size = n; }
   int               freq_inline_size() const    { return _freq_inline_size; }
   void          set_max_inline_size(int n)      { _max_inline_size = n; }
-  bool              deopt_happens() const       { return _deopt_happens; }
   bool              has_loops() const           { return _has_loops; }
   void          set_has_loops(bool z)           { _has_loops = z; }
   bool              has_split_ifs() const       { return _has_split_ifs; }
--- a/src/share/vm/opto/escape.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/opto/escape.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1989,20 +1989,15 @@
     case Op_Allocate:
     {
       Node *k = call->in(AllocateNode::KlassNode);
-      const TypeKlassPtr *kt;
-      if (k->Opcode() == Op_LoadKlass) {
-        kt = k->as_Load()->type()->isa_klassptr();
-      } else {
-        // Also works for DecodeN(LoadNKlass).
-        kt = k->as_Type()->type()->isa_klassptr();
-      }
+      const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
       assert(kt != NULL, "TypeKlassPtr  required.");
       ciKlass* cik = kt->klass();
-      ciInstanceKlass* ciik = cik->as_instance_klass();
 
       PointsToNode::EscapeState es;
       uint edge_to;
-      if (cik->is_subclass_of(_compile->env()->Thread_klass()) || ciik->has_finalizer()) {
+      if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
+         !cik->is_instance_klass() || // StressReflectiveCode
+          cik->as_instance_klass()->has_finalizer()) {
         es = PointsToNode::GlobalEscape;
         edge_to = _phantom_object; // Could not be worse
       } else {
@@ -2017,13 +2012,28 @@
 
     case Op_AllocateArray:
     {
-      int length = call->in(AllocateNode::ALength)->find_int_con(-1);
-      if (length < 0 || length > EliminateAllocationArraySizeLimit) {
-        // Not scalar replaceable if the length is not constant or too big.
-        ptnode_adr(call_idx)->_scalar_replaceable = false;
+
+      Node *k = call->in(AllocateNode::KlassNode);
+      const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
+      assert(kt != NULL, "TypeKlassPtr  required.");
+      ciKlass* cik = kt->klass();
+
+      PointsToNode::EscapeState es;
+      uint edge_to;
+      if (!cik->is_array_klass()) { // StressReflectiveCode
+        es = PointsToNode::GlobalEscape;
+        edge_to = _phantom_object;
+      } else {
+        es = PointsToNode::NoEscape;
+        edge_to = call_idx;
+        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
+        if (length < 0 || length > EliminateAllocationArraySizeLimit) {
+          // Not scalar replaceable if the length is not constant or too big.
+          ptnode_adr(call_idx)->_scalar_replaceable = false;
+        }
       }
-      set_escape_state(call_idx, PointsToNode::NoEscape);
-      add_pointsto_edge(resproj_idx, call_idx);
+      set_escape_state(call_idx, es);
+      add_pointsto_edge(resproj_idx, edge_to);
       _processed.set(resproj_idx);
       break;
     }
--- a/src/share/vm/opto/graphKit.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/opto/graphKit.cpp	Wed May 19 10:22:39 2010 -0700
@@ -812,10 +812,6 @@
 
   JVMState* youngest_jvms = sync_jvms();
 
-  // Do we need debug info here?  If it is a SafePoint and this method
-  // cannot de-opt, then we do NOT need any debug info.
-  bool full_info = (C->deopt_happens() || call->Opcode() != Op_SafePoint);
-
   // If we are guaranteed to throw, we can prune everything but the
   // input to the current bytecode.
   bool can_prune_locals = false;
@@ -829,10 +825,9 @@
     }
   }
 
-  if (env()->jvmti_can_examine_or_deopt_anywhere()) {
+  if (env()->jvmti_can_access_local_variables()) {
     // At any safepoint, this method can get breakpointed, which would
     // then require an immediate deoptimization.
-    full_info = true;
     can_prune_locals = false;  // do not prune locals
     stack_slots_not_pruned = 0;
   }
@@ -890,7 +885,7 @@
     k = in_jvms->locoff();
     l = in_jvms->loc_size();
     out_jvms->set_locoff(p);
-    if (full_info && !can_prune_locals) {
+    if (!can_prune_locals) {
       for (j = 0; j < l; j++)
         call->set_req(p++, in_map->in(k+j));
     } else {
@@ -901,7 +896,7 @@
     k = in_jvms->stkoff();
     l = in_jvms->sp();
     out_jvms->set_stkoff(p);
-    if (full_info && !can_prune_locals) {
+    if (!can_prune_locals) {
       for (j = 0; j < l; j++)
         call->set_req(p++, in_map->in(k+j));
     } else if (can_prune_locals && stack_slots_not_pruned != 0) {
--- a/src/share/vm/opto/idealGraphPrinter.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/opto/idealGraphPrinter.cpp	Wed May 19 10:22:39 2010 -0700
@@ -151,7 +151,8 @@
     } else {
       // It would be nice if we could shut down cleanly but it should
       // be an error if we can't connect to the visualizer.
-      fatal2("Couldn't connect to visualizer at %s:%d", PrintIdealGraphAddress, PrintIdealGraphPort);
+      fatal(err_msg("Couldn't connect to visualizer at %s:%d",
+                    PrintIdealGraphAddress, PrintIdealGraphPort));
     }
   }
 
--- a/src/share/vm/opto/library_call.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/opto/library_call.cpp	Wed May 19 10:22:39 2010 -0700
@@ -636,6 +636,8 @@
 
   case vmIntrinsics::_reverseBytes_i:
   case vmIntrinsics::_reverseBytes_l:
+  case vmIntrinsics::_reverseBytes_s:
+  case vmIntrinsics::_reverseBytes_c:
     return inline_reverseBytes((vmIntrinsics::ID) intrinsic_id());
 
   case vmIntrinsics::_get_AtomicLong:
@@ -807,8 +809,7 @@
   Node* no_ctrl = NULL;
 
   ciInstanceKlass* klass = env()->String_klass();
-  const TypeInstPtr* string_type =
-        TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);
+  const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(klass);
 
   const TypeAryPtr* value_type =
         TypeAryPtr::make(TypePtr::NotNull,
@@ -881,8 +882,7 @@
   }
 
   ciInstanceKlass* klass = env()->String_klass();
-  const TypeInstPtr* string_type =
-    TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);
+  const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(klass);
   Node* no_ctrl = NULL;
 
   // Get counts for string and argument
@@ -956,14 +956,16 @@
     }
   }
 
-  const TypeInstPtr* string_type =
-    TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);
+  const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(klass);
 
   Node* no_ctrl = NULL;
   Node* receiver_cnt;
   Node* argument_cnt;
 
   if (!stopped()) {
+    // Properly cast the argument to String
+    argument = _gvn.transform(new (C, 2) CheckCastPPNode(control(), argument, string_type));
+
     // Get counts for string and argument
     Node* receiver_cnta = basic_plus_adr(receiver, receiver, count_offset);
     receiver_cnt  = make_load(no_ctrl, receiver_cnta, TypeInt::INT, T_INT, string_type->add_offset(count_offset));
@@ -1088,7 +1090,7 @@
   const int offset_offset = java_lang_String::offset_offset_in_bytes();
 
   ciInstanceKlass* klass = env()->String_klass();
-  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);
+  const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(klass);
   const TypeAryPtr*  source_type = TypeAryPtr::make(TypePtr::NotNull, TypeAry::make(TypeInt::CHAR,TypeInt::POS), ciTypeArrayKlass::make(T_CHAR), true, 0);
 
   Node* sourceOffseta = basic_plus_adr(string_object, string_object, offset_offset);
@@ -1173,7 +1175,9 @@
   Node *receiver = pop();
 
   Node* result;
-  if (Matcher::has_match_rule(Op_StrIndexOf) &&
+  // Disable the use of pcmpestri until it can be guaranteed that
+  // the load doesn't cross into the uncommitted space.
+  if (false && Matcher::has_match_rule(Op_StrIndexOf) &&
       UseSSE42Intrinsics) {
     // Generate SSE4.2 version of indexOf
     // We currently only have match rules that use SSE4.2
@@ -1197,8 +1201,7 @@
     Node* no_ctrl  = NULL;
 
     ciInstanceKlass* klass = env()->String_klass();
-    const TypeInstPtr* string_type =
-      TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);
+    const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(klass);
 
     // Get counts for string and substr
     Node* source_cnta = basic_plus_adr(receiver, receiver, count_offset);
@@ -2010,13 +2013,19 @@
   return true;
 }
 
-//----------------------------inline_reverseBytes_int/long-------------------
+//----------------------------inline_reverseBytes_int/long/char/short-------------------
 // inline Integer.reverseBytes(int)
 // inline Long.reverseBytes(long)
+// inline Character.reverseBytes(char)
+// inline Short.reverseBytes(short)
 bool LibraryCallKit::inline_reverseBytes(vmIntrinsics::ID id) {
-  assert(id == vmIntrinsics::_reverseBytes_i || id == vmIntrinsics::_reverseBytes_l, "not reverse Bytes");
-  if (id == vmIntrinsics::_reverseBytes_i && !Matcher::has_match_rule(Op_ReverseBytesI)) return false;
-  if (id == vmIntrinsics::_reverseBytes_l && !Matcher::has_match_rule(Op_ReverseBytesL)) return false;
+  assert(id == vmIntrinsics::_reverseBytes_i || id == vmIntrinsics::_reverseBytes_l ||
+         id == vmIntrinsics::_reverseBytes_c || id == vmIntrinsics::_reverseBytes_s,
+         "not reverse Bytes");
+  if (id == vmIntrinsics::_reverseBytes_i && !Matcher::has_match_rule(Op_ReverseBytesI))  return false;
+  if (id == vmIntrinsics::_reverseBytes_l && !Matcher::has_match_rule(Op_ReverseBytesL))  return false;
+  if (id == vmIntrinsics::_reverseBytes_c && !Matcher::has_match_rule(Op_ReverseBytesUS)) return false;
+  if (id == vmIntrinsics::_reverseBytes_s && !Matcher::has_match_rule(Op_ReverseBytesS))  return false;
   _sp += arg_size();        // restore stack pointer
   switch (id) {
   case vmIntrinsics::_reverseBytes_i:
@@ -2025,6 +2034,12 @@
   case vmIntrinsics::_reverseBytes_l:
     push_pair(_gvn.transform(new (C, 2) ReverseBytesLNode(0, pop_pair())));
     break;
+  case vmIntrinsics::_reverseBytes_c:
+    push(_gvn.transform(new (C, 2) ReverseBytesUSNode(0, pop())));
+    break;
+  case vmIntrinsics::_reverseBytes_s:
+    push(_gvn.transform(new (C, 2) ReverseBytesSNode(0, pop())));
+    break;
   default:
     ;
   }
--- a/src/share/vm/opto/macro.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/opto/macro.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1487,11 +1487,11 @@
                                         Node*& contended_phi_rawmem,
                                         Node* old_eden_top, Node* new_eden_top,
                                         Node* length) {
+   enum { fall_in_path = 1, pf_path = 2 };
    if( UseTLAB && AllocatePrefetchStyle == 2 ) {
       // Generate prefetch allocation with watermark check.
       // As an allocation hits the watermark, we will prefetch starting
       // at a "distance" away from watermark.
-      enum { fall_in_path = 1, pf_path = 2 };
 
       Node *pf_region = new (C, 3) RegionNode(3);
       Node *pf_phi_rawmem = new (C, 3) PhiNode( pf_region, Type::MEMORY,
@@ -1570,6 +1570,45 @@
       needgc_false = pf_region;
       contended_phi_rawmem = pf_phi_rawmem;
       i_o = pf_phi_abio;
+   } else if( UseTLAB && AllocatePrefetchStyle == 3 ) {
+      // Insert a prefetch for each allocation only on the fast-path
+      Node *pf_region = new (C, 3) RegionNode(3);
+      Node *pf_phi_rawmem = new (C, 3) PhiNode( pf_region, Type::MEMORY,
+                                                TypeRawPtr::BOTTOM );
+
+      // Generate several prefetch instructions for arrays; one suffices otherwise.
+      uint lines = (length != NULL) ? AllocatePrefetchLines : 1;
+      uint step_size = AllocatePrefetchStepSize;
+      uint distance = AllocatePrefetchDistance;
+
+      // Next cache address.
+      Node *cache_adr = new (C, 4) AddPNode(old_eden_top, old_eden_top,
+                                            _igvn.MakeConX(distance));
+      transform_later(cache_adr);
+      cache_adr = new (C, 2) CastP2XNode(needgc_false, cache_adr);
+      transform_later(cache_adr);
+      Node* mask = _igvn.MakeConX(~(intptr_t)(step_size-1));
+      cache_adr = new (C, 3) AndXNode(cache_adr, mask);
+      transform_later(cache_adr);
+      cache_adr = new (C, 2) CastX2PNode(cache_adr);
+      transform_later(cache_adr);
+
+      // Prefetch
+      Node *prefetch = new (C, 3) PrefetchWriteNode( contended_phi_rawmem, cache_adr );
+      prefetch->set_req(0, needgc_false);
+      transform_later(prefetch);
+      contended_phi_rawmem = prefetch;
+      Node *prefetch_adr;
+      distance = step_size;
+      for ( uint i = 1; i < lines; i++ ) {
+        prefetch_adr = new (C, 4) AddPNode( cache_adr, cache_adr,
+                                            _igvn.MakeConX(distance) );
+        transform_later(prefetch_adr);
+        prefetch = new (C, 3) PrefetchWriteNode( contended_phi_rawmem, prefetch_adr );
+        transform_later(prefetch);
+        distance += step_size;
+        contended_phi_rawmem = prefetch;
+      }
    } else if( AllocatePrefetchStyle > 0 ) {
       // Insert a prefetch for each allocation only on the fast-path
       Node *prefetch_adr;
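
In the style-3 branch above, the CastP2X/AndX/CastX2P sequence is ordinary pointer arithmetic: round (eden_top + distance) down to a cache-line boundary, then step forward line by line. The masking in isolation, with an assumed 64-byte step (the step must be a power of two for the mask to work):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t step_size = 64;   // AllocatePrefetchStepSize (assumed)
      uintptr_t eden_top = 0x1000;      // illustrative address
      uintptr_t distance = 600;         // AllocatePrefetchDistance (assumed)

      uintptr_t cache_adr = (eden_top + distance) & ~(step_size - 1);
      std::printf("0x%llx -> 0x%llx\n",
                  (unsigned long long)(eden_top + distance),
                  (unsigned long long)cache_adr);  // 0x1258 -> 0x1240
      return 0;
    }
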
--- a/src/share/vm/opto/memnode.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/opto/memnode.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1244,5 +1244,5 @@
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return NotAMachineReg; }
   virtual uint match_edge(uint idx) const { return idx==2; }
-  virtual const Type *bottom_type() const { return Type::ABIO; }
+  virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
 };
--- a/src/share/vm/opto/node.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/opto/node.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1188,7 +1188,7 @@
         Node* use = dead->last_out(k);
         igvn->hash_delete(use);       // Yank from hash table prior to mod
         if (use->in(0) == dead) {     // Found another dead node
-          assert (!use->is_Con(), "Control for Con node should be Root node.")
+          assert (!use->is_Con(), "Control for Con node should be Root node.");
           use->set_req(0, top);       // Cut dead edge to prevent processing
           nstack.push(use);           // the dead node again.
         } else {                      // Else found a not-dead user
--- a/src/share/vm/opto/output.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/opto/output.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1231,7 +1231,7 @@
         if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset ) {
           padding = nop_size;
         }
-        assert( labels_not_set || padding == 0, "instruction should already be aligned")
+        assert( labels_not_set || padding == 0, "instruction should already be aligned");
 
         if(padding > 0) {
           assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
@@ -2407,7 +2407,7 @@
       n->dump();
       tty->print_cr("...");
       prior_use->dump();
-      assert_msg(edge_from_to(prior_use,n),msg);
+      assert(edge_from_to(prior_use,n),msg);
     }
     _reg_node.map(def,NULL); // Kill live USEs
   }
@@ -2446,11 +2446,11 @@
       OptoReg::Name reg_lo = _regalloc->get_reg_first(def);
       OptoReg::Name reg_hi = _regalloc->get_reg_second(def);
       if( OptoReg::is_valid(reg_lo) ) {
-        assert_msg(!_reg_node[reg_lo] || edge_from_to(_reg_node[reg_lo],def), msg );
+        assert(!_reg_node[reg_lo] || edge_from_to(_reg_node[reg_lo],def), msg);
         _reg_node.map(reg_lo,n);
       }
       if( OptoReg::is_valid(reg_hi) ) {
-        assert_msg(!_reg_node[reg_hi] || edge_from_to(_reg_node[reg_hi],def), msg );
+        assert(!_reg_node[reg_hi] || edge_from_to(_reg_node[reg_hi],def), msg);
         _reg_node.map(reg_hi,n);
       }
     }
--- a/src/share/vm/opto/parse1.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/opto/parse1.cpp	Wed May 19 10:22:39 2010 -0700
@@ -280,7 +280,13 @@
       continue;
     }
     // Construct code to access the appropriate local.
-    Node *value = fetch_interpreter_state(index, type->basic_type(), locals_addr, osr_buf);
+    BasicType bt = type->basic_type();
+    if (type == TypePtr::NULL_PTR) {
+      // Ptr types are mixed together with T_ADDRESS but NULL is
+      // really for T_OBJECT types so correct it.
+      bt = T_OBJECT;
+    }
+    Node *value = fetch_interpreter_state(index, bt, locals_addr, osr_buf);
     set_local(index, value);
   }
 
--- a/src/share/vm/opto/phaseX.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/opto/phaseX.hpp	Wed May 19 10:22:39 2010 -0700
@@ -310,7 +310,7 @@
   void dump_nodes_and_types_recur( const Node *n, uint depth, bool only_ctrl, VectorSet &visited);
 
   uint   _count_progress;       // For profiling, count transforms that make progress
-  void   set_progress()        { ++_count_progress; assert( allow_progress(),"No progress allowed during verification") }
+  void   set_progress()        { ++_count_progress; assert( allow_progress(),"No progress allowed during verification"); }
   void   clear_progress()      { _count_progress = 0; }
   uint   made_progress() const { return _count_progress; }
 
--- a/src/share/vm/opto/runtime.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/opto/runtime.cpp	Wed May 19 10:22:39 2010 -0700
@@ -865,7 +865,7 @@
     thread->set_exception_stack_size(0);
 
     // Check if the exception PC is a MethodHandle call site.
-    thread->set_is_method_handle_exception(nm->is_method_handle_return(pc));
+    thread->set_is_method_handle_return(nm->is_method_handle_return(pc));
   }
 
   // Restore correct return pc.  Was saved above.
--- a/src/share/vm/opto/subnode.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/opto/subnode.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -509,3 +509,23 @@
   const Type *bottom_type() const { return TypeLong::LONG; }
   virtual uint ideal_reg() const { return Op_RegL; }
 };
+
+//-------------------------------ReverseBytesUSNode--------------------------------
+// reverse bytes of an unsigned short / char
+class ReverseBytesUSNode : public Node {
+public:
+  ReverseBytesUSNode(Node *c, Node *in1) : Node(c, in1) {}
+  virtual int Opcode() const;
+  const Type *bottom_type() const { return TypeInt::CHAR; }
+  virtual uint ideal_reg() const { return Op_RegI; }
+};
+
+//-------------------------------ReverseBytesSNode--------------------------------
+// reverse bytes of a short
+class ReverseBytesSNode : public Node {
+public:
+  ReverseBytesSNode(Node *c, Node *in1) : Node(c, in1) {}
+  virtual int Opcode() const;
+  const Type *bottom_type() const { return TypeInt::SHORT; }
+  virtual uint ideal_reg() const { return Op_RegI; }
+};
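
The bottom types above encode the only semantic difference between the two new nodes: both swap the bytes of a 16-bit value, but the char variant zero-extends the result to 32 bits (TypeInt::CHAR) while the short variant sign-extends it (TypeInt::SHORT). A sketch of the value semantics:

    #include <cstdint>
    #include <cstdio>

    static int32_t reverse_bytes_us(uint16_t x) {   // ReverseBytesUS: char result
      return (uint16_t)((x << 8) | (x >> 8));       // zero-extends on widening
    }
    static int32_t reverse_bytes_s(int16_t x) {     // ReverseBytesS: short result
      uint16_t u = (uint16_t)x;
      return (int16_t)((u << 8) | (u >> 8));        // sign-extends on widening
    }

    int main() {
      std::printf("US(0x00ff) = 0x%x\n", reverse_bytes_us(0x00ff));  // 0xff00
      std::printf("S (0x00ff) = %d\n",   reverse_bytes_s(0x00ff));   // -256
      return 0;
    }
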
--- a/src/share/vm/opto/type.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/opto/type.cpp	Wed May 19 10:22:39 2010 -0700
@@ -2545,12 +2545,15 @@
       ftip->is_loaded() &&  ftip->klass()->is_interface() &&
       ktip->is_loaded() && !ktip->klass()->is_interface()) {
     // Happens in a CTW of rt.jar, 320-341, no extra flags
+    assert(!ftip->klass_is_exact(), "interface could not be exact");
     return ktip->cast_to_ptr_type(ftip->ptr());
   }
+  // Unlike an interface instance type, an interface klass type can be
+  // exact; return it here instead of the incorrect Constant ptr
+  // J/L/Object (6894807).
   if (ftkp != NULL && ktkp != NULL &&
       ftkp->is_loaded() &&  ftkp->klass()->is_interface() &&
+      !ftkp->klass_is_exact() && // Keep exact interface klass
       ktkp->is_loaded() && !ktkp->klass()->is_interface()) {
-    // Happens in a CTW of rt.jar, 320-341, no extra flags
     return ktkp->cast_to_ptr_type(ftkp->ptr());
   }
 
--- a/src/share/vm/prims/forte.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/prims/forte.cpp	Wed May 19 10:22:39 2010 -0700
@@ -647,7 +647,7 @@
 void Forte::register_stub(const char* name, address start, address end) {
 #if !defined(_WINDOWS) && !defined(IA64)
   assert(pointer_delta(end, start, sizeof(jbyte)) < INT_MAX,
-    "Code size exceeds maximum range")
+         "Code size exceeds maximum range");
 
   collector_func_load((char*)name, NULL, NULL, start,
     pointer_delta(end, start, sizeof(jbyte)), 0, NULL);
--- a/src/share/vm/prims/jni.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/prims/jni.cpp	Wed May 19 10:22:39 2010 -0700
@@ -3311,6 +3311,7 @@
     OrderAccess::release_store(&vm_created, 0);
   }
 
+  NOT_PRODUCT(test_error_handler(ErrorHandlerTest));
   return result;
 }
 
--- a/src/share/vm/prims/jvmtiExport.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/prims/jvmtiExport.cpp	Wed May 19 10:22:39 2010 -0700
@@ -270,7 +270,6 @@
 int               JvmtiExport::_field_modification_count                  = 0;
 
 bool              JvmtiExport::_can_access_local_variables                = false;
-bool              JvmtiExport::_can_examine_or_deopt_anywhere             = false;
 bool              JvmtiExport::_can_hotswap_or_post_breakpoint            = false;
 bool              JvmtiExport::_can_modify_any_class                      = false;
 bool              JvmtiExport::_can_walk_any_space                        = false;
--- a/src/share/vm/prims/jvmtiExport.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/prims/jvmtiExport.hpp	Wed May 19 10:22:39 2010 -0700
@@ -58,7 +58,6 @@
   static int         _field_modification_count;
 
   static bool        _can_access_local_variables;
-  static bool        _can_examine_or_deopt_anywhere;
   static bool        _can_hotswap_or_post_breakpoint;
   static bool        _can_modify_any_class;
   static bool        _can_walk_any_space;
@@ -112,7 +111,6 @@
 
   // these should only be called by the friend class
   friend class JvmtiManageCapabilities;
-  inline static void set_can_examine_or_deopt_anywhere(bool on)        { _can_examine_or_deopt_anywhere = (on != 0); }
   inline static void set_can_modify_any_class(bool on)                 { _can_modify_any_class = (on != 0); }
   inline static void set_can_access_local_variables(bool on)           { _can_access_local_variables = (on != 0); }
   inline static void set_can_hotswap_or_post_breakpoint(bool on)       { _can_hotswap_or_post_breakpoint = (on != 0); }
@@ -220,7 +218,6 @@
   static void enter_live_phase();
 
   // ------ can_* conditions (below) are set at OnLoad and never changed ------------
-  inline static bool can_examine_or_deopt_anywhere()              { return _can_examine_or_deopt_anywhere; }
   inline static bool can_modify_any_class()                       { return _can_modify_any_class; }
   inline static bool can_access_local_variables()                 { return _can_access_local_variables; }
   inline static bool can_hotswap_or_post_breakpoint()             { return _can_hotswap_or_post_breakpoint; }
--- a/src/share/vm/prims/jvmtiManageCapabilities.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/prims/jvmtiManageCapabilities.cpp	Wed May 19 10:22:39 2010 -0700
@@ -332,16 +332,6 @@
   }
 
   JvmtiExport::set_can_get_source_debug_extension(avail.can_get_source_debug_extension);
-  JvmtiExport::set_can_examine_or_deopt_anywhere(
-    avail.can_generate_breakpoint_events ||
-    interp_events ||
-    avail.can_redefine_classes ||
-    avail.can_retransform_classes ||
-    avail.can_access_local_variables ||
-    avail.can_get_owned_monitor_info ||
-    avail.can_get_current_contended_monitor ||
-    avail.can_get_monitor_info ||
-    avail.can_get_owned_monitor_stack_depth_info);
   JvmtiExport::set_can_maintain_original_method_order(avail.can_maintain_original_method_order);
   JvmtiExport::set_can_post_interpreter_events(interp_events);
   JvmtiExport::set_can_hotswap_or_post_breakpoint(
@@ -353,10 +343,13 @@
     avail.can_generate_all_class_hook_events);
   JvmtiExport::set_can_walk_any_space(
     avail.can_tag_objects);   // disable sharing in onload phase
+  // This controls whether the compilers keep extra locals live to
+  // improve the debugging experience, so only set it if the selected
+  // capabilities look like a debugger.
   JvmtiExport::set_can_access_local_variables(
-    avail.can_access_local_variables  ||
-    avail.can_redefine_classes ||
-    avail.can_retransform_classes);
+    avail.can_access_local_variables ||
+    avail.can_generate_breakpoint_events ||
+    avail.can_generate_frame_pop_events);
   JvmtiExport::set_can_post_on_exceptions(
     avail.can_generate_exception_events ||
     avail.can_generate_frame_pop_events ||
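
With the rewiring above, a JVMTI agent no longer has to request can_access_local_variables explicitly to get debugger-friendly frames; asking for breakpoint or frame-pop events is enough. A minimal sketch of the agent side, assuming a valid jvmtiEnv* obtained in Agent_OnLoad (error handling elided):

    #include <jvmti.h>
    #include <string.h>

    // Sketch: request a debugger-like capability set during OnLoad.
    // After the change above, these capabilities also make the
    // compilers keep extra locals live.
    static jvmtiError request_debugger_caps(jvmtiEnv* jvmti) {
      jvmtiCapabilities caps;
      memset(&caps, 0, sizeof(caps));
      caps.can_generate_breakpoint_events = 1;
      caps.can_generate_frame_pop_events  = 1;
      return jvmti->AddCapabilities(&caps);  // legal in the OnLoad and live phases
    }
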
--- a/src/share/vm/prims/methodHandleWalk.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/prims/methodHandleWalk.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1173,9 +1173,9 @@
   // has no receiver, normal MH calls do.
   int flags_bits;
   if (for_invokedynamic())
-    flags_bits = (/*JVM_MH_INVOKE_BITS |*/ JVM_ACC_PUBLIC | JVM_ACC_FINAL | JVM_ACC_STATIC);
+    flags_bits = (/*JVM_MH_INVOKE_BITS |*/ JVM_ACC_PUBLIC | JVM_ACC_FINAL | JVM_ACC_SYNTHETIC | JVM_ACC_STATIC);
   else
-    flags_bits = (/*JVM_MH_INVOKE_BITS |*/ JVM_ACC_PUBLIC | JVM_ACC_FINAL);
+    flags_bits = (/*JVM_MH_INVOKE_BITS |*/ JVM_ACC_PUBLIC | JVM_ACC_FINAL | JVM_ACC_SYNTHETIC);
 
   bool is_conc_safe = true;
   methodOop m_oop = oopFactory::new_method(bytecode_length(),
@@ -1217,6 +1217,7 @@
   }
 #endif //PRODUCT
 
+  assert(m->is_method_handle_adapter(), "must be recognized as an adapter");
   return m;
 }
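
Tagging the generated invoke adapters JVM_ACC_SYNTHETIC keeps them out of reflective listings that filter compiler-generated members; java.lang.reflect.Method.isSynthetic() reports the same bit. A small sketch of the test, with the constant value taken from jvm.h:

    // Sketch: the synthetic access-flag test implied above.
    // JVM_ACC_SYNTHETIC is 0x1000 in jvm.h.
    inline bool is_synthetic_method(int flags_bits) {
      return (flags_bits & 0x1000 /*JVM_ACC_SYNTHETIC*/) != 0;
    }
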
 
--- a/src/share/vm/prims/methodHandles.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/prims/methodHandles.cpp	Wed May 19 10:22:39 2010 -0700
@@ -366,6 +366,13 @@
   VM_INDEX_UNINITIALIZED = sun_dyn_MemberName::VM_INDEX_UNINITIALIZED
 };
 
+Handle MethodHandles::new_MemberName(TRAPS) {
+  Handle empty;
+  instanceKlassHandle k(THREAD, SystemDictionary::MemberName_klass());
+  if (!k->is_initialized())  k->initialize(CHECK_(empty));
+  return Handle(THREAD, k->allocate_instance(THREAD));
+}
+
 void MethodHandles::init_MemberName(oop mname_oop, oop target_oop) {
   if (target_oop->klass() == SystemDictionary::reflect_Field_klass()) {
     oop clazz = java_lang_reflect_Field::clazz(target_oop); // fd.field_holder()
@@ -394,16 +401,18 @@
   sun_dyn_MemberName::set_vmtarget(mname_oop, vmtarget);
   sun_dyn_MemberName::set_vmindex(mname_oop,  vmindex);
   sun_dyn_MemberName::set_flags(mname_oop,    flags);
+  sun_dyn_MemberName::set_clazz(mname_oop,    Klass::cast(m->method_holder())->java_mirror());
 }
 
 void MethodHandles::init_MemberName(oop mname_oop, klassOop field_holder, AccessFlags mods, int offset) {
   int flags = (IS_FIELD | (jushort)( mods.as_short() & JVM_RECOGNIZED_FIELD_MODIFIERS ));
   oop vmtarget = field_holder;
-  int vmindex  = offset;  // implies no info yet
+  int vmindex  = offset;  // determines the field uniquely when combined with static bit
   assert(vmindex != VM_INDEX_UNINITIALIZED, "bad alias on vmindex");
   sun_dyn_MemberName::set_vmtarget(mname_oop, vmtarget);
   sun_dyn_MemberName::set_vmindex(mname_oop,  vmindex);
   sun_dyn_MemberName::set_flags(mname_oop,    flags);
+  sun_dyn_MemberName::set_clazz(mname_oop,    Klass::cast(field_holder)->java_mirror());
 }
 
 
@@ -466,16 +475,25 @@
   if (name.is_null())  return;  // no such name
   name_str = NULL;  // safety
 
+  Handle polymorphic_method_type;
+  bool polymorphic_signature = false;
+  if ((flags & ALL_KINDS) == IS_METHOD &&
+      (defc() == SystemDictionary::InvokeDynamic_klass() ||
+       (defc() == SystemDictionary::MethodHandle_klass() &&
+        methodOopDesc::is_method_handle_invoke_name(name()))))
+    polymorphic_signature = true;
+
   // convert the external string or reflective type to an internal signature
-  bool force_signature = (name() == vmSymbols::invoke_name());
   symbolHandle type; {
     symbolOop type_sym = NULL;
     if (java_dyn_MethodType::is_instance(type_str)) {
-      type_sym = java_dyn_MethodType::as_signature(type_str, force_signature, CHECK);
+      type_sym = java_dyn_MethodType::as_signature(type_str, polymorphic_signature, CHECK);
+      if (polymorphic_signature)
+        polymorphic_method_type = Handle(THREAD, type_str);  // preserve exactly
     } else if (java_lang_Class::is_instance(type_str)) {
-      type_sym = java_lang_Class::as_signature(type_str, force_signature, CHECK);
+      type_sym = java_lang_Class::as_signature(type_str, false, CHECK);
     } else if (java_lang_String::is_instance(type_str)) {
-      if (force_signature) {
+      if (polymorphic_signature) {
         type     = java_lang_String::as_symbol(type_str, CHECK);
       } else {
         type_sym = java_lang_String::as_symbol_or_null(type_str);
@@ -508,7 +526,7 @@
         }
         if (HAS_PENDING_EXCEPTION) {
           CLEAR_PENDING_EXCEPTION;
-          return;
+          break;  // go to second chance
         }
       }
       methodHandle m = result.resolved_method();
@@ -582,8 +600,42 @@
       sun_dyn_MemberName::set_modifiers(mname(), mods);
       return;
     }
+  default:
+    THROW_MSG(vmSymbols::java_lang_InternalError(), "unrecognized MemberName format");
   }
-  THROW_MSG(vmSymbols::java_lang_InternalError(), "unrecognized MemberName format");
+
+  // Second chance.
+  if (polymorphic_method_type.not_null()) {
+    // Look on a non-null class loader.
+    Handle cur_class_loader;
+    const int nptypes = java_dyn_MethodType::ptype_count(polymorphic_method_type());
+    for (int i = 0; i <= nptypes; i++) {
+      oop type_mirror;
+      if (i < nptypes)  type_mirror = java_dyn_MethodType::ptype(polymorphic_method_type(), i);
+      else              type_mirror = java_dyn_MethodType::rtype(polymorphic_method_type());
+      klassOop example_type = java_lang_Class::as_klassOop(type_mirror);
+      if (example_type == NULL)  continue;
+      oop class_loader = Klass::cast(example_type)->class_loader();
+      if (class_loader == NULL || class_loader == cur_class_loader())  continue;
+      cur_class_loader = Handle(THREAD, class_loader);
+      methodOop m = SystemDictionary::find_method_handle_invoke(name,
+                                                                type,
+                                                                KlassHandle(THREAD, example_type),
+                                                                THREAD);
+      if (HAS_PENDING_EXCEPTION) {
+        CLEAR_PENDING_EXCEPTION;
+        m = NULL;
+        // try again with a different class loader...
+      }
+      if (m != NULL) {
+        int mods = (m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS);
+        sun_dyn_MemberName::set_vmtarget(mname(),  m);
+        sun_dyn_MemberName::set_vmindex(mname(),   m->vtable_index());
+        sun_dyn_MemberName::set_modifiers(mname(), mods);
+        return;
+      }
+    }
+  }
 }
 
 // Conversely, a member name which is only initialized from JVM internals
@@ -775,6 +827,20 @@
 }
 
 
+// Decode this java.lang.Class object into an instanceKlass, if possible.
+// Throws IllegalArgumentException if not.
+instanceKlassHandle MethodHandles::resolve_instance_klass(oop java_mirror_oop, TRAPS) {
+  instanceKlassHandle empty;
+  klassOop caller = NULL;
+  if (java_lang_Class::is_instance(java_mirror_oop)) {
+    caller = java_lang_Class::as_klassOop(java_mirror_oop);
+  }
+  if (caller == NULL || !Klass::cast(caller)->oop_is_instance()) {
+    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), "not a class", empty);
+  }
+  return instanceKlassHandle(THREAD, caller);
+}
+
 
 
 // Decode the vmtarget field of a method handle.
@@ -970,6 +1036,13 @@
       pnum += 1;
       mnum += 1;
     }
+    klassOop  pklass = NULL;
+    BasicType ptype  = T_OBJECT;
+    if (ptype_oop != NULL)
+      ptype = java_lang_Class::as_BasicType(ptype_oop, &pklass);
+    else
+      // null does not match any non-reference; use Object to report the error
+      pklass = SystemDictionary::Object_klass();
     klassOop  mklass = NULL;
     BasicType mtype  = ss.type();
     if (mtype == T_ARRAY)  mtype = T_OBJECT; // fold all refs to T_OBJECT
@@ -978,21 +1051,22 @@
         // null matches any reference
         continue;
       }
+      KlassHandle pklass_handle(THREAD, pklass); pklass = NULL;
       // If we fail to resolve types at this point, we will throw an error.
       symbolOop    name_oop = ss.as_symbol(CHECK);
       symbolHandle name(THREAD, name_oop);
       instanceKlass* mk = instanceKlass::cast(m->method_holder());
       Handle loader(THREAD, mk->class_loader());
       Handle domain(THREAD, mk->protection_domain());
-      mklass = SystemDictionary::resolve_or_fail(name, loader, domain,
-                                                 true, CHECK);
+      mklass = SystemDictionary::resolve_or_null(name, loader, domain, CHECK);
+      pklass = pklass_handle();
+      if (mklass == NULL && pklass != NULL &&
+          Klass::cast(pklass)->name() == name() &&
+          m->is_method_handle_invoke()) {
+        // Assume a match.  We can't really decode the signature of MH.invoke*.
+        continue;
+      }
     }
-    if (ptype_oop == NULL) {
-      // null does not match any non-reference; use Object to report the error
-      ptype_oop = object_java_mirror();
-    }
-    klassOop  pklass = NULL;
-    BasicType ptype  = java_lang_Class::as_BasicType(ptype_oop, &pklass);
     if (!ss.at_return_type()) {
       err = check_argument_type_change(ptype, pklass, mtype, mklass, mnum);
     } else {
@@ -2115,31 +2189,26 @@
     KlassHandle caller(THREAD, java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(caller_jh)));
     // If this were a bytecode, the first access check would be against
     // the "reference class" mentioned in the CONSTANT_Methodref.
-    // For that class, we use the defining class of m,
-    // or a more specific receiver limit if available.
-    klassOop reference_klass = m->method_holder();  // OK approximation
-    if (receiver_limit != NULL && receiver_limit != reference_klass) {
-      if (!Klass::cast(receiver_limit)->is_subtype_of(reference_klass))
-        THROW_MSG(vmSymbols::java_lang_InternalError(), "receiver limit out of bounds");  // Java code bug
-      reference_klass = receiver_limit;
-    }
-    // Emulate LinkResolver::check_klass_accessability.
-    if (!Reflection::verify_class_access(caller->as_klassOop(),
-                                         reference_klass,
-                                         true)) {
-      THROW_MSG(vmSymbols::java_lang_InternalError(), Klass::cast(m->method_holder())->external_name());
-    }
+    // We don't know at this point which class that was, and if we
+    // check against m.method_holder we might get the wrong answer.
+    // So we just make sure to handle this check when the resolution
+    // happens, when we call resolve_MemberName.
+    //
+    // (A public class can inherit public members from private supers,
+    // and it would be wrong to check access against the private super
+    // if the original symbolic reference was against the public class.)
+    //
     // If there were a bytecode, the next step would be to look up the method
     // in the reference class, then check the method's access bits.
     // Emulate LinkResolver::check_method_accessability.
     klassOop resolved_klass = m->method_holder();
     if (!Reflection::verify_field_access(caller->as_klassOop(),
-                                         resolved_klass, reference_klass,
+                                         resolved_klass, resolved_klass,
                                          m->access_flags(),
                                          true)) {
       // %%% following cutout belongs in Reflection::verify_field_access?
       bool same_pm = Reflection::is_same_package_member(caller->as_klassOop(),
-                                                        reference_klass, THREAD);
+                                                        resolved_klass, THREAD);
       if (!same_pm) {
         THROW_MSG(vmSymbols::java_lang_InternalError(), m->name_and_sig_as_C_string());
       }
@@ -2244,6 +2313,8 @@
   case MethodHandles::GC_JVM_STACK_MOVE_UNIT:
     // return number of words per slot, signed according to stack direction
     return MethodHandles::stack_move_unit();
+  case MethodHandles::GC_CONV_OP_IMPLEMENTED_MASK:
+    return MethodHandles::adapter_conversion_ops_supported_mask();
   }
   return 0;
 }
@@ -2342,7 +2413,22 @@
 JVM_ENTRY(void, MHI_resolve_Mem(JNIEnv *env, jobject igcls, jobject mname_jh, jclass caller_jh)) {
   if (mname_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); }
   Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh));
-  // %%% take caller into account!
+
+  // The trusted Java code that calls this method should already have performed
+  // access checks on behalf of the given caller.  But we can verify this.
+  if (VerifyMethodHandles && caller_jh != NULL) {
+    klassOop reference_klass = java_lang_Class::as_klassOop(sun_dyn_MemberName::clazz(mname()));
+    if (reference_klass != NULL) {
+      // Emulate LinkResolver::check_klass_accessability.
+      klassOop caller = java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(caller_jh));
+      if (!Reflection::verify_class_access(caller,
+                                           reference_klass,
+                                           true)) {
+        THROW_MSG(vmSymbols::java_lang_InternalError(), Klass::cast(reference_klass)->external_name());
+      }
+    }
+  }
+
   MethodHandles::resolve_MemberName(mname, CHECK);
 }
 JVM_END
@@ -2387,12 +2473,48 @@
 }
 JVM_END
 
+JVM_ENTRY(void, MHI_registerBootstrap(JNIEnv *env, jobject igcls, jclass caller_jh, jobject bsm_jh)) {
+  instanceKlassHandle ik = MethodHandles::resolve_instance_klass(caller_jh, THREAD);
+  ik->link_class(CHECK);
+  if (!java_dyn_MethodHandle::is_instance(JNIHandles::resolve(bsm_jh))) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "method handle");
+  }
+  const char* err = NULL;
+  if (ik->is_initialized() || ik->is_in_error_state()) {
+    err = "too late: class is already initialized";
+  } else {
+    ObjectLocker ol(ik, THREAD);  // note:  this should be a recursive lock
+    if (ik->is_not_initialized() ||
+        (ik->is_being_initialized() && ik->is_reentrant_initialization(THREAD))) {
+      if (ik->bootstrap_method() != NULL) {
+        err = "class is already equipped with a bootstrap method";
+      } else {
+        ik->set_bootstrap_method(JNIHandles::resolve_non_null(bsm_jh));
+        err = NULL;
+      }
+    } else {
+      err = "class is already initialized";
+      if (ik->is_being_initialized())
+        err = "class is already being initialized in a different thread";
+    }
+  }
+  if (err != NULL) {
+    THROW_MSG(vmSymbols::java_lang_IllegalStateException(), err);
+  }
+}
+JVM_END
 
-JVM_ENTRY(void, MH_linkCallSite(JNIEnv *env, jobject igcls, jobject site_jh, jobject target_jh)) {
+JVM_ENTRY(jobject, MHI_getBootstrap(JNIEnv *env, jobject igcls, jclass caller_jh)) {
+  instanceKlassHandle ik = MethodHandles::resolve_instance_klass(caller_jh, THREAD);
+  return JNIHandles::make_local(THREAD, ik->bootstrap_method());
+}
+JVM_END
+
+JVM_ENTRY(void, MHI_setCallSiteTarget(JNIEnv *env, jobject igcls, jobject site_jh, jobject target_jh)) {
   // No special action required, yet.
   oop site_oop = JNIHandles::resolve(site_jh);
-  if (site_oop == NULL || site_oop->klass() != SystemDictionary::CallSite_klass())
-    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "call site");
+  if (!java_dyn_CallSite::is_instance(site_oop))
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "not a CallSite");
   java_dyn_CallSite::set_target(site_oop, JNIHandles::resolve(target_jh));
 }
 JVM_END
@@ -2442,7 +2564,9 @@
 
 // More entry points specifically for EnableInvokeDynamic.
 static JNINativeMethod methods2[] = {
-  {CC"linkCallSite",            CC"("CST MH")V",                FN_PTR(MH_linkCallSite)}
+  {CC"registerBootstrap",       CC"("CLS MH")V",                FN_PTR(MHI_registerBootstrap)},
+  {CC"getBootstrap",            CC"("CLS")"MH,                  FN_PTR(MHI_getBootstrap)},
+  {CC"setCallSiteTarget",       CC"("CST MH")V",                FN_PTR(MHI_setCallSiteTarget)}
 };
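
Tables like methods2[] above are bound through the standard JNI RegisterNatives call. A hedged sketch of how such a table is typically handed over (the holder class and env are assumed to be in scope, as in this file's registration paths):

    // Sketch: binding a JNINativeMethod table, as done for methods2 above.
    static void register_table(JNIEnv* env, jclass holder,
                               JNINativeMethod* table, jint count) {
      if (env->RegisterNatives(holder, table, count) != JNI_OK) {
        // a failed registration leaves the Java-side natives unbound
      }
    }
    // e.g. register_table(env, mh_natives_class,
    //                     methods2, sizeof(methods2) / sizeof(JNINativeMethod));
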
 
 
--- a/src/share/vm/prims/methodHandles.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/prims/methodHandles.hpp	Wed May 19 10:22:39 2010 -0700
@@ -163,7 +163,7 @@
     default: ShouldNotReachHere();
     }
     // Return the size of the stack slots to move in bytes.
-    swap_bytes = swap_slots * Interpreter::stackElementSize();
+    swap_bytes = swap_slots * Interpreter::stackElementSize;
   }
 
   static int get_ek_adapter_opt_spread_info(EntryKind ek) {
@@ -216,10 +216,13 @@
     return (conv >> CONV_VMINFO_SHIFT) & CONV_VMINFO_MASK;
   }
 
+  // Bit mask of conversion_op values.  May vary by platform.
+  static int adapter_conversion_ops_supported_mask();
+
   // Offset in words that the interpreter stack pointer moves when an argument is pushed.
   // The stack_move value must always be a multiple of this.
   static int stack_move_unit() {
-    return frame::interpreter_frame_expression_stack_direction() * Interpreter::stackElementWords();
+    return frame::interpreter_frame_expression_stack_direction() * Interpreter::stackElementWords;
   }
 
   enum { CONV_VMINFO_SIGN_FLAG = 0x80 };
@@ -262,8 +265,9 @@
   // working with member names
   static void resolve_MemberName(Handle mname, TRAPS); // compute vmtarget/vmindex from name/type
   static void expand_MemberName(Handle mname, int suppress, TRAPS);  // expand defc/name/type if missing
+  static Handle new_MemberName(TRAPS);  // must be followed by init_MemberName
   static void init_MemberName(oop mname_oop, oop target); // compute vmtarget/vmindex from target
-  static void init_MemberName(oop mname_oop, methodOop m, bool do_dispatch);
+  static void init_MemberName(oop mname_oop, methodOop m, bool do_dispatch = true);
   static void init_MemberName(oop mname_oop, klassOop field_holder, AccessFlags mods, int offset);
   static int find_MemberNames(klassOop k, symbolOop name, symbolOop sig,
                               int mflags, klassOop caller,
@@ -300,6 +304,7 @@
     // format of query to getConstant:
     GC_JVM_PUSH_LIMIT = 0,
     GC_JVM_STACK_MOVE_UNIT = 1,
+    GC_CONV_OP_IMPLEMENTED_MASK = 2,
 
     // format of result from getTarget / encode_target:
     ETF_HANDLE_OR_METHOD_NAME = 0, // all available data (immediate MH or method)
@@ -311,6 +316,11 @@
   static oop encode_target(Handle mh, int format, TRAPS); // report vmtarget (to Java code)
   static bool class_cast_needed(klassOop src, klassOop dst);
 
+  static instanceKlassHandle resolve_instance_klass(oop    java_mirror_oop, TRAPS);
+  static instanceKlassHandle resolve_instance_klass(jclass java_mirror_jh,  TRAPS) {
+    return resolve_instance_klass(JNIHandles::resolve(java_mirror_jh), THREAD);
+  }
+
  private:
   // These checkers operate on a pair of whole MethodTypes:
   static const char* check_method_type_change(oop src_mtype, int src_beg, int src_end,
@@ -430,12 +440,12 @@
                                RegisterOrConstant arg_slots,
                                int arg_mask,
                                Register argslot_reg,
-                               Register temp_reg, Register temp2_reg);
+                               Register temp_reg, Register temp2_reg, Register temp3_reg = noreg);
 
   static void remove_arg_slots(MacroAssembler* _masm,
                                RegisterOrConstant arg_slots,
                                Register argslot_reg,
-                               Register temp_reg, Register temp2_reg);
+                               Register temp_reg, Register temp2_reg, Register temp3_reg = noreg);
 };
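
With tagging gone, stackElementSize and stackElementWords become compile-time constants, so stack_move_unit() reduces to a signed constant as well. A worked sketch for an x86-like layout (the direction and element size here are illustrative assumptions):

    // Sketch: stack_move_unit() after untagging, for a stack that grows
    // toward lower addresses with one word per expression-stack slot.
    const int direction           = -1;  // interpreter_frame_expression_stack_direction()
    const int stack_element_words =  1;  // Interpreter::stackElementWords (now a constant)
    const int stack_move_unit     = direction * stack_element_words;  // == -1
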
 
 
--- a/src/share/vm/runtime/arguments.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/runtime/arguments.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1353,6 +1353,16 @@
       MarkStackSize / K, MarkStackSizeMax / K);
     tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
   }
+
+  if (FLAG_IS_DEFAULT(GCTimeRatio) || GCTimeRatio == 0) {
+    // In G1, we want the default GC overhead goal to be higher than,
+    // say, in PS, so we set it here to 10%.  Otherwise the heap might
+    // be expanded more aggressively than we would like.  In fact, even
+    // 10% seems not to be high enough in some cases (especially small
+    // GC stress tests whose main activity is allocation).  We might
+    // consider increasing it further.
+    FLAG_SET_DEFAULT(GCTimeRatio, 9);
+  }
 }
 
 void Arguments::set_heap_size() {
@@ -2857,12 +2867,6 @@
   }
 #endif // _LP64
 
-  // MethodHandles code does not support TaggedStackInterpreter.
-  if (EnableMethodHandles && TaggedStackInterpreter) {
-    warning("TaggedStackInterpreter is not supported by MethodHandles code.  Disabling TaggedStackInterpreter.");
-    TaggedStackInterpreter = false;
-  }
-
   // Check the GC selections again.
   if (!check_gc_consistency()) {
     return JNI_EINVAL;
@@ -2905,11 +2909,6 @@
   LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedOops, false));
 #endif // CC_INTERP
 
-#ifdef ZERO
-  // Clear flags not supported by Zero
-  FLAG_SET_DEFAULT(TaggedStackInterpreter, false);
-#endif // ZERO
-
 #ifdef COMPILER2
   if (!UseBiasedLocking || EmitSync != 0) {
     UseOptoBiasInlining = false;
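
For reference, the GCTimeRatio default set earlier in this file's diff feeds the usual throughput-goal formula: the collector tries to keep GC time under 1/(1 + GCTimeRatio) of total time. A value of 9 therefore targets at most 10% of time in GC, against 1% for the throughput collector's default of 99; a small worked check:

    // Sketch: the GC overhead goal implied by a GCTimeRatio value.
    static double max_gc_time_fraction(int gc_time_ratio) {
      return 1.0 / (1.0 + gc_time_ratio);
    }
    // max_gc_time_fraction(9)  == 0.10  (the G1 default set above)
    // max_gc_time_fraction(99) == 0.01  (the parallel collector default)
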
--- a/src/share/vm/runtime/frame.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/runtime/frame.cpp	Wed May 19 10:22:39 2010 -0700
@@ -468,42 +468,16 @@
   return &((*interpreter_frame_locals_addr())[n]);
 }
 
-frame::Tag frame::interpreter_frame_local_tag(int index) const {
-  const int n = Interpreter::local_tag_offset_in_bytes(index)/wordSize;
-  return (Tag)(*interpreter_frame_locals_addr()) [n];
-}
-
-void frame::interpreter_frame_set_local_tag(int index, Tag tag) const {
-  const int n = Interpreter::local_tag_offset_in_bytes(index)/wordSize;
-  (*interpreter_frame_locals_addr())[n] = (intptr_t)tag;
-}
-
 intptr_t* frame::interpreter_frame_expression_stack_at(jint offset) const {
   const int i = offset * interpreter_frame_expression_stack_direction();
-  const int n = ((i * Interpreter::stackElementSize()) +
-                 Interpreter::value_offset_in_bytes())/wordSize;
+  const int n = i * Interpreter::stackElementWords;
   return &(interpreter_frame_expression_stack()[n]);
 }
 
-frame::Tag frame::interpreter_frame_expression_stack_tag(jint offset) const {
-  const int i = offset * interpreter_frame_expression_stack_direction();
-  const int n = ((i * Interpreter::stackElementSize()) +
-                 Interpreter::tag_offset_in_bytes())/wordSize;
-  return (Tag)(interpreter_frame_expression_stack()[n]);
-}
-
-void frame::interpreter_frame_set_expression_stack_tag(jint offset,
-                                                       Tag tag) const {
-  const int i = offset * interpreter_frame_expression_stack_direction();
-  const int n = ((i * Interpreter::stackElementSize()) +
-                 Interpreter::tag_offset_in_bytes())/wordSize;
-  interpreter_frame_expression_stack()[n] = (intptr_t)tag;
-}
-
 jint frame::interpreter_frame_expression_stack_size() const {
   // Number of elements on the interpreter expression stack
   // Callers should span by stackElementWords
-  int element_size = Interpreter::stackElementWords();
+  int element_size = Interpreter::stackElementWords;
   if (frame::interpreter_frame_expression_stack_direction() < 0) {
     return (interpreter_frame_expression_stack() -
             interpreter_frame_tos_address() + 1)/element_size;
@@ -585,20 +559,12 @@
   for (i = 0; i < interpreter_frame_method()->max_locals(); i++ ) {
     intptr_t x = *interpreter_frame_local_at(i);
     st->print(" - local  [" INTPTR_FORMAT "]", x);
-    if (TaggedStackInterpreter) {
-      Tag x = interpreter_frame_local_tag(i);
-      st->print(" - local tag [" INTPTR_FORMAT "]", x);
-    }
     st->fill_to(23);
     st->print_cr("; #%d", i);
   }
   for (i = interpreter_frame_expression_stack_size() - 1; i >= 0; --i ) {
     intptr_t x = *interpreter_frame_expression_stack_at(i);
     st->print(" - stack  [" INTPTR_FORMAT "]", x);
-    if (TaggedStackInterpreter) {
-      Tag x = interpreter_frame_expression_stack_tag(i);
-      st->print(" - stack tag [" INTPTR_FORMAT "]", x);
-    }
     st->fill_to(23);
     st->print_cr("; #%d", i);
   }
@@ -844,7 +810,7 @@
   }
 
   void oop_at_offset_do(int offset) {
-    assert (offset >= 0, "illegal offset")
+    assert (offset >= 0, "illegal offset");
     oop* addr = (oop*) _fr->entry_frame_argument_at(offset);
     _f->do_oop(addr);
   }
@@ -950,103 +916,19 @@
     }
   }
 
-  if (TaggedStackInterpreter) {
-    // process locals & expression stack
-    InterpreterOopMap *mask = NULL;
-#ifdef ASSERT
-    InterpreterOopMap oopmap_mask;
-    OopMapCache::compute_one_oop_map(m, bci, &oopmap_mask);
-    mask = &oopmap_mask;
-#endif // ASSERT
-    oops_interpreted_locals_do(f, max_locals, mask);
-    oops_interpreted_expressions_do(f, signature, has_receiver,
-                                    m->max_stack(),
-                                    max_locals, mask);
+  InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f);
+
+  // process locals & expression stack
+  InterpreterOopMap mask;
+  if (query_oop_map_cache) {
+    m->mask_for(bci, &mask);
   } else {
-    InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f);
-
-    // process locals & expression stack
-    InterpreterOopMap mask;
-    if (query_oop_map_cache) {
-      m->mask_for(bci, &mask);
-    } else {
-      OopMapCache::compute_one_oop_map(m, bci, &mask);
-    }
-    mask.iterate_oop(&blk);
+    OopMapCache::compute_one_oop_map(m, bci, &mask);
   }
+  mask.iterate_oop(&blk);
 }
 
 
-void frame::oops_interpreted_locals_do(OopClosure *f,
-                                      int max_locals,
-                                      InterpreterOopMap *mask) {
-  // Process locals then interpreter expression stack
-  for (int i = 0; i < max_locals; i++ ) {
-    Tag tag = interpreter_frame_local_tag(i);
-    if (tag == TagReference) {
-      oop* addr = (oop*) interpreter_frame_local_at(i);
-      assert((intptr_t*)addr >= sp(), "must be inside the frame");
-      f->do_oop(addr);
-#ifdef ASSERT
-    } else {
-      assert(tag == TagValue, "bad tag value for locals");
-      oop* p = (oop*) interpreter_frame_local_at(i);
-      // Not always true - too bad.  May have dead oops without tags in locals.
-      // assert(*p == NULL || !(*p)->is_oop(), "oop not tagged on interpreter locals");
-      assert(*p == NULL || !mask->is_oop(i), "local oop map mismatch");
-#endif // ASSERT
-    }
-  }
-}
-
-void frame::oops_interpreted_expressions_do(OopClosure *f,
-                                      symbolHandle signature,
-                                      bool has_receiver,
-                                      int max_stack,
-                                      int max_locals,
-                                      InterpreterOopMap *mask) {
-  // There is no stack no matter what the esp is pointing to (native methods
-  // might look like expression stack is nonempty).
-  if (max_stack == 0) return;
-
-  // Point the top of the expression stack above arguments to a call so
-  // arguments aren't gc'ed as both stack values for callee and callee
-  // arguments in callee's locals.
-  int args_size = 0;
-  if (!signature.is_null()) {
-    args_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0);
-  }
-
-  intptr_t *tos_addr = interpreter_frame_tos_at(args_size);
-  assert(args_size != 0 || tos_addr == interpreter_frame_tos_address(), "these are same");
-  intptr_t *frst_expr = interpreter_frame_expression_stack_at(0);
-  // In case of exceptions, the expression stack is invalid and the esp
-  // will be reset to express this condition. Therefore, we call f only
-  // if addr is 'inside' the stack (i.e., addr >= esp for Intel).
-  bool in_stack;
-  if (interpreter_frame_expression_stack_direction() > 0) {
-    in_stack = (intptr_t*)frst_expr <= tos_addr;
-  } else {
-    in_stack = (intptr_t*)frst_expr >= tos_addr;
-  }
-  if (!in_stack) return;
-
-  jint stack_size = interpreter_frame_expression_stack_size() - args_size;
-  for (int j = 0; j < stack_size; j++) {
-    Tag tag = interpreter_frame_expression_stack_tag(j);
-    if (tag == TagReference) {
-      oop *addr = (oop*) interpreter_frame_expression_stack_at(j);
-      f->do_oop(addr);
-#ifdef ASSERT
-    } else {
-      assert(tag == TagValue, "bad tag value for stack element");
-      oop *p = (oop*) interpreter_frame_expression_stack_at((j));
-      assert(*p == NULL || !mask->is_oop(j+max_locals), "stack oop map mismatch");
-#endif // ASSERT
-    }
-  }
-}
-
 void frame::oops_interpreted_arguments_do(symbolHandle signature, bool has_receiver, OopClosure* f) {
   InterpretedArgumentOopFinder finder(signature, has_receiver, this, f);
   finder.oops_do();
@@ -1306,29 +1188,18 @@
 
   int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();
 
-  if (TaggedStackInterpreter) {
-    InterpreterOopMap *mask = NULL;
-#ifdef ASSERT
-    InterpreterOopMap oopmap_mask;
-    methodHandle method(thread, m);
-    OopMapCache::compute_one_oop_map(method, bci, &oopmap_mask);
-    mask = &oopmap_mask;
-#endif // ASSERT
-    oops_interpreted_locals_do(&_check_oop, max_locals, mask);
-  } else {
-    // process dynamic part
-    InterpreterFrameClosure value_blk(this, max_locals, m->max_stack(),
-                                      &_check_value);
-    InterpreterFrameClosure   oop_blk(this, max_locals, m->max_stack(),
-                                      &_check_oop  );
-    InterpreterFrameClosure  dead_blk(this, max_locals, m->max_stack(),
-                                      &_zap_dead   );
+  // process dynamic part
+  InterpreterFrameClosure value_blk(this, max_locals, m->max_stack(),
+                                    &_check_value);
+  InterpreterFrameClosure   oop_blk(this, max_locals, m->max_stack(),
+                                    &_check_oop  );
+  InterpreterFrameClosure  dead_blk(this, max_locals, m->max_stack(),
+                                    &_zap_dead   );
 
-    // get frame map
-    InterpreterOopMap mask;
-    m->mask_for(bci, &mask);
-    mask.iterate_all( &oop_blk, &value_blk, &dead_blk);
-  }
+  // get frame map
+  InterpreterOopMap mask;
+  m->mask_for(bci, &mask);
+  mask.iterate_all( &oop_blk, &value_blk, &dead_blk);
 }
 
 
--- a/src/share/vm/runtime/frame.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/runtime/frame.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -191,26 +191,10 @@
   intptr_t*  interpreter_frame_mdx_addr() const;
 
  public:
-  // Tags for TaggedStackInterpreter
-  enum Tag {
-      TagValue = 0,          // Important: must be zero to use G0 on sparc.
-      TagReference = 0x555,  // Reference type - is an oop that needs gc.
-      TagCategory2 = 0x666   // Only used internally by interpreter
-                             // and not written to the java stack.
-      // The values above are chosen so that misuse causes a crash
-      // with a recognizable value.
-  };
-
-  static Tag tag_for_basic_type(BasicType typ) {
-    return (typ == T_OBJECT ? TagReference : TagValue);
-  }
-
   // Locals
 
   // The _at version returns a pointer because the address is used for GC.
   intptr_t* interpreter_frame_local_at(int index) const;
-  Tag       interpreter_frame_local_tag(int index) const;
-  void      interpreter_frame_set_local_tag(int index, Tag tag) const;
 
   void interpreter_frame_set_locals(intptr_t* locs);
 
@@ -260,8 +244,6 @@
 
   // The _at version returns a pointer because the address is used for GC.
   intptr_t* interpreter_frame_expression_stack_at(jint offset) const;
-  Tag       interpreter_frame_expression_stack_tag(jint offset) const;
-  void      interpreter_frame_set_expression_stack_tag(jint offset, Tag tag) const;
 
   // top of expression stack
   intptr_t* interpreter_frame_tos_at(jint offset) const;
@@ -375,12 +357,6 @@
   void oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache = true);
 
  private:
-  void oops_interpreted_locals_do(OopClosure *f,
-                                 int max_locals,
-                                 InterpreterOopMap *mask);
-  void oops_interpreted_expressions_do(OopClosure *f, symbolHandle signature,
-                                 bool has_receiver, int max_stack, int max_locals,
-                                 InterpreterOopMap *mask);
   void oops_interpreted_arguments_do(symbolHandle signature, bool has_receiver, OopClosure* f);
 
   // Iteration of oops
--- a/src/share/vm/runtime/globals.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/runtime/globals.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -652,6 +652,11 @@
   product(bool, PrintGCApplicationStoppedTime, false,                       \
           "Print the time the application has been stopped")                \
                                                                             \
+  notproduct(uintx, ErrorHandlerTest, 0,                                    \
+          "If > 0, provokes an error after VM initialization; the value"    \
+          "determines which error to provoke.  See test_error_handler()"    \
+          "in debug.cpp.")                                                  \
+                                                                            \
   develop(bool, Verbose, false,                                             \
           "Prints additional debugging information from other modes")       \
                                                                             \
@@ -1052,7 +1057,8 @@
           "Use SSE2 MOVDQU instruction for Arraycopy")                      \
                                                                             \
   product(intx, FieldsAllocationStyle, 1,                                   \
-          "0 - type based with oops first, 1 - with oops last")             \
+          "0 - type based with oops first, 1 - with oops last, "            \
+          "2 - oops in super and sub classes are together")                 \
                                                                             \
   product(bool, CompactFields, true,                                        \
           "Allocate nonstatic fields in gaps between previous fields")      \
@@ -2707,7 +2713,8 @@
   product(intx,  AllocatePrefetchStyle, 1,                                  \
           "0 = no prefetch, "                                               \
           "1 = prefetch instructions for each allocation, "                 \
-          "2 = use TLAB watermark to gate allocation prefetch")             \
+          "2 = use TLAB watermark to gate allocation prefetch, "            \
+          "3 = use BIS instruction on Sparc for allocation prefetch")       \
                                                                             \
   product(intx,  AllocatePrefetchDistance, -1,                              \
           "Distance to prefetch ahead of allocation pointer")               \
@@ -2749,6 +2756,9 @@
   product(intx, NmethodSweepFraction, 4,                                    \
           "Number of invocations of sweeper to cover all nmethods")         \
                                                                             \
+  product(intx, NmethodSweepCheckInterval, 5,                               \
+          "Compilers wake up every n seconds to possibly sweep nmethods")   \
+                                                                            \
   notproduct(intx, MemProfilingInterval, 500,                               \
           "Time between each invocation of the MemProfiler")                \
                                                                             \
@@ -3110,6 +3120,9 @@
   develop_pd(intx, CodeEntryAlignment,                                      \
           "Code entry alignment for generated code (in bytes)")             \
                                                                             \
+  product_pd(intx, OptoLoopAlignment,                                       \
+          "Align inner loops to zero relative to this modulus")             \
+                                                                            \
   product_pd(uintx, InitialCodeCacheSize,                                   \
           "Initial code cache size (in bytes)")                             \
                                                                             \
@@ -3492,9 +3505,6 @@
   develop(bool, TraceInvokeDynamic, false,                                  \
           "trace internal invoke dynamic operations")                       \
                                                                             \
-  product(bool, TaggedStackInterpreter, false,                              \
-          "Insert tags in interpreter execution stack for oopmap generaion")\
-                                                                            \
   diagnostic(bool, PauseAtStartup,      false,                              \
           "Causes the VM to pause at startup time and wait for the pause "  \
           "file to be removed (default: ./vm.paused.<pid>)")                \
--- a/src/share/vm/runtime/javaCalls.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/runtime/javaCalls.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -417,17 +417,9 @@
       // Handle conversion
       _value[i] = (intptr_t)Handle::raw_resolve((oop *)_value[i]);
     }
-    // The parameters are moved to the parameters array to include the tags.
-    if (TaggedStackInterpreter) {
-      // Tags are interspersed with arguments.  Tags are first.
-      int tagged_index = i*2;
-      _parameters[tagged_index]   = _is_oop[i] ? frame::TagReference :
-                                                 frame::TagValue;
-      _parameters[tagged_index+1] = _value[i];
-    }
   }
   // Return argument vector
-  return TaggedStackInterpreter ? _parameters : _value;
+  return _value;
 }
 
 
--- a/src/share/vm/runtime/javaCalls.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/runtime/javaCalls.hpp	Wed May 19 10:22:39 2010 -0700
@@ -66,11 +66,9 @@
   };
 
   intptr_t    _value_buffer [_default_size + 1];
-  intptr_t    _parameter_buffer [_default_size*2 + 1];
   bool        _is_oop_buffer[_default_size + 1];
 
   intptr_t*   _value;
-  intptr_t*   _parameters;
   bool*       _is_oop;
   int         _size;
   int         _max_size;
@@ -81,7 +79,6 @@
     _value    = &_value_buffer[1];
     _is_oop   = &_is_oop_buffer[1];
 
-    _parameters = &_parameter_buffer[0];
     _max_size = _default_size;
     _size = 0;
     _start_at_zero = false;
@@ -99,11 +96,10 @@
     if (max_size > _default_size) {
       _value  = NEW_RESOURCE_ARRAY(intptr_t, max_size + 1);
       _is_oop = NEW_RESOURCE_ARRAY(bool, max_size + 1);
-      if (TaggedStackInterpreter) {
-        _parameters  = NEW_RESOURCE_ARRAY(intptr_t, max_size*2 + 1);
-      }
+
       // Reserve room for potential receiver in value and is_oop
       _value++; _is_oop++;
+
       _max_size = max_size;
       _size = 0;
       _start_at_zero = false;
--- a/src/share/vm/runtime/memprofiler.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/runtime/memprofiler.cpp	Wed May 19 10:22:39 2010 -0700
@@ -62,7 +62,7 @@
     // Create log file
     _log_fp = fopen(log_name , "w+");
     if (_log_fp == NULL) {
-      fatal1("MemProfiler: Cannot create log file: %s", log_name);
+      fatal(err_msg("MemProfiler: Cannot create log file: %s", log_name));
     }
     fprintf(_log_fp, "MemProfiler: sizes are in Kb, time is in seconds since startup\n\n");
     fprintf(_log_fp, "  time, #thr, #cls,  heap,  heap,  perm,  perm,  code, hndls, rescs, oopmp\n");
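
The fatal1/fatal2 variants retired throughout this changeset give way to a single fatal(err_msg(...)) form, with err_msg doing printf-style formatting into a temporary buffer. A hedged sketch of the resulting idiom (identifiers as used in this tree; fatal() does not return):

    // Sketch: the err_msg idiom that replaces the numbered fatalN macros.
    void check_size(size_t size, size_t expected) {
      if (size != expected) {
        fatal(err_msg("bad size " SIZE_FORMAT ", expected " SIZE_FORMAT,
                      size, expected));
      }
    }
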
--- a/src/share/vm/runtime/mutex.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/runtime/mutex.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1288,8 +1288,9 @@
           !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
             SafepointSynchronize::is_synchronizing())) {
         new_owner->print_owned_locks();
-        fatal4("acquiring lock %s/%d out of order with lock %s/%d -- possible deadlock",
-               this->name(), this->rank(), locks->name(), locks->rank());
+        fatal(err_msg("acquiring lock %s/%d out of order with lock %s/%d -- "
+                      "possible deadlock", this->name(), this->rank(),
+                      locks->name(), locks->rank()));
       }
 
       this->_next = new_owner->_owned_locks;
@@ -1342,7 +1343,8 @@
          || rank() == Mutex::special, "wrong thread state for using locks");
   if (StrictSafepointChecks) {
     if (thread->is_VM_thread() && !allow_vm_block()) {
-      fatal1("VM thread using lock %s (not allowed to block on)", name());
+      fatal(err_msg("VM thread using lock %s (not allowed to block on)",
+                    name()));
     }
     debug_only(if (rank() != Mutex::special) \
       thread->check_for_valid_safepoint_state(false);)
--- a/src/share/vm/runtime/mutexLocker.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/runtime/mutexLocker.cpp	Wed May 19 10:22:39 2010 -0700
@@ -70,6 +70,7 @@
 Monitor* CMark_lock                   = NULL;
 Monitor* ZF_mon                       = NULL;
 Monitor* Cleanup_mon                  = NULL;
+Mutex*   CMRegionStack_lock           = NULL;
 Mutex*   SATB_Q_FL_lock               = NULL;
 Monitor* SATB_Q_CBL_mon               = NULL;
 Mutex*   Shared_SATB_Q_lock           = NULL;
@@ -135,7 +136,7 @@
   // see if invoker of VM operation owns it
   VM_Operation* op = VMThread::vm_operation();
   if (op != NULL && op->calling_thread() == lock->owner()) return;
-  fatal1("must own lock %s", lock->name());
+  fatal(err_msg("must own lock %s", lock->name()));
 }
 
 // a stronger assertion than the above
@@ -143,7 +144,7 @@
   if (IgnoreLockingAssertions) return;
   assert(lock != NULL, "Need non-NULL lock");
   if (lock->owned_by_self()) return;
-  fatal1("must own lock %s", lock->name());
+  fatal(err_msg("must own lock %s", lock->name()));
 }
 #endif
 
@@ -167,6 +168,7 @@
     def(CMark_lock                 , Monitor, nonleaf,     true ); // coordinate concurrent mark thread
     def(ZF_mon                     , Monitor, leaf,        true );
     def(Cleanup_mon                , Monitor, nonleaf,     true );
+    def(CMRegionStack_lock         , Mutex,   leaf,        true );
     def(SATB_Q_FL_lock             , Mutex  , special,     true );
     def(SATB_Q_CBL_mon             , Monitor, nonleaf,     true );
     def(Shared_SATB_Q_lock         , Mutex,   nonleaf,     true );
--- a/src/share/vm/runtime/mutexLocker.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/runtime/mutexLocker.hpp	Wed May 19 10:22:39 2010 -0700
@@ -63,6 +63,7 @@
 extern Monitor* CMark_lock;                      // used for concurrent mark thread coordination
 extern Monitor* ZF_mon;                          // used for G1 conc zero-fill.
 extern Monitor* Cleanup_mon;                     // used for G1 conc cleanup.
+extern Mutex*   CMRegionStack_lock;              // used for protecting accesses to the CM region stack
 extern Mutex*   SATB_Q_FL_lock;                  // Protects SATB Q
                                                  // buffer free list.
 extern Monitor* SATB_Q_CBL_mon;                  // Protects SATB Q
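
The new CMRegionStack_lock follows the three-step recipe this header/source pair uses for every global lock. A recap with a hypothetical NewThing_lock (name and rank are placeholders, modeled on the CMRegionStack_lock entry above):

    // Sketch: the three places a new global lock touches.
    // 1. mutexLocker.hpp -- declare it:
    //      extern Mutex* NewThing_lock;   // protects the new thing
    // 2. mutexLocker.cpp -- define the storage:
    //      Mutex* NewThing_lock = NULL;
    // 3. mutexLocker.cpp, in mutex_init() -- construct it:
    //      def(NewThing_lock, Mutex, leaf, true);
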
--- a/src/share/vm/runtime/os.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/runtime/os.cpp	Wed May 19 10:22:39 2010 -0700
@@ -406,8 +406,10 @@
 #ifdef ASSERT
 inline size_t get_size(void* obj) {
   size_t size = *size_addr_from_obj(obj);
-  if (size < 0 )
-    fatal2("free: size field of object #%p was overwritten (%lu)", obj, size);
+  if (size < 0) {
+    fatal(err_msg("free: size field of object #" PTR_FORMAT " was overwritten ("
+                  SIZE_FORMAT ")", obj, size));
+  }
   return size;
 }
 
--- a/src/share/vm/runtime/safepoint.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/runtime/safepoint.cpp	Wed May 19 10:22:39 2010 -0700
@@ -472,7 +472,7 @@
   }
 
   TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime);
-  NMethodSweeper::sweep();
+  NMethodSweeper::scan_stacks();
 }
 
 
@@ -594,7 +594,7 @@
       break;
 
     default:
-     fatal1("Illegal threadstate encountered: %d", state);
+     fatal(err_msg("Illegal threadstate encountered: %d", state));
   }
 
   // Check for pending. async. exceptions or suspends - except if the
--- a/src/share/vm/runtime/sharedRuntime.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Wed May 19 10:22:39 2010 -0700
@@ -259,13 +259,16 @@
 address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) {
   assert(frame::verify_return_pc(return_address), "must be a return pc");
 
+  // Reset MethodHandle flag.
+  thread->set_is_method_handle_return(false);
+
   // the fastest case first
   CodeBlob* blob = CodeCache::find_blob(return_address);
   if (blob != NULL && blob->is_nmethod()) {
     nmethod* code = (nmethod*)blob;
     assert(code != NULL, "nmethod must be present");
     // Check if the return address is a MethodHandle call site.
-    thread->set_is_method_handle_exception(code->is_method_handle_return(return_address));
+    thread->set_is_method_handle_return(code->is_method_handle_return(return_address));
     // native nmethods don't have exception handlers
     assert(!code->is_native_method(), "no exception handler");
     assert(code->header_begin() != code->exception_begin(), "no exception handler");
@@ -292,7 +295,7 @@
       nmethod* code = (nmethod*)blob;
       assert(code != NULL, "nmethod must be present");
       // Check if the return address is a MethodHandle call site.
-      thread->set_is_method_handle_exception(code->is_method_handle_return(return_address));
+      thread->set_is_method_handle_return(code->is_method_handle_return(return_address));
       assert(code->header_begin() != code->exception_begin(), "no exception handler");
       return code->exception_begin();
     }
@@ -470,6 +473,13 @@
     t = table.entry_for(catch_pco, -1, 0);
   }
 
+#ifdef COMPILER1
+  if (t == NULL && nm->is_compiled_by_c1()) {
+    assert(nm->unwind_handler_begin() != NULL, "");
+    return nm->unwind_handler_begin();
+  }
+#endif
+
   if (t == NULL) {
     tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d", ret_pc, handler_bci);
     tty->print_cr("   Exception:");
@@ -1547,7 +1557,7 @@
     methodOop actual_method = MethodHandles::decode_method(actual,
                                                           kignore, fignore);
     if (actual_method != NULL) {
-      if (actual_method->name() == vmSymbols::invoke_name())
+      if (methodOopDesc::is_method_handle_invoke_name(actual_method->name()))
         mhName = "$";
       else
         mhName = actual_method->signature()->as_C_string();
@@ -1832,14 +1842,11 @@
 
       case T_OBJECT:
       case T_ARRAY:
-        if (!TaggedStackInterpreter) {
 #ifdef _LP64
-          return T_LONG;
+        return T_LONG;
 #else
-          return T_INT;
+        return T_INT;
 #endif
-        }
-        return T_OBJECT;
 
       case T_INT:
       case T_LONG:
@@ -2585,17 +2592,9 @@
   // Copy the locals.  Order is preserved so that loading of longs works.
   // Since there's no GC I can copy the oops blindly.
   assert( sizeof(HeapWord)==sizeof(intptr_t), "fix this code");
-  if (TaggedStackInterpreter) {
-    for (int i = 0; i < max_locals; i++) {
-      // copy only each local separately to the buffer avoiding the tag
-      buf[i] = *fr.interpreter_frame_local_at(max_locals-i-1);
-    }
-  } else {
-    Copy::disjoint_words(
-                       (HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
+  Copy::disjoint_words((HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
                        (HeapWord*)&buf[0],
                        max_locals);
-  }
 
   // Inflate locks.  Copy the displaced headers.  Be careful, there can be holes.
   int i = max_locals;
--- a/src/share/vm/runtime/signature.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/runtime/signature.cpp	Wed May 19 10:22:39 2010 -0700
@@ -57,7 +57,7 @@
 }
 
 void SignatureIterator::expect(char c) {
-  if (_signature->byte_at(_index) != c) fatal1("expecting %c", c);
+  if (_signature->byte_at(_index) != c) fatal(err_msg("expecting %c", c));
   _index++;
 }
 
@@ -327,6 +327,26 @@
   return result;
 }
 
+klassOop SignatureStream::as_klass(Handle class_loader, Handle protection_domain,
+                                   FailureMode failure_mode, TRAPS) {
+  if (!is_object())  return NULL;
+  symbolOop name = as_symbol(CHECK_NULL);
+  if (failure_mode == ReturnNull) {
+    return SystemDictionary::resolve_or_null(name, class_loader, protection_domain, THREAD);
+  } else {
+    bool throw_error = (failure_mode == NCDFError);
+    return SystemDictionary::resolve_or_fail(name, class_loader, protection_domain, throw_error, THREAD);
+  }
+}
+
+oop SignatureStream::as_java_mirror(Handle class_loader, Handle protection_domain,
+                                    FailureMode failure_mode, TRAPS) {
+  if (!is_object())
+    return Universe::java_mirror(type());
+  klassOop klass = as_klass(class_loader, protection_domain, failure_mode, CHECK_NULL);
+  if (klass == NULL)  return NULL;
+  return Klass::cast(klass)->java_mirror();
+}
 
 symbolOop SignatureStream::as_symbol_or_null() {
   // Create a symbol from for string _begin _end
--- a/src/share/vm/runtime/signature.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/runtime/signature.hpp	Wed May 19 10:22:39 2010 -0700
@@ -402,6 +402,9 @@
   bool is_array() const;                         // True if this argument is an array
   BasicType type() const                         { return _type; }
   symbolOop as_symbol(TRAPS);
+  enum FailureMode { ReturnNull, CNFException, NCDFError };
+  klassOop as_klass(Handle class_loader, Handle protection_domain, FailureMode failure_mode, TRAPS);
+  oop as_java_mirror(Handle class_loader, Handle protection_domain, FailureMode failure_mode, TRAPS);
 
   // return same as_symbol except allocation of new symbols is avoided.
   symbolOop as_symbol_or_null();
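
The new accessors let a caller walk a signature and resolve each reference type under an explicit failure policy. A hedged sketch of a typical loop (stream methods as declared here; the loader and protection-domain handles are assumed valid):

    // Sketch: resolve every parameter type in a method signature,
    // tolerating missing classes via the ReturnNull failure mode.
    void resolve_parameter_types(symbolHandle sig, Handle loader,
                                 Handle domain, TRAPS) {
      for (SignatureStream ss(sig); !ss.at_return_type(); ss.next()) {
        if (!ss.is_object()) continue;  // primitives need no resolution
        klassOop k = ss.as_klass(loader, domain,
                                 SignatureStream::ReturnNull, CHECK);
        if (k == NULL) {
          // unresolved; ReturnNull means we simply skip it
        }
      }
    }
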
--- a/src/share/vm/runtime/stubRoutines.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/runtime/stubRoutines.cpp	Wed May 19 10:22:39 2010 -0700
@@ -118,7 +118,10 @@
     ResourceMark rm;
     TraceTime timer("StubRoutines generation 1", TraceStartupTime);
     _code1 = BufferBlob::create("StubRoutines (1)", code_size1);
-    if( _code1 == NULL) vm_exit_out_of_memory1(code_size1, "CodeCache: no room for %s", "StubRoutines (1)");
+    if (_code1 == NULL) {
+      vm_exit_out_of_memory(code_size1,
+                            "CodeCache: no room for StubRoutines (1)");
+    }
     CodeBuffer buffer(_code1->instructions_begin(), _code1->instructions_size());
     StubGenerator_generate(&buffer, false);
   }
@@ -164,7 +167,10 @@
     ResourceMark rm;
     TraceTime timer("StubRoutines generation 2", TraceStartupTime);
     _code2 = BufferBlob::create("StubRoutines (2)", code_size2);
-    if( _code2 == NULL) vm_exit_out_of_memory1(code_size2, "CodeCache: no room for %s", "StubRoutines (2)");
+    if (_code2 == NULL) {
+      vm_exit_out_of_memory(code_size2,
+                            "CodeCache: no room for StubRoutines (2)");
+    }
     CodeBuffer buffer(_code2->instructions_begin(), _code2->instructions_size());
     StubGenerator_generate(&buffer, true);
   }
--- a/src/share/vm/runtime/sweeper.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/runtime/sweeper.cpp	Wed May 19 10:22:39 2010 -0700
@@ -33,6 +33,8 @@
 jint      NMethodSweeper::_locked_seen = 0;
 jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
 bool      NMethodSweeper::_rescan = false;
+bool      NMethodSweeper::_do_sweep = false;
+jint      NMethodSweeper::_sweep_started = 0;
 bool      NMethodSweeper::_was_full = false;
 jint      NMethodSweeper::_advise_to_sweep = 0;
 jlong     NMethodSweeper::_last_was_full = 0;
@@ -50,14 +52,20 @@
 };
 static MarkActivationClosure mark_activation_closure;
 
-void NMethodSweeper::sweep() {
+void NMethodSweeper::scan_stacks() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
   if (!MethodFlushing) return;
+  _do_sweep = true;
 
   // No need to synchronize access, since this is always executed at a
   // safepoint.  If we aren't in the middle of scan and a rescan
-  // hasn't been requested then just return.
-  if (_current == NULL && !_rescan) return;
+  // hasn't been requested, then just return.  If UseCodeCacheFlushing is on
+  // and code cache flushing is in progress, however, don't skip sweeping, so
+  // that we keep making progress clearing space in the code cache.
+  if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) {
+    _do_sweep = false;
+    return;
+  }
 
   // Make sure CompiledIC_lock is unlocked, since we might update some
   // inline caches. If it is, we just bail-out and try later.
@@ -68,7 +76,7 @@
   if (_current == NULL) {
     _seen        = 0;
     _invocations = NmethodSweepFraction;
-    _current     = CodeCache::first();
+    _current     = CodeCache::first_nmethod();
     _traversals  += 1;
     if (PrintMethodFlushing) {
       tty->print_cr("### Sweep: stack traversal %d", _traversals);
@@ -81,48 +89,9 @@
     _not_entrant_seen_on_stack = 0;
   }
 
-  if (PrintMethodFlushing && Verbose) {
-    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
-  }
-
-  // We want to visit all nmethods after NmethodSweepFraction invocations.
-  // If invocation is 1 we do the rest
-  int todo = CodeCache::nof_blobs();
-  if (_invocations != 1) {
-    todo = (CodeCache::nof_blobs() - _seen) / _invocations;
-    _invocations--;
-  }
-
-  for(int i = 0; i < todo && _current != NULL; i++) {
-    CodeBlob* next = CodeCache::next(_current); // Read next before we potentially delete current
-    if (_current->is_nmethod()) {
-      process_nmethod((nmethod *)_current);
-    }
-    _seen++;
-    _current = next;
-  }
-  // Because we could stop on a codeBlob other than an nmethod we skip forward
-  // to the next nmethod (if any). codeBlobs other than nmethods can be freed
-  // async to us and make _current invalid while we sleep.
-  while (_current != NULL && !_current->is_nmethod()) {
-    _current = CodeCache::next(_current);
-  }
-
-  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
-    // we've completed a scan without making progress but there were
-    // nmethods we were unable to process either because they were
-    // locked or were still on stack.  We don't have to aggresively
-    // clean them up so just stop scanning.  We could scan once more
-    // but that complicates the control logic and it's unlikely to
-    // matter much.
-    if (PrintMethodFlushing) {
-      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
-    }
-  }
-
   if (UseCodeCacheFlushing) {
     if (!CodeCache::needs_flushing()) {
-      // In a safepoint, no race with setters
+      // scan_stacks() runs during a safepoint, so no race with setters
       _advise_to_sweep = 0;
     }
 
@@ -155,13 +124,99 @@
   }
 }
 
+void NMethodSweeper::possibly_sweep() {
+  if ((!MethodFlushing) || (!_do_sweep)) return;
+
+  if (_invocations > 0) {
+    // Only one thread at a time will sweep
+    jint old = Atomic::cmpxchg(1, &_sweep_started, 0);
+    if (old != 0) {
+      return;
+    }
+    sweep_code_cache();
+  }
+  _sweep_started = 0;
+}
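
The _sweep_started dance above is a compare-and-swap try-lock: whichever compiler thread first flips 0 to 1 does the sweep, and everyone else returns immediately. A generic sketch of the idiom, assuming Atomic::cmpxchg(exchange_value, dest, compare_value) returns the prior value as it does in this tree:

    // Sketch: at-most-one-thread execution guarded by cmpxchg.
    static volatile jint _busy = 0;

    void maybe_do_work() {
      if (Atomic::cmpxchg(1, &_busy, 0) != 0) {
        return;            // another thread holds the guard; skip this round
      }
      do_work();           // at most one thread reaches here at a time
      _busy = 0;           // release the guard (a plain store, as above)
    }
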
+
+void NMethodSweeper::sweep_code_cache() {
+#ifdef ASSERT
+  jlong sweep_start;
+  if (PrintMethodFlushing) {
+    sweep_start = os::javaTimeMillis();
+  }
+#endif
+  if (PrintMethodFlushing && Verbose) {
+    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
+  }
+
+  // We want to visit all nmethods after NmethodSweepFraction invocations.
+  // If _invocations is 1 we sweep all of the remaining nmethods
+  int todo = CodeCache::nof_blobs();
+  if (_invocations > 1) {
+    todo = (CodeCache::nof_blobs() - _seen) / _invocations;
+  }
+
+  // Compiler threads may check whether to sweep more often than stack scans
+  // happen; don't keep trying once everything has been scanned.
+  _invocations--;
+
+  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
+  assert(!CodeCache_lock->owned_by_self(), "just checking");
+
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+
+    for(int i = 0; i < todo && _current != NULL; i++) {
+
+      // Since we will give up the CodeCache_lock, always skip ahead to an nmethod.
+      // Other blobs can be deleted by other threads
+      // Read next before we potentially delete current
+      CodeBlob* next = CodeCache::next_nmethod(_current);
+
+      // Now ready to process nmethod and give up CodeCache_lock
+      {
+        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+        process_nmethod((nmethod *)_current);
+      }
+      _seen++;
+      _current = next;
+    }
+
+    // Skip forward to the next nmethod (if any). Code blobs other than nmethods
+    // can be freed async to us and make _current invalid while we sleep.
+    _current = CodeCache::next_nmethod(_current);
+  }
+
+  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
+    // we've completed a scan without making progress but there were
+    // nmethods we were unable to process either because they were
+    // locked or were still on stack.  We don't have to aggressively
+    // clean them up so just stop scanning.  We could scan once more
+    // but that complicates the control logic and it's unlikely to
+    // matter much.
+    if (PrintMethodFlushing) {
+      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
+    }
+  }
+
+#ifdef ASSERT
+  if (PrintMethodFlushing) {
+    jlong sweep_end             = os::javaTimeMillis();
+    tty->print_cr("### sweeper:      sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
+  }
+#endif
+}
+
 
 void NMethodSweeper::process_nmethod(nmethod *nm) {
+  assert(!CodeCache_lock->owned_by_self(), "just checking");
+
   // Skip methods that are currently referenced by the VM
   if (nm->is_locked_by_vm()) {
     // But still remember to clean-up inline caches for alive nmethods
     if (nm->is_alive()) {
      // Clean-up all inline caches that point to zombie/non-reentrant methods
+      MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
     } else {
       _locked_seen++;
@@ -178,6 +233,7 @@
       if (PrintMethodFlushing && Verbose) {
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
       }
+      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
       nm->flush();
     } else {
       if (PrintMethodFlushing && Verbose) {
@@ -197,10 +253,11 @@
       _rescan = true;
     } else {
       // Still alive, clean up its inline caches
+      MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
      // we couldn't transition this nmethod so don't immediately
       // request a rescan.  If this method stays on the stack for a
-      // long time we don't want to keep rescanning at every safepoint.
+      // long time we don't want to keep rescanning the code cache.
       _not_entrant_seen_on_stack++;
     }
   } else if (nm->is_unloaded()) {
@@ -209,6 +266,7 @@
       tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
     if (nm->is_osr_method()) {
       // No inline caches will ever point to osr methods, so we can just remove it
+      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
       nm->flush();
     } else {
       nm->make_zombie();
@@ -227,6 +285,7 @@
     }
 
    // Clean-up all inline caches that point to zombie/non-reentrant methods
+    MutexLocker cl(CompiledIC_lock);
     nm->cleanup_inline_caches();
   }
 }
@@ -235,8 +294,8 @@
 // they will call a vm op that comes here. This code attempts to speculatively
 // unload the oldest half of the nmethods (based on the compile job id) by
 // saving the old code in a list in the CodeCache. Then
-// execution resumes. If a method so marked is not called by the second
-// safepoint from the current one, the nmethod will be marked non-entrant and
+// execution resumes. If a method so marked is not called by the second sweeper
+// stack traversal after the current one, the nmethod will be marked non-entrant and
 // removed by normal sweeping. If the method is called, the methodOop's
 // _code field is restored and the methodOop/nmethod
 // go back to their normal state.
@@ -364,8 +423,8 @@
     xtty->end_elem();
   }
 
-  // Shut off compiler. Sweeper will run exiting from this safepoint
-  // and turn it back on if it clears enough space
+  // Shut off compiler. Sweeper will start over with a new stack scan and
+  // traversal cycle and turn it back on if it clears enough space.
   if (was_full()) {
     _last_was_full = os::javaTimeMillis();
     CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
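
The sweeper rework above splits the old safepoint-time sweep() into scan_stacks(), which still runs at safepoints, and possibly_sweep()/sweep_code_cache(), which compiler threads drive between safepoints. The Atomic::cmpxchg on _sweep_started is what keeps the concurrent part single-threaded. A minimal stand-alone sketch of that guard, using std::atomic in place of HotSpot's Atomic class (illustrative names only, not the VM's):

  #include <atomic>

  static std::atomic<int> sweep_started{0};

  void possibly_sweep_sketch() {
    int expected = 0;
    // Analogue of Atomic::cmpxchg(1, &_sweep_started, 0): only the thread
    // that flips the flag from 0 to 1 proceeds; everyone else returns.
    if (!sweep_started.compare_exchange_strong(expected, 1)) {
      return;
    }
    // ... sweep a fraction of the code cache here ...
    sweep_started.store(0);  // let the next caller attempt a sweep
  }
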
--- a/src/share/vm/runtime/sweeper.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/runtime/sweeper.hpp	Wed May 19 10:22:39 2010 -0700
@@ -35,6 +35,8 @@
 
  static bool      _rescan;          // Indicates that we should do a full rescan
                                      // of the code cache looking for work to do.
+  static bool      _do_sweep;        // Flag to skip the concurrent sweep if no stack scan happened
+  static jint      _sweep_started;   // Flag to serialize concurrent sweeps (only one thread sweeps at a time)
   static int       _locked_seen;     // Number of locked nmethods encountered during the scan
  static int       _not_entrant_seen_on_stack; // Number of not-entrant nmethods that are still on stack
 
@@ -48,7 +50,9 @@
  public:
   static long traversal_count() { return _traversals; }
 
-  static void sweep();  // Invoked at the end of each safepoint
+  static void scan_stacks();      // Invoked at the end of each safepoint
+  static void sweep_code_cache(); // Concurrent part of sweep job
+  static void possibly_sweep();   // Compiler threads call this to sweep
 
   static void notify(nmethod* nm) {
     // Perform a full scan of the code cache from the beginning.  No
--- a/src/share/vm/runtime/thread.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/runtime/thread.hpp	Wed May 19 10:22:39 2010 -0700
@@ -772,7 +772,7 @@
   volatile address _exception_pc;                // PC where exception happened
   volatile address _exception_handler_pc;        // PC for handler of exception
   volatile int     _exception_stack_size;        // Size of frame where exception happened
-  volatile int     _is_method_handle_exception;  // True if the current exception PC is at a MethodHandle call.
+  volatile int     _is_method_handle_return;     // true (== 1) if the current exception PC is a MethodHandle call site.
 
   // support for compilation
  bool    _is_compiling;                         // is true if a compilation is active in this thread (one compilation per thread possible)
@@ -1108,13 +1108,13 @@
   int      exception_stack_size() const          { return _exception_stack_size; }
   address  exception_pc() const                  { return _exception_pc; }
   address  exception_handler_pc() const          { return _exception_handler_pc; }
-  int      is_method_handle_exception() const    { return _is_method_handle_exception; }
+  bool     is_method_handle_return() const       { return _is_method_handle_return == 1; }
 
   void set_exception_oop(oop o)                  { _exception_oop = o; }
   void set_exception_pc(address a)               { _exception_pc = a; }
   void set_exception_handler_pc(address a)       { _exception_handler_pc = a; }
   void set_exception_stack_size(int size)        { _exception_stack_size = size; }
-  void set_is_method_handle_exception(int value) { _is_method_handle_exception = value; }
+  void set_is_method_handle_return(bool value)   { _is_method_handle_return = value ? 1 : 0; }
 
   // Stack overflow support
   inline size_t stack_available(address cur_sp);
@@ -1188,7 +1188,7 @@
   static ByteSize exception_pc_offset()          { return byte_offset_of(JavaThread, _exception_pc        ); }
   static ByteSize exception_handler_pc_offset()  { return byte_offset_of(JavaThread, _exception_handler_pc); }
   static ByteSize exception_stack_size_offset()  { return byte_offset_of(JavaThread, _exception_stack_size); }
-  static ByteSize is_method_handle_exception_offset() { return byte_offset_of(JavaThread, _is_method_handle_exception); }
+  static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
   static ByteSize stack_guard_state_offset()     { return byte_offset_of(JavaThread, _stack_guard_state   ); }
   static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags       ); }
 
--- a/src/share/vm/runtime/vframe.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/runtime/vframe.cpp	Wed May 19 10:22:39 2010 -0700
@@ -244,51 +244,30 @@
   StackValueCollection* result = new StackValueCollection(length);
 
   // Get oopmap describing oops and int for current bci
-  if (TaggedStackInterpreter) {
-    for(int i=0; i < length; i++) {
-      // Find stack location
-      intptr_t *addr = locals_addr_at(i);
-
-      // Depending on oop/int put it in the right package
-      StackValue *sv;
-      frame::Tag tag = fr().interpreter_frame_local_tag(i);
-      if (tag == frame::TagReference) {
-        // oop value
-        Handle h(*(oop *)addr);
-        sv = new StackValue(h);
-      } else {
-        // integer
-        sv = new StackValue(*addr);
-      }
-      assert(sv != NULL, "sanity check");
-      result->add(sv);
-    }
+  InterpreterOopMap oop_mask;
+  if (TraceDeoptimization && Verbose) {
+    methodHandle m_h(thread(), method());
+    OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
   } else {
-    InterpreterOopMap oop_mask;
-    if (TraceDeoptimization && Verbose) {
-      methodHandle m_h(thread(), method());
-      OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
-    } else {
-      method()->mask_for(bci(), &oop_mask);
-    }
-    // handle locals
-    for(int i=0; i < length; i++) {
-      // Find stack location
-      intptr_t *addr = locals_addr_at(i);
+    method()->mask_for(bci(), &oop_mask);
+  }
+  // handle locals
+  for(int i=0; i < length; i++) {
+    // Find stack location
+    intptr_t *addr = locals_addr_at(i);
 
-      // Depending on oop/int put it in the right package
-      StackValue *sv;
-      if (oop_mask.is_oop(i)) {
-        // oop value
-        Handle h(*(oop *)addr);
-        sv = new StackValue(h);
-      } else {
-        // integer
-        sv = new StackValue(*addr);
-      }
-      assert(sv != NULL, "sanity check");
-      result->add(sv);
+    // Depending on oop/int put it in the right package
+    StackValue *sv;
+    if (oop_mask.is_oop(i)) {
+      // oop value
+      Handle h(*(oop *)addr);
+      sv = new StackValue(h);
+    } else {
+      // integer
+      sv = new StackValue(*addr);
     }
+    assert(sv != NULL, "sanity check");
+    result->add(sv);
   }
   return result;
 }
@@ -331,53 +310,31 @@
   int nof_locals = method()->max_locals();
   StackValueCollection* result = new StackValueCollection(length);
 
-  if (TaggedStackInterpreter) {
-    // handle expressions
-    for(int i=0; i < length; i++) {
-      // Find stack location
-      intptr_t *addr = fr().interpreter_frame_expression_stack_at(i);
-      frame::Tag tag = fr().interpreter_frame_expression_stack_tag(i);
-
-      // Depending on oop/int put it in the right package
-      StackValue *sv;
-      if (tag == frame::TagReference) {
-        // oop value
-        Handle h(*(oop *)addr);
-        sv = new StackValue(h);
-      } else {
-        // otherwise
-        sv = new StackValue(*addr);
-      }
-      assert(sv != NULL, "sanity check");
-      result->add(sv);
-    }
+  InterpreterOopMap oop_mask;
+  // Get oopmap describing oops and int for current bci
+  if (TraceDeoptimization && Verbose) {
+    methodHandle m_h(method());
+    OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
   } else {
-    InterpreterOopMap oop_mask;
-    // Get oopmap describing oops and int for current bci
-    if (TraceDeoptimization && Verbose) {
-      methodHandle m_h(method());
-      OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
-    } else {
-      method()->mask_for(bci(), &oop_mask);
-    }
-    // handle expressions
-    for(int i=0; i < length; i++) {
-      // Find stack location
-      intptr_t *addr = fr().interpreter_frame_expression_stack_at(i);
+    method()->mask_for(bci(), &oop_mask);
+  }
+  // handle expressions
+  for(int i=0; i < length; i++) {
+    // Find stack location
+    intptr_t *addr = fr().interpreter_frame_expression_stack_at(i);
 
-      // Depending on oop/int put it in the right package
-      StackValue *sv;
-      if (oop_mask.is_oop(i + nof_locals)) {
-        // oop value
-        Handle h(*(oop *)addr);
-        sv = new StackValue(h);
-      } else {
-        // integer
-        sv = new StackValue(*addr);
-      }
-      assert(sv != NULL, "sanity check");
-      result->add(sv);
+    // Depending on oop/int put it in the right package
+    StackValue *sv;
+    if (oop_mask.is_oop(i + nof_locals)) {
+      // oop value
+      Handle h(*(oop *)addr);
+      sv = new StackValue(h);
+    } else {
+      // integer
+      sv = new StackValue(*addr);
     }
+    assert(sv != NULL, "sanity check");
+    result->add(sv);
   }
   return result;
 }
--- a/src/share/vm/runtime/vframeArray.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/runtime/vframeArray.cpp	Wed May 19 10:22:39 2010 -0700
@@ -309,11 +309,6 @@
       default:
         ShouldNotReachHere();
     }
-    if (TaggedStackInterpreter) {
-      // Write tag to the stack
-      iframe()->interpreter_frame_set_expression_stack_tag(i,
-                                  frame::tag_for_basic_type(value->type()));
-    }
   }
 
 
@@ -335,11 +330,6 @@
       default:
         ShouldNotReachHere();
     }
-    if (TaggedStackInterpreter) {
-      // Write tag to stack
-      iframe()->interpreter_frame_set_local_tag(i,
-                                  frame::tag_for_basic_type(value->type()));
-    }
   }
 
   if (is_top_frame && JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
@@ -354,9 +344,8 @@
       void* saved_args = thread->popframe_preserved_args();
       assert(saved_args != NULL, "must have been saved by interpreter");
 #ifdef ASSERT
-      int stack_words = Interpreter::stackElementWords();
       assert(popframe_preserved_args_size_in_words <=
-             iframe()->interpreter_frame_expression_stack_size()*stack_words,
+             iframe()->interpreter_frame_expression_stack_size()*Interpreter::stackElementWords,
              "expression stack size should have been extended");
 #endif // ASSERT
       int top_element = iframe()->interpreter_frame_expression_stack_size()-1;
--- a/src/share/vm/runtime/vmThread.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/runtime/vmThread.cpp	Wed May 19 10:22:39 2010 -0700
@@ -106,7 +106,7 @@
   // restore queue to empty state
   _queue[prio]->set_next(_queue[prio]);
   _queue[prio]->set_prev(_queue[prio]);
-  assert(queue_empty(prio), "drain corrupted queue")
+  assert(queue_empty(prio), "drain corrupted queue");
 #ifdef DEBUG
   int len = 0;
   VM_Operation* cur;
@@ -593,7 +593,8 @@
      // Check that the VM operation allows nested VM operations. This is normally not the case; e.g., the compiler
       // does not allow nested scavenges or compiles.
       if (!prev_vm_operation->allow_nested_vm_operations()) {
-        fatal2("Nested VM operation %s requested by operation %s", op->name(), vm_operation()->name());
+        fatal(err_msg("Nested VM operation %s requested by operation %s",
+                      op->name(), vm_operation()->name()));
       }
       op->set_calling_thread(prev_vm_operation->calling_thread(), prev_vm_operation->priority());
     }
--- a/src/share/vm/services/g1MemoryPool.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/services/g1MemoryPool.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2007-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,7 @@
 
 // See the comment at the top of g1MemoryPool.hpp
 size_t G1MemoryPoolSuper::eden_space_used(G1CollectedHeap* g1h) {
-  size_t young_list_length = g1h->young_list_length();
+  size_t young_list_length = g1h->young_list()->length();
   size_t eden_used = young_list_length * HeapRegion::GrainBytes;
   size_t survivor_used = survivor_space_used(g1h);
   eden_used = subtract_up_to_zero(eden_used, survivor_used);
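
The fixed line estimates eden usage as the young-list length times the region grain size, and then backs out the survivor usage with subtract_up_to_zero. Assuming that helper is the saturating subtraction its name suggests (its definition is not part of this diff), the computation amounts to:

  #include <cstddef>

  // Saturating subtraction for unsigned sizes: never wraps below zero.
  static std::size_t subtract_up_to_zero_sketch(std::size_t x, std::size_t y) {
    return (x > y) ? (x - y) : 0;
  }

  // Every young region is grain_bytes large; survivor regions are counted in
  // the young list, so their usage is subtracted back out of the eden total.
  std::size_t eden_space_used_sketch(std::size_t young_list_length,
                                     std::size_t grain_bytes,
                                     std::size_t survivor_used) {
    std::size_t eden_used = young_list_length * grain_bytes;
    return subtract_up_to_zero_sketch(eden_used, survivor_used);
  }
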
--- a/src/share/vm/utilities/debug.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/utilities/debug.cpp	Wed May 19 10:22:39 2010 -0700
@@ -72,7 +72,7 @@
 // assert/guarantee/... may happen very early during VM initialization.
 // Don't rely on anything that is initialized by Threads::create_vm(). For
 // example, don't use tty.
-bool assert_is_suppressed(const char* file_name, int line_no) {
+bool error_is_suppressed(const char* file_name, int line_no) {
   // The following 1-element cache requires that passed-in
   // file names are always only constant literals.
   if (file_name == last_file_name && line_no == last_line_no)  return true;
@@ -163,38 +163,30 @@
 #else
 
 // Place-holder for non-existent suppression check:
-#define assert_is_suppressed(file_name, line_no) (false)
+#define error_is_suppressed(file_name, line_no) (false)
 
 #endif //PRODUCT
 
-void report_assertion_failure(const char* file_name, int line_no, const char* message) {
-  if (Debugging || assert_is_suppressed(file_name, line_no))  return;
-  VMError err(ThreadLocalStorage::get_thread_slow(), message, file_name, line_no);
+void report_vm_error(const char* file, int line, const char* error_msg,
+                     const char* detail_msg)
+{
+  if (Debugging || error_is_suppressed(file, line)) return;
+  Thread* const thread = ThreadLocalStorage::get_thread_slow();
+  VMError err(thread, file, line, error_msg, detail_msg);
   err.report_and_die();
 }
 
-void report_fatal(const char* file_name, int line_no, const char* message) {
-  if (Debugging || assert_is_suppressed(file_name, line_no))  return;
-  VMError err(ThreadLocalStorage::get_thread_slow(), message, file_name, line_no);
-  err.report_and_die();
+void report_fatal(const char* file, int line, const char* message)
+{
+  report_vm_error(file, line, "fatal error", message);
 }
 
-void report_fatal_vararg(const char* file_name, int line_no, const char* format, ...) {
-  char buffer[256];
-  va_list ap;
-  va_start(ap, format);
-  jio_vsnprintf(buffer, sizeof(buffer), format, ap);
-  va_end(ap);
-  report_fatal(file_name, line_no, buffer);
-}
-
-
 // Used by report_vm_out_of_memory to detect recursion.
 static jint _exiting_out_of_mem = 0;
 
-// Just passing the flow to VMError to handle error
-void report_vm_out_of_memory(const char* file_name, int line_no, size_t size, const char* message) {
-  if (Debugging || assert_is_suppressed(file_name, line_no))  return;
+void report_vm_out_of_memory(const char* file, int line, size_t size,
+                             const char* message) {
+  if (Debugging || error_is_suppressed(file, line)) return;
 
   // We try to gather additional information for the first out of memory
   // error only; gathering additional data might cause an allocation and a
@@ -206,46 +198,28 @@
 
   if (first_time_here) {
     Thread* thread = ThreadLocalStorage::get_thread_slow();
-    VMError(thread, size, message, file_name, line_no).report_and_die();
+    VMError(thread, file, line, size, message).report_and_die();
   }
 
   // Dump core and abort
   vm_abort(true);
 }
 
-void report_vm_out_of_memory_vararg(const char* file_name, int line_no, size_t size, const char* format, ...) {
-  char buffer[256];
-  va_list ap;
-  va_start(ap, format);
-  jio_vsnprintf(buffer, sizeof(buffer), format, ap);
-  va_end(ap);
-  report_vm_out_of_memory(file_name, line_no, size, buffer);
+void report_should_not_call(const char* file, int line) {
+  report_vm_error(file, line, "ShouldNotCall()");
 }
 
-void report_should_not_call(const char* file_name, int line_no) {
-  if (Debugging || assert_is_suppressed(file_name, line_no))  return;
-  VMError err(ThreadLocalStorage::get_thread_slow(), "ShouldNotCall()", file_name, line_no);
-  err.report_and_die();
+void report_should_not_reach_here(const char* file, int line) {
+  report_vm_error(file, line, "ShouldNotReachHere()");
 }
 
-
-void report_should_not_reach_here(const char* file_name, int line_no) {
-  if (Debugging || assert_is_suppressed(file_name, line_no))  return;
-  VMError err(ThreadLocalStorage::get_thread_slow(), "ShouldNotReachHere()", file_name, line_no);
-  err.report_and_die();
+void report_unimplemented(const char* file, int line) {
+  report_vm_error(file, line, "Unimplemented()");
 }
 
-
-void report_unimplemented(const char* file_name, int line_no) {
-  if (Debugging || assert_is_suppressed(file_name, line_no))  return;
-  VMError err(ThreadLocalStorage::get_thread_slow(), "Unimplemented()", file_name, line_no);
-  err.report_and_die();
-}
-
-
-void report_untested(const char* file_name, int line_no, const char* msg) {
+void report_untested(const char* file, int line, const char* message) {
 #ifndef PRODUCT
-  warning("Untested: %s in %s: %d\n", msg, file_name, line_no);
+  warning("Untested: %s in %s: %d\n", message, file, line);
 #endif // PRODUCT
 }
 
@@ -284,6 +258,51 @@
     return error_reported;
 }
 
+#ifndef PRODUCT
+#include <signal.h>
+
+void test_error_handler(size_t test_num)
+{
+  if (test_num == 0) return;
+
+  // If asserts are disabled, use the corresponding guarantee instead.
+  size_t n = test_num;
+  NOT_DEBUG(if (n <= 2) n += 2);
+
+  const char* const str = "hello";
+  const size_t      num = (size_t)os::vm_page_size();
+
+  const char* const eol = os::line_separator();
+  const char* const msg = "this message should be truncated during formatting";
+
+  // Keep this in sync with test/runtime/6888954/vmerrors.sh.
+  switch (n) {
+    case  1: assert(str == NULL, "expected null");
+    case  2: assert(num == 1023 && *str == 'X',
+                    err_msg("num=" SIZE_FORMAT " str=\"%s\"", num, str));
+    case  3: guarantee(str == NULL, "expected null");
+    case  4: guarantee(num == 1023 && *str == 'X',
+                       err_msg("num=" SIZE_FORMAT " str=\"%s\"", num, str));
+    case  5: fatal("expected null");
+    case  6: fatal(err_msg("num=" SIZE_FORMAT " str=\"%s\"", num, str));
+    case  7: fatal(err_msg("%s%s#    %s%s#    %s%s#    %s%s#    %s%s#    "
+                           "%s%s#    %s%s#    %s%s#    %s%s#    %s%s#    "
+                           "%s%s#    %s%s#    %s%s#    %s%s#    %s",
+                           msg, eol, msg, eol, msg, eol, msg, eol, msg, eol,
+                           msg, eol, msg, eol, msg, eol, msg, eol, msg, eol,
+                           msg, eol, msg, eol, msg, eol, msg, eol, msg));
+    case  8: vm_exit_out_of_memory(num, "ChunkPool::allocate");
+    case  9: ShouldNotCallThis();
+    case 10: ShouldNotReachHere();
+    case 11: Unimplemented();
+    // This is last because it does not generate an hs_err* file on Windows.
+    case 12: os::signal_raise(SIGSEGV);
+
+    default: ShouldNotReachHere();
+  }
+}
+#endif // #ifndef PRODUCT
+
 // ------ helper functions for debugging go here ------------
 
 #ifndef PRODUCT
--- a/src/share/vm/utilities/debug.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/utilities/debug.hpp	Wed May 19 10:22:39 2010 -0700
@@ -22,28 +22,54 @@
  *
  */
 
+#include <stdarg.h>
+
+// Simple class to format the ctor arguments into a fixed-sized buffer.
+template <size_t bufsz = 256>
+class FormatBuffer {
+public:
+  inline FormatBuffer(const char * format, ...);
+  operator const char *() const { return _buf; }
+
+private:
+  FormatBuffer(const FormatBuffer &); // prevent copies
+
+private:
+  char _buf[bufsz];
+};
+
+template <size_t bufsz>
+FormatBuffer<bufsz>::FormatBuffer(const char * format, ...) {
+  va_list argp;
+  va_start(argp, format);
+  vsnprintf(_buf, bufsz, format, argp);
+  va_end(argp);
+}
+
+// Used to format messages for assert(), guarantee(), fatal(), etc.
+typedef FormatBuffer<> err_msg;
+
 // assertions
 #ifdef ASSERT
-// Turn this off by default:
-//#define USE_REPEATED_ASSERTS
-#ifdef USE_REPEATED_ASSERTS
-  #define assert(p,msg)                                              \
-    { for (int __i = 0; __i < AssertRepeat; __i++) {                 \
-        if (!(p)) {                                                  \
-          report_assertion_failure(__FILE__, __LINE__,               \
-                                  "assert(" XSTR(p) ",\"" msg "\")");\
-          BREAKPOINT;                                                \
-        }                                                            \
-      }                                                              \
-    }
-#else
-  #define assert(p,msg)                                          \
-    if (!(p)) {                                                  \
-      report_assertion_failure(__FILE__, __LINE__,               \
-                              "assert(" XSTR(p) ",\"" msg "\")");\
-      BREAKPOINT;                                                \
-    }
-#endif
+#ifndef USE_REPEATED_ASSERTS
+#define assert(p, msg)                                                       \
+do {                                                                         \
+  if (!(p)) {                                                                \
+    report_vm_error(__FILE__, __LINE__, "assert(" #p ") failed", msg);       \
+    BREAKPOINT;                                                              \
+  }                                                                          \
+} while (0)
+#else // #ifndef USE_REPEATED_ASSERTS
+#define assert(p, msg)                                                       \
+do {                                                                         \
+  for (int __i = 0; __i < AssertRepeat; __i++) {                             \
+    if (!(p)) {                                                              \
+      report_vm_error(__FILE__, __LINE__, "assert(" #p ") failed", msg);     \
+      BREAKPOINT;                                                            \
+    }                                                                        \
+  }                                                                          \
+} while (0)
+#endif // #ifndef USE_REPEATED_ASSERTS
 
 // This version of assert is for use with checking return status from
 // library calls that return actual error values eg. EINVAL,
@@ -52,70 +78,83 @@
 // what status was actually returned, so we pass the status variable as
 // an extra arg and use strerror to convert it to a meaningful string
 // like "Invalid argument", "out of memory" etc
-#define assert_status(p, status, msg)                                     \
-   do {                                                                   \
-    if (!(p)) {                                                           \
-      char buf[128];                                                      \
-      snprintf(buf, 127,                                                  \
-               "assert_status(" XSTR(p) ", error: %s(%d), \"" msg "\")" , \
-               strerror((status)), (status));                             \
-      report_assertion_failure(__FILE__, __LINE__, buf);                  \
-      BREAKPOINT;                                                         \
-    }                                                                     \
-  } while (0)
-
-// Another version of assert where the message is not a string literal
-// The boolean condition is not printed out because cpp doesn't like it.
-#define assert_msg(p, msg)                                       \
-    if (!(p)) {                                                  \
-      report_assertion_failure(__FILE__, __LINE__, msg);         \
-      BREAKPOINT;                                                \
-    }
+#define assert_status(p, status, msg)                                        \
+do {                                                                         \
+  if (!(p)) {                                                                \
+    report_vm_error(__FILE__, __LINE__, "assert(" #p ") failed",             \
+                    err_msg("error %s(%d) %s", strerror(status),             \
+                            status, msg));                                   \
+    BREAKPOINT;                                                              \
+  }                                                                          \
+} while (0)
 
 // Do not assert this condition if there's already another error reported.
 #define assert_if_no_error(cond,msg) assert((cond) || is_error_reported(), msg)
-#else
+#else // #ifdef ASSERT
   #define assert(p,msg)
   #define assert_status(p,status,msg)
   #define assert_if_no_error(cond,msg)
-  #define assert_msg(cond,msg)
-#endif
-
-
-// fatals
-#define fatal(m)                             { report_fatal(__FILE__, __LINE__, m                          ); BREAKPOINT; }
-#define fatal1(m,x1)                         { report_fatal_vararg(__FILE__, __LINE__, m, x1               ); BREAKPOINT; }
-#define fatal2(m,x1,x2)                      { report_fatal_vararg(__FILE__, __LINE__, m, x1, x2           ); BREAKPOINT; }
-#define fatal3(m,x1,x2,x3)                   { report_fatal_vararg(__FILE__, __LINE__, m, x1, x2, x3       ); BREAKPOINT; }
-#define fatal4(m,x1,x2,x3,x4)                { report_fatal_vararg(__FILE__, __LINE__, m, x1, x2, x3, x4   ); BREAKPOINT; }
-
-// out of memory
-#define vm_exit_out_of_memory(s,m)              { report_vm_out_of_memory(__FILE__, __LINE__, s, m                       ); BREAKPOINT; }
-#define vm_exit_out_of_memory1(s,m,x1)          { report_vm_out_of_memory_vararg(__FILE__, __LINE__, s, m, x1            ); BREAKPOINT; }
-#define vm_exit_out_of_memory2(s,m,x1,x2)       { report_vm_out_of_memory_vararg(__FILE__, __LINE__, s, m, x1, x2        ); BREAKPOINT; }
-#define vm_exit_out_of_memory3(s,m,x1,x2,x3)    { report_vm_out_of_memory_vararg(__FILE__, __LINE__, s, m, x1, x2, x3    ); BREAKPOINT; }
-#define vm_exit_out_of_memory4(s,m,x1,x2,x3,x4) { report_vm_out_of_memory_vararg(__FILE__, __LINE__, s, m, x1, x2, x3, x4); BREAKPOINT; }
+#endif // #ifdef ASSERT
 
 // guarantee is like assert except it's always executed -- use it for
-// cheap tests that catch errors that would otherwise be hard to find
+// cheap tests that catch errors that would otherwise be hard to find.
 // guarantee is also used for Verify options.
-#define guarantee(b,msg)         { if (!(b)) fatal("guarantee(" XSTR(b) ",\"" msg "\")"); }
+#define guarantee(p, msg)                                                    \
+do {                                                                         \
+  if (!(p)) {                                                                \
+    report_vm_error(__FILE__, __LINE__, "guarantee(" #p ") failed", msg);    \
+    BREAKPOINT;                                                              \
+  }                                                                          \
+} while (0)
+
+#define fatal(msg)                                                           \
+do {                                                                         \
+  report_fatal(__FILE__, __LINE__, msg);                                     \
+  BREAKPOINT;                                                                \
+} while (0)
+
+// out of memory
+#define vm_exit_out_of_memory(size, msg)                                     \
+do {                                                                         \
+  report_vm_out_of_memory(__FILE__, __LINE__, size, msg);                    \
+  BREAKPOINT;                                                                \
+} while (0)
 
-#define ShouldNotCallThis()      { report_should_not_call        (__FILE__, __LINE__); BREAKPOINT; }
-#define ShouldNotReachHere()     { report_should_not_reach_here  (__FILE__, __LINE__); BREAKPOINT; }
-#define Unimplemented()          { report_unimplemented          (__FILE__, __LINE__); BREAKPOINT; }
-#define Untested(msg)            { report_untested               (__FILE__, __LINE__, msg); BREAKPOINT; }
+#define ShouldNotCallThis()                                                  \
+do {                                                                         \
+  report_should_not_call(__FILE__, __LINE__);                                \
+  BREAKPOINT;                                                                \
+} while (0)
+
+#define ShouldNotReachHere()                                                 \
+do {                                                                         \
+  report_should_not_reach_here(__FILE__, __LINE__);                          \
+  BREAKPOINT;                                                                \
+} while (0)
+
+#define Unimplemented()                                                      \
+do {                                                                         \
+  report_unimplemented(__FILE__, __LINE__);                                  \
+  BREAKPOINT;                                                                \
+} while (0)
+
+#define Untested(msg)                                                        \
+do {                                                                         \
+  report_untested(__FILE__, __LINE__, msg);                                  \
+  BREAKPOINT;                                                                \
+} while (0)
 
 // error reporting helper functions
-void report_assertion_failure(const char* file_name, int line_no, const char* message);
-void report_fatal_vararg(const char* file_name, int line_no, const char* format, ...);
-void report_fatal(const char* file_name, int line_no, const char* message);
-void report_vm_out_of_memory_vararg(const char* file_name, int line_no, size_t size, const char* format, ...);
-void report_vm_out_of_memory(const char* file_name, int line_no, size_t size, const char* message);
-void report_should_not_call(const char* file_name, int line_no);
-void report_should_not_reach_here(const char* file_name, int line_no);
-void report_unimplemented(const char* file_name, int line_no);
-void report_untested(const char* file_name, int line_no, const char* msg);
+void report_vm_error(const char* file, int line, const char* error_msg,
+                     const char* detail_msg = NULL);
+void report_fatal(const char* file, int line, const char* message);
+void report_vm_out_of_memory(const char* file, int line, size_t size,
+                             const char* message);
+void report_should_not_call(const char* file, int line);
+void report_should_not_reach_here(const char* file, int line);
+void report_unimplemented(const char* file, int line);
+void report_untested(const char* file, int line, const char* message);
+
 void warning(const char* format, ...);
 
 // out of memory reporting
@@ -125,5 +164,8 @@
 bool is_error_reported();
 void set_error_reported();
 
+/* Test assert(), fatal(), guarantee(), etc. */
+NOT_PRODUCT(void test_error_handler(size_t test_num);)
+
 void pd_ps(frame f);
 void pd_obfuscate_location(char *buf, size_t buflen);
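
The FormatBuffer template above is the entire mechanism behind err_msg: the constructor formats into a fixed 256-byte member buffer, and the conversion operator hands that buffer to the report functions as a const char*, which is what lets fatal(err_msg(...)) replace the old fatal1..fatal4 vararg macros. A self-contained sketch of the same shape (hypothetical *_sketch names; it compiles and runs on its own):

  #include <cstdarg>
  #include <cstddef>
  #include <cstdio>

  template <std::size_t bufsz = 256>
  class FormatBufferSketch {
   public:
    FormatBufferSketch(const char* format, ...) {
      va_list ap;
      va_start(ap, format);
      std::vsnprintf(_buf, bufsz, format, ap);  // silently truncates, as in the VM
      va_end(ap);
    }
    operator const char*() const { return _buf; }
   private:
    char _buf[bufsz];
  };
  typedef FormatBufferSketch<> err_msg_sketch;

  int main() {
    int status = 22;
    // Same shape as the new call sites, e.g. fatal(err_msg("error %s(%d) ...", ...)).
    std::printf("%s\n",
                (const char*)err_msg_sketch("error %s(%d)", "Invalid argument", status));
    return 0;
  }
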
--- a/src/share/vm/utilities/exceptions.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/utilities/exceptions.cpp	Wed May 19 10:22:39 2010 -0700
@@ -378,7 +378,7 @@
 void Exceptions::debug_check_abort(const char *value_string) {
   if (AbortVMOnException != NULL && value_string != NULL &&
       strstr(value_string, AbortVMOnException)) {
-    fatal1("Saw %s, aborting", value_string);
+    fatal(err_msg("Saw %s, aborting", value_string));
   }
 }
 
--- a/src/share/vm/utilities/macros.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/utilities/macros.hpp	Wed May 19 10:22:39 2010 -0700
@@ -188,6 +188,4 @@
 #define NOT_SPARC(code) code
 #endif
 
-#define FIX_THIS(code) report_assertion_failure("FIX_THIS",__FILE__, __LINE__, "")
-
 #define define_pd_global(type, name, value) const type pd_##name = value;
--- a/src/share/vm/utilities/ostream.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/utilities/ostream.cpp	Wed May 19 10:22:39 2010 -0700
@@ -363,7 +363,7 @@
   return _log_file != NULL;
 }
 
-static const char* make_log_name(const char* log_name, const char* force_directory, char* buf) {
+static const char* make_log_name(const char* log_name, const char* force_directory) {
   const char* basename = log_name;
   char file_sep = os::file_separator()[0];
   const char* cp;
@@ -374,6 +374,27 @@
   }
   const char* nametail = log_name;
 
+  // Compute buffer length
+  size_t buffer_length;
+  if (force_directory != NULL) {
+    buffer_length = strlen(force_directory) + strlen(os::file_separator()) +
+                    strlen(basename) + 1;
+  } else {
+    buffer_length = strlen(log_name) + 1;
+  }
+
+  const char* star = strchr(basename, '*');
+  int star_pos = (star == NULL) ? -1 : (star - nametail);
+
+  char pid[32];
+  if (star_pos >= 0) {
+    jio_snprintf(pid, sizeof(pid), "%u", os::current_process_id());
+    buffer_length += strlen(pid);
+  }
+
+  // Create big enough buffer.
+  char *buf = NEW_C_HEAP_ARRAY(char, buffer_length);
+
   strcpy(buf, "");
   if (force_directory != NULL) {
     strcat(buf, force_directory);
@@ -381,14 +402,11 @@
     nametail = basename;       // completely skip directory prefix
   }
 
-  const char* star = strchr(basename, '*');
-  int star_pos = (star == NULL) ? -1 : (star - nametail);
-
   if (star_pos >= 0) {
     // convert foo*bar.log to foo123bar.log
     int buf_pos = (int) strlen(buf);
     strncpy(&buf[buf_pos], nametail, star_pos);
-    sprintf(&buf[buf_pos + star_pos], "%u", os::current_process_id());
+    strcpy(&buf[buf_pos + star_pos], pid);
     nametail += star_pos + 1;  // skip prefix and star
   }
 
@@ -399,20 +417,23 @@
 void defaultStream::init_log() {
   // %%% Need a MutexLocker?
   const char* log_name = LogFile != NULL ? LogFile : "hotspot.log";
-  char buf[O_BUFLEN*2];
-  const char* try_name = make_log_name(log_name, NULL, buf);
+  const char* try_name = make_log_name(log_name, NULL);
   fileStream* file = new(ResourceObj::C_HEAP) fileStream(try_name);
   if (!file->is_open()) {
     // Try again to open the file.
     char warnbuf[O_BUFLEN*2];
-    sprintf(warnbuf, "Warning:  Cannot open log file: %s\n", try_name);
+    jio_snprintf(warnbuf, sizeof(warnbuf),
+                 "Warning:  Cannot open log file: %s\n", try_name);
     // Note:  This feature is for maintainer use only.  No need for L10N.
     jio_print(warnbuf);
-    try_name = make_log_name("hs_pid*.log", os::get_temp_directory(), buf);
-    sprintf(warnbuf, "Warning:  Forcing option -XX:LogFile=%s\n", try_name);
+    FREE_C_HEAP_ARRAY(char, try_name);
+    try_name = make_log_name("hs_pid*.log", os::get_temp_directory());
+    jio_snprintf(warnbuf, sizeof(warnbuf),
+                 "Warning:  Forcing option -XX:LogFile=%s\n", try_name);
     jio_print(warnbuf);
     delete file;
     file = new(ResourceObj::C_HEAP) fileStream(try_name);
+    FREE_C_HEAP_ARRAY(char, try_name);
   }
   if (file->is_open()) {
     _log_file = file;
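
make_log_name now computes the buffer size up front (directory plus separator plus basename, with room for the process id when the name contains a '*' placeholder) and heap-allocates exactly that, instead of writing into a caller-supplied fixed buffer. A sketch of the star-to-pid expansion using the standard C library (the VM uses jio_snprintf and NEW_C_HEAP_ARRAY rather than snprintf/malloc):

  #include <cstdio>
  #include <cstdlib>
  #include <cstring>

  // Expand "foo*bar.log" to e.g. "foo123bar.log"; the caller frees the result.
  char* expand_star_to_pid(const char* name, unsigned pid) {
    char pid_str[32];
    std::snprintf(pid_str, sizeof(pid_str), "%u", pid);

    const char* star = std::strchr(name, '*');
    std::size_t len = std::strlen(name) + (star ? std::strlen(pid_str) : 0) + 1;
    char* buf = (char*)std::malloc(len);

    if (star == NULL) {
      std::strcpy(buf, name);
    } else {
      std::size_t prefix = (std::size_t)(star - name);
      std::memcpy(buf, name, prefix);        // "foo"
      buf[prefix] = '\0';
      std::strcat(buf, pid_str);             // "foo123"
      std::strcat(buf, star + 1);            // "foo123bar.log"
    }
    return buf;
  }
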
--- a/src/share/vm/utilities/vmError.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/utilities/vmError.cpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,7 +65,8 @@
     _current_step = 0;
     _current_step_info = NULL;
 
-    _message = "";
+    _message = NULL;
+    _detail_msg = NULL;
     _filename = NULL;
     _lineno = 0;
 
@@ -73,31 +74,36 @@
 }
 
 // Constructor for internal errors
-VMError::VMError(Thread* thread, const char* message, const char* filename, int lineno) {
+VMError::VMError(Thread* thread, const char* filename, int lineno,
+                 const char* message, const char * detail_msg)
+{
+  _thread = thread;
+  _id = internal_error;     // Value that's not an OS exception/signal
+  _filename = filename;
+  _lineno = lineno;
+  _message = message;
+  _detail_msg = detail_msg;
+
+  _verbose = false;
+  _current_step = 0;
+  _current_step_info = NULL;
+
+  _pc = NULL;
+  _siginfo = NULL;
+  _context = NULL;
+
+  _size = 0;
+}
+
+// Constructor for OOM errors
+VMError::VMError(Thread* thread, const char* filename, int lineno, size_t size,
+                 const char* message) {
     _thread = thread;
-    _id = internal_error;     // set it to a value that's not an OS exception/signal
+    _id = oom_error;     // Value that's not an OS exception/signal
     _filename = filename;
     _lineno = lineno;
     _message = message;
-
-    _verbose = false;
-    _current_step = 0;
-    _current_step_info = NULL;
-
-    _pc = NULL;
-    _siginfo = NULL;
-    _context = NULL;
-
-    _size = 0;
-}
-
-// Constructor for OOM errors
-VMError::VMError(Thread* thread, size_t size, const char* message, const char* filename, int lineno) {
-    _thread = thread;
-    _id = oom_error;     // set it to a value that's not an OS exception/signal
-    _filename = filename;
-    _lineno = lineno;
-    _message = message;
+    _detail_msg = NULL;
 
     _verbose = false;
     _current_step = 0;
@@ -114,10 +120,11 @@
 // Constructor for non-fatal errors
 VMError::VMError(const char* message) {
     _thread = NULL;
-    _id = internal_error;     // set it to a value that's not an OS exception/signal
+    _id = internal_error;     // Value that's not an OS exception/signal
     _filename = NULL;
     _lineno = 0;
     _message = message;
+    _detail_msg = NULL;
 
     _verbose = false;
     _current_step = 0;
@@ -191,27 +198,77 @@
                  "%s (0x%x) at pc=" PTR_FORMAT ", pid=%d, tid=" UINTX_FORMAT,
                  signame, _id, _pc,
                  os::current_process_id(), os::current_thread_id());
+  } else if (_filename != NULL && _lineno > 0) {
+    // skip directory names
+    char separator = os::file_separator()[0];
+    const char *p = strrchr(_filename, separator);
+    int n = jio_snprintf(buf, buflen,
+                         "Internal Error at %s:%d, pid=%d, tid=" UINTX_FORMAT,
+                         p ? p + 1 : _filename, _lineno,
+                         os::current_process_id(), os::current_thread_id());
+    if (n >= 0 && n < buflen && _message) {
+      if (_detail_msg) {
+        jio_snprintf(buf + n, buflen - n, "%s%s: %s",
+                     os::line_separator(), _message, _detail_msg);
+      } else {
+        jio_snprintf(buf + n, buflen - n, "%sError: %s",
+                     os::line_separator(), _message);
+      }
+    }
   } else {
-    if (_filename != NULL && _lineno > 0) {
-      // skip directory names
-      char separator = os::file_separator()[0];
-      const char *p = strrchr(_filename, separator);
-
-      jio_snprintf(buf, buflen,
-        "Internal Error at %s:%d, pid=%d, tid=" UINTX_FORMAT " \nError: %s",
-        p ? p + 1 : _filename, _lineno,
-        os::current_process_id(), os::current_thread_id(),
-        _message ? _message : "");
-    } else {
-      jio_snprintf(buf, buflen,
-        "Internal Error (0x%x), pid=%d, tid=" UINTX_FORMAT,
-        _id, os::current_process_id(), os::current_thread_id());
-    }
+    jio_snprintf(buf, buflen,
+                 "Internal Error (0x%x), pid=%d, tid=" UINTX_FORMAT,
+                 _id, os::current_process_id(), os::current_thread_id());
   }
 
   return buf;
 }
 
+void VMError::print_stack_trace(outputStream* st, JavaThread* jt,
+                                char* buf, int buflen, bool verbose) {
+#ifdef ZERO
+  if (jt->zero_stack()->sp() && jt->top_zero_frame()) {
+    // StackFrameStream uses the frame anchor, which may not have
+    // been set up.  This can be done at any time in Zero, however,
+    // so if it hasn't been set up then we just set it up now and
+    // clear it again when we're done.
+    bool has_last_Java_frame = jt->has_last_Java_frame();
+    if (!has_last_Java_frame)
+      jt->set_last_Java_frame();
+    st->print("Java frames:");
+
+    // If the top frame is a Shark frame and the frame anchor isn't
+    // set up then it's possible that the information in the frame
+    // is garbage: it could be from a previous decache, or it could
+    // simply have never been written.  So we print a warning...
+    StackFrameStream sfs(jt);
+    if (!has_last_Java_frame && !sfs.is_done()) {
+      if (sfs.current()->zeroframe()->is_shark_frame()) {
+        st->print(" (TOP FRAME MAY BE JUNK)");
+      }
+    }
+    st->cr();
+
+    // Print the frames
+    for(int i = 0; !sfs.is_done(); sfs.next(), i++) {
+      sfs.current()->zero_print_on_error(i, st, buf, buflen);
+      st->cr();
+    }
+
+    // Reset the frame anchor if necessary
+    if (!has_last_Java_frame)
+      jt->reset_last_Java_frame();
+  }
+#else
+  if (jt->has_last_Java_frame()) {
+    st->print_cr("Java frames: (J=compiled Java code, j=interpreted, Vv=VM code)");
+    for(StackFrameStream sfs(jt); !sfs.is_done(); sfs.next()) {
+      sfs.current()->print_on_error(st, buf, buflen, verbose);
+      st->cr();
+    }
+  }
+#endif // ZERO
+}
 
 // This is the main function to report a fatal error. Only one thread can
 // call this function, so we don't need to worry about MT-safety. But it's
@@ -324,7 +381,9 @@
   STEP(40, "(printing error message)")
 
      // error message
-     if (_message && _message[0] != '\0') {
+     if (_detail_msg) {
+       st->print_cr("#  %s: %s", _message ? _message : "Error", _detail_msg);
+     } else if (_message) {
        st->print_cr("#  Error: %s", _message);
      }
 
@@ -457,49 +516,7 @@
   STEP(130, "(printing Java stack)" )
 
      if (_verbose && _thread && _thread->is_Java_thread()) {
-       JavaThread* jt = (JavaThread*)_thread;
-#ifdef ZERO
-       if (jt->zero_stack()->sp() && jt->top_zero_frame()) {
-         // StackFrameStream uses the frame anchor, which may not have
-         // been set up.  This can be done at any time in Zero, however,
-         // so if it hasn't been set up then we just set it up now and
-         // clear it again when we're done.
-         bool has_last_Java_frame = jt->has_last_Java_frame();
-         if (!has_last_Java_frame)
-           jt->set_last_Java_frame();
-         st->print("Java frames:");
-
-         // If the top frame is a Shark frame and the frame anchor isn't
-         // set up then it's possible that the information in the frame
-         // is garbage: it could be from a previous decache, or it could
-         // simply have never been written.  So we print a warning...
-         StackFrameStream sfs(jt);
-         if (!has_last_Java_frame && !sfs.is_done()) {
-           if (sfs.current()->zeroframe()->is_shark_frame()) {
-             st->print(" (TOP FRAME MAY BE JUNK)");
-           }
-         }
-         st->cr();
-
-         // Print the frames
-         for(int i = 0; !sfs.is_done(); sfs.next(), i++) {
-           sfs.current()->zero_print_on_error(i, st, buf, sizeof(buf));
-           st->cr();
-         }
-
-         // Reset the frame anchor if necessary
-         if (!has_last_Java_frame)
-           jt->reset_last_Java_frame();
-       }
-#else
-       if (jt->has_last_Java_frame()) {
-         st->print_cr("Java frames: (J=compiled Java code, j=interpreted, Vv=VM code)");
-         for(StackFrameStream sfs(jt); !sfs.is_done(); sfs.next()) {
-           sfs.current()->print_on_error(st, buf, sizeof(buf));
-           st->cr();
-         }
-       }
-#endif // ZERO
+       print_stack_trace(st, (JavaThread*)_thread, buf, sizeof(buf));
      }
 
   STEP(135, "(printing target Java thread stack)" )
@@ -509,13 +526,7 @@
        JavaThread*  jt = ((NamedThread *)_thread)->processed_thread();
        if (jt != NULL) {
          st->print_cr("JavaThread " PTR_FORMAT " (nid = " UINTX_FORMAT ") was being processed", jt, jt->osthread()->thread_id());
-         if (jt->has_last_Java_frame()) {
-           st->print_cr("Java frames: (J=compiled Java code, j=interpreted, Vv=VM code)");
-           for(StackFrameStream sfs(jt); !sfs.is_done(); sfs.next()) {
-             sfs.current()->print_on_error(st, buf, sizeof(buf), true);
-             st->cr();
-           }
-         }
+         print_stack_trace(st, jt, buf, sizeof(buf), true);
        }
      }
 
@@ -807,8 +818,8 @@
       if (fd == -1) {
         // try temp directory
         const char * tmpdir = os::get_temp_directory();
-        jio_snprintf(buffer, sizeof(buffer), "%shs_err_pid%u.log",
-                     (tmpdir ? tmpdir : ""), os::current_process_id());
+        jio_snprintf(buffer, sizeof(buffer), "%s%shs_err_pid%u.log",
+                     tmpdir, os::file_separator(), os::current_process_id());
         fd = open(buffer, O_WRONLY | O_CREAT | O_TRUNC, 0666);
       }
 
--- a/src/share/vm/utilities/vmError.hpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/utilities/vmError.hpp	Wed May 19 10:22:39 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,6 +37,7 @@
                              //                     0x8xxxxxxx system warnings
 
   const char * _message;
+  const char * _detail_msg;
 
   Thread *     _thread;      // NULL if it's native thread
 
@@ -70,17 +71,24 @@
   // generate an error report
   void report(outputStream* st);
 
+  // generate a stack trace
+  static void print_stack_trace(outputStream* st, JavaThread* jt,
+                                char* buf, int buflen, bool verbose = false);
+
   // accessor
-  const char* message()         { return _message; }
+  const char* message() const    { return _message; }
+  const char* detail_msg() const { return _detail_msg; }
 
 public:
   // Constructor for crashes
   VMError(Thread* thread, int sig, address pc, void* siginfo, void* context);
   // Constructor for VM internal errors
-  VMError(Thread* thread, const char* message, const char* filename, int lineno);
+  VMError(Thread* thread, const char* filename, int lineno,
+          const char* message, const char * detail_msg);
 
-  // Constructors for VM OOM errors
-  VMError(Thread* thread, size_t size, const char* message, const char* filename, int lineno);
+  // Constructor for VM OOM errors
+  VMError(Thread* thread, const char* filename, int lineno, size_t size,
+          const char* message);
   // Constructor for non-fatal errors
   VMError(const char* message);
 
--- a/src/share/vm/utilities/xmlstream.cpp	Mon Apr 05 10:17:15 2010 -0700
+++ b/src/share/vm/utilities/xmlstream.cpp	Wed May 19 10:22:39 2010 -0700
@@ -328,7 +328,7 @@
 // ------------------------------------------------------------------
 void xmlStream::va_done(const char* format, va_list ap) {
   char buffer[200];
-  guarantee(strlen(format) + 10 < sizeof(buffer), "bigger format buffer")
+  guarantee(strlen(format) + 10 < sizeof(buffer), "bigger format buffer");
   const char* kind = format;
   const char* kind_end = strchr(kind, ' ');
   size_t kind_len = (kind_end != NULL) ? (kind_end - kind) : strlen(kind);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6431242/Test.java	Wed May 19 10:22:39 2010 -0700
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 6431242
+ * @run main/othervm -server -XX:+PrintCompilation Test
+ */
+
+public class Test {
+
+  int    _len      = 8;
+  int[]  _arr_i    = new int[_len];
+  long[] _arr_l    = new long[_len];
+
+  int[]  _arr_i_cp = new int [_len];
+  long[] _arr_l_cp = new long [_len];
+
+  int _k     = 0x12345678;
+  int _j     = 0;
+  int _ir    = 0x78563412;
+  int _ir1   = 0x78563413;
+  int _ir2   = 0x79563412;
+
+  long _m    = 0x123456789abcdef0L;
+  long _l    = 0L;
+  long _lr   = 0xf0debc9a78563412L;
+  long _lr1  = 0xf0debc9a78563413L;
+  long _lr2  = 0xf1debc9a78563412L;
+
+  void init() {
+    for (int i=0; i<_arr_i.length; i++) {
+      _arr_i[i] = _k;
+      _arr_l[i] = _m;
+    }
+  }
+
+  public int test_int_reversed(int i) {
+    return Integer.reverseBytes(i);
+  }
+
+  public long test_long_reversed(long i) {
+    return Long.reverseBytes(i);
+  }
+
+  public void test_copy_ints(int[] dst, int[] src) {
+    for(int i=0; i<src.length; i++) {
+      dst[i] = Integer.reverseBytes(src[i]);
+    }
+  }
+
+  public void test_copy_ints_reversed(int[] dst, int[] src) {
+    for (int i=0; i<src.length; i++) {
+      dst[i] = 1 + Integer.reverseBytes(src[i]);
+    }
+  }
+
+  public void test_copy_ints_store_reversed(int[] dst, int[] src) {
+    for(int i=0; i<src.length; i++) {
+      dst[i] = Integer.reverseBytes(1 + src[i]);
+    }
+  }
+
+  public void test_copy_longs(long[] dst, long[] src) {
+    for(int i=0; i<src.length; i++) {
+      dst[i] = Long.reverseBytes(src[i]);
+    }
+  }
+
+  public void test_copy_longs_reversed(long[] dst, long[] src) {
+    for (int i=0; i<src.length; i++) {
+      dst[i] = 1 + Long.reverseBytes(src[i]);
+    }
+  }
+
+  public void test_copy_longs_store_reversed(long[] dst, long[] src) {
+    for(int i=0; i<src.length; i++) {
+      dst[i] = Long.reverseBytes(1 + src[i]);
+    }
+  }
+
+  public void test() throws Exception {
+    int up_limit=90000;
+
+
+    //test single
+
+    for (int loop=0; loop<up_limit; loop++) {
+      _j = test_int_reversed(_k);
+      if (_j != _ir ) {
+        throw new Exception("Interger.reverseBytes failed " + _j + " iter " + loop);
+      }
+      _l = test_long_reversed(_m);
+      if (_l != _lr ) {
+        throw new Exception("Long.reverseBytes failed " + _l + " iter " + loop);
+      }
+    }
+
+    // test scalar load/store
+    for (int loop=0; loop<up_limit; loop++) {
+
+      test_copy_ints(_arr_i_cp, _arr_i);
+      for (int j=0; j< _arr_i.length; j++) {
+        if (_arr_i_cp[j] != _ir) {
+          throw new Exception("Interger.reverseBytes failed test_copy_ints iter " + loop);
+        }
+      }
+
+      test_copy_ints_reversed(_arr_i_cp, _arr_i);
+      for (int j=0; j< _arr_i.length; j++) {
+        if (_arr_i_cp[j] != _ir1) {
+          throw new Exception("Interger.reverseBytes failed test_copy_ints_reversed iter " + loop);
+        }
+      }
+      test_copy_ints_store_reversed(_arr_i_cp, _arr_i);
+      for (int j=0; j< _arr_i.length; j++) {
+        if (_arr_i_cp[j] != _ir2) {
+          throw new Exception("Interger.reverseBytes failed test_copy_ints_store_reversed iter " + loop);
+        }
+      }
+
+      test_copy_longs(_arr_l_cp, _arr_l);
+      for (int j=0; j< _arr_i.length; j++) {
+        if (_arr_l_cp[j] != _lr) {
+          throw new Exception("Long.reverseBytes failed test_copy_longs iter " + loop);
+        }
+      }
+      test_copy_longs_reversed(_arr_l_cp, _arr_l);
+      for (int j=0; j< _arr_l.length; j++) {
+        if (_arr_l_cp[j] != _lr1) {
+          throw new Exception("Long.reverseBytes failed test_copy_longs_reversed iter " + loop);
+        }
+      }
+      test_copy_longs_store_reversed(_arr_l_cp, _arr_l);
+      for (int j=0; j< _arr_l.length; j++) {
+        if (_arr_l_cp[j] != _lr2) {
+          throw new Exception("Long.reverseBytes failed test_copy_longs_store_reversed iter " + loop);
+        }
+      }
+
+    }
+  }
+
+  public static void main(String args[]) {
+    try {
+      Test t = new Test();
+      t.init();
+      t.test();
+      System.out.println("Passed");
+    } catch (Exception e) {
+      e.printStackTrace();
+      System.out.println("Failed");
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6892265/Test.java	Wed May 19 10:22:39 2010 -0700
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6892265
+ * @summary System.arraycopy unable to reference elements beyond Integer.MAX_VALUE bytes
+ *
+ * @run main/othervm Test
+ */
+
+public class Test {
+  static  final int NCOPY = 1;
+  static  final int OVERFLOW = 1;
+  static  int[] src2 = new int[NCOPY];
+  static  int[] dst2;
+
+  static void test() {
+    int N;
+
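+    // The byte offset of element N is N*4 > Integer.MAX_VALUE, so a 32-bit
+    // offset computation in the compiled arraycopy would overflow (bug 6892265).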
+    N = Integer.MAX_VALUE/4 + OVERFLOW;
+    System.arraycopy(src2, 0, dst2, N, NCOPY);
+    System.arraycopy(dst2, N, src2, 0, NCOPY);
+  }
+
+  public static void main(String[] args) {
+    try {
+      dst2 = new int[NCOPY + Integer.MAX_VALUE/4 + OVERFLOW];
+    } catch (OutOfMemoryError e) {
+       System.exit(95); // Not enough memory
+    }
+    System.out.println("warmup");
+    for (int i = 0; i < 11000; i++) {
+      test();
+    }
+    System.out.println("start");
+    for (int i = 0; i < 1000; i++) {
+      test();
+    }
+    System.out.println("finish");
+  }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6946040/TestCharShortByteSwap.java	Wed May 19 10:22:39 2010 -0700
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2010 Google, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 6946040
+ * @summary Tests Character/Short.reverseBytes and their intrinsics implementation in the server compiler
+ * @run main/othervm -Xbatch -server -XX:CompileOnly=.testChar,.testShort TestCharShortByteSwap
+ */
+
+// This test must run without any command line arguments.
+
+public class TestCharShortByteSwap {
+
+  private static short initShort(String[] args, short v) {
+    if (args.length > 0) {
+      try {
+        return (short) Integer.valueOf(args[0]).intValue();
+      } catch (NumberFormatException e) { }
+    }
+    return v;
+  }
+
+  private static char initChar(String[] args, char v) {
+    if (args.length > 0) {
+      try {
+        return (char) Integer.valueOf(args[0]).intValue();
+      } catch (NumberFormatException e) { }
+    }
+    return v;
+  }
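+
+  // Routing the test values through args (which are never supplied; see the
+  // note above) presumably keeps them opaque to the compiler, so the
+  // reverseBytes calls cannot be constant-folded away.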
+
+  private static void testChar(char a, char b) {
+    if (a != Character.reverseBytes(b)) {
+      throw new RuntimeException("FAIL: " + (int)a + " != Character.reverseBytes(" + (int)b + ")");
+    }
+    if (b != Character.reverseBytes(a)) {
+      throw new RuntimeException("FAIL: " + (int)b + " != Character.reverseBytes(" + (int)a + ")");
+    }
+  }
+
+  private static void testShort(short a, short b) {
+    if (a != Short.reverseBytes(b)) {
+      throw new RuntimeException("FAIL: " + (int)a + " != Short.reverseBytes(" + (int)b + ")");
+    }
+    if (b != Short.reverseBytes(a)) {
+      throw new RuntimeException("FAIL: " + (int)b + " != Short.reverseBytes(" + (int)a + ")");
+    }
+  }
+
+  public static void main(String[] args) {
+    for (int i = 0; i < 100000; ++i) { // Trigger compilation
+      char c1 = initChar(args, (char) 0x0123);
+      char c2 = initChar(args, (char) 0x2301);
+      char c3 = initChar(args, (char) 0xaabb);
+      char c4 = initChar(args, (char) 0xbbaa);
+      short s1 = initShort(args, (short) 0x0123);
+      short s2 = initShort(args, (short) 0x2301);
+      short s3 = initShort(args, (short) 0xaabb);
+      short s4 = initShort(args, (short) 0xbbaa);
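+      // Each pair is a value and its byte-swapped image (0x0123 <-> 0x2301,
+      // 0xaabb <-> 0xbbaa), so each test checks the swap in both directions.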
+      testChar(c1, c2);
+      testChar(c3, c4);
+      testShort(s1, s2);
+      testShort(s3, s4);
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/6888954/vmerrors.sh	Wed May 19 10:22:39 2010 -0700
@@ -0,0 +1,71 @@
+# @test
+# @bug 6888954
+# @summary exercise HotSpot error handling code
+# @author John Coomes
+# @run shell vmerrors.sh
+
+# Repeatedly invoke java with a command-line option that causes HotSpot to
+# produce an error report and terminate just after initialization.  Each
+# invocation is identified by a small integer, <n>, which provokes a different
+# error (assertion failure, guarantee failure, fatal error, etc.).  The output
+# from stdout/stderr is written to <n>.out and the hs_err_pidXXX.log file is
+# renamed to <n>.log.
+#
+# The automated checking done by this script is minimal.  When updating the
+# fatal error handler it is more useful to run it manually or to use the -retain
+# option with jtreg so that test directories are not removed automatically.
+# To run stand-alone:
+#
+# TESTJAVA=/java/home/dir
+# TESTVMOPTS=...
+# export TESTJAVA TESTVMOPTS
+# sh test/runtime/6888954/vmerrors.sh
+
+ulimit -c 0 # no core files
+
+i=1
+rc=0
+
+assert_re='(assert|guarantee)[(](str|num).*failed: *'
+guarantee_re='guarantee[(](str|num).*failed: *'
+fatal_re='fatal error: *'
+signal_re='(SIGSEGV|EXCEPTION_ACCESS_VIOLATION).* at pc='
+tail_1='.*expected null'
+tail_2='.*num='
+
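+# One pattern per ErrorHandlerTest value: iteration <n> of the loop below runs
+# -XX:ErrorHandlerTest=<n> and expects output matching the <n>th entry here.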
+for re in                                                 \
+    "${assert_re}${tail_1}"    "${assert_re}${tail_2}"    \
+    "${guarantee_re}${tail_1}" "${guarantee_re}${tail_2}" \
+    "${fatal_re}${tail_1}"     "${fatal_re}${tail_2}"     \
+    "${fatal_re}.*truncated"   "ChunkPool::allocate"      \
+    "ShouldNotCall"            "ShouldNotReachHere"       \
+    "Unimplemented"            "$signal_re"
+do
+    i2=$i
+    [ $i -lt 10 ] && i2=0$i
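+    # i2 zero-pads single digits so the <n>.out/<n>.log files sort in order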
+
+    "$TESTJAVA/bin/java" $TESTVMOPTS -XX:+IgnoreUnrecognizedVMOptions \
+        -XX:ErrorHandlerTest=${i} -version > ${i2}.out 2>&1
+
+    # If ErrorHandlerTest is ignored (product build), stop.
+    #
+    # Using the built-in variable $! to get the pid does not work reliably on
+    # Windows; use a wildcard instead.
+    mv hs_err_pid*.log ${i2}.log || exit $rc
+
+    for f in ${i2}.log ${i2}.out
+    do
+        egrep -- "$re" $f > $$
+        if [ $? -ne 0 ]
+        then
+            echo "ErrorHandlerTest=$i failed ($f)"
+            rc=1
+        fi
+    done
+    rm -f $$
+
+    i=$(expr $i + 1)
+done
+
+exit $rc
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/6925573/SortMethodsTest.java	Wed May 19 10:22:39 2010 -0700
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2008-2010 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+
+import java.lang.reflect.Method;
+import java.net.URI;
+import java.util.Arrays;
+import java.util.Vector;
+
+import javax.tools.Diagnostic;
+import javax.tools.DiagnosticCollector;
+import javax.tools.FileObject;
+import javax.tools.ForwardingJavaFileManager;
+import javax.tools.JavaCompiler;
+import javax.tools.JavaCompiler.CompilationTask;
+import javax.tools.JavaFileManager;
+import javax.tools.JavaFileObject;
+import javax.tools.JavaFileObject.Kind;
+import javax.tools.SimpleJavaFileObject;
+import javax.tools.StandardJavaFileManager;
+import javax.tools.ToolProvider;
+
+/*
+ * @test SortMethodsTest
+ * @bug 6925573
+ * @summary verify that class loading does not need quadratic time with regard
+ *          to the number of class methods.
+ * @run main SortMethodsTest
+ * @author volker.simonis@gmail.com
+ */
+
+public class SortMethodsTest {
+
+  static String createClass(String name, int nrOfMethods) {
+    StringWriter sw = new StringWriter();
+    PrintWriter pw = new PrintWriter(sw);
+    pw.println("public class " + name + "{");
+    for (int i = 0; i < nrOfMethods; i++) {
+      pw.println("  public void m" + i + "() {}");
+    }
+    pw.println("  public static String sayHello() {");
+    pw.println("    return \"Hello from class \" + " + name +
+               ".class.getName() + \" with \" + " + name +
+               ".class.getDeclaredMethods().length + \" methods\";");
+    pw.println("  }");
+    pw.println("}");
+    pw.close();
+    return sw.toString();
+  }
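+
+  // For nrOfMethods == 2 the generated source looks roughly like:
+  //   public class ManyMethodsClass {
+  //     public void m0() {}
+  //     public void m1() {}
+  //     public static String sayHello() { ... }
+  //   }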
+
+  public static void main(String args[]) {
+
+    JavaCompiler comp = ToolProvider.getSystemJavaCompiler();
+    DiagnosticCollector<JavaFileObject> diags = new DiagnosticCollector<JavaFileObject>();
+    final String cName = "ManyMethodsClass";
+    Vector<Long> results = new Vector<Long>();
+
+    for (int i = 6; i < 600000; i *= 10) {
+      String klass =  createClass(cName, i);
+      JavaMemoryFileObject file = new JavaMemoryFileObject(cName, klass);
+      MemoryFileManager mfm = new MemoryFileManager(comp.getStandardFileManager(diags, null, null), file);
+      CompilationTask task = comp.getTask(null, mfm, diags, null, null, Arrays.asList(file));
+
+      if (task.call()) {
+        try {
+          MemoryClassLoader mcl = new MemoryClassLoader(file);
+          long start = System.nanoTime();
+          Class<? extends Object> c = Class.forName(cName, true, mcl);
+          long end = System.nanoTime();
+          results.add(end - start);
+          Method m = c.getDeclaredMethod("sayHello", new Class[0]);
+          String ret = (String)m.invoke(null, new Object[0]);
+          System.out.println(ret + " (loaded and resolved in " + (end - start) + "ns)");
+        } catch (Exception e) {
+          System.err.println(e);
+        }
+      }
+      else {
+        System.out.println(klass);
+        System.out.println();
+        for (Diagnostic diag : diags.getDiagnostics()) {
+          System.out.println(diag.getCode() + "\n" + diag.getKind() + "\n" + diag.getPosition());
+          System.out.println(diag.getSource() + "\n" + diag.getMessage(null));
+        }
+      }
+    }
+
+    long lastRatio = 0;
+    for (int i = 2; i < results.size(); i++) {
+      long normalized1 = Math.max(results.get(i-1) - results.get(0), 1);
+      long normalized2 = Math.max(results.get(i) - results.get(0), 1);
+      long ratio = normalized2 / normalized1;
+      lastRatio = ratio;
+      System.out.println("10 x more methods requires " + ratio + " x more time");
+    }
+    // The following is only a vague estimate, but it seems to hold on current
+    // x86_64 and sparcv9 machines: with linear class loading each tenfold
+    // increase in methods should give a ratio near 10, while quadratic
+    // behavior would push it toward 100, so 80 is used as the cutoff.
+    if (lastRatio > 80) {
+      throw new RuntimeException("ATTENTION: it seems that class loading needs quadratic time with regard to the number of class methods!!!");
+    }
+  }
+}
+
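+// Serves both as the in-memory source file handed to the compiler and as the
+// sink that collects the class file bytes it produces.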
+class JavaMemoryFileObject extends SimpleJavaFileObject {
+
+  private final String code;
+  private ByteArrayOutputStream byteCode;
+
+  JavaMemoryFileObject(String name, String code) {
+    super(URI.create("string:///" + name.replace('.','/') + Kind.SOURCE.extension), Kind.SOURCE);
+    this.code = code;
+  }
+
+  @Override
+  public CharSequence getCharContent(boolean ignoreEncodingErrors) {
+    return code;
+  }
+
+  @Override
+  public OutputStream openOutputStream() {
+    byteCode = new ByteArrayOutputStream();
+    return byteCode;
+  }
+
+  byte[] getByteCode() {
+    return byteCode.toByteArray();
+  }
+}
+
+class MemoryClassLoader extends ClassLoader {
+
+  private final JavaMemoryFileObject jfo;
+
+  public MemoryClassLoader(JavaMemoryFileObject jfo) {
+    this.jfo = jfo;
+  }
+
+  @Override
+  public Class<?> findClass(String name) {
+    byte[] b = jfo.getByteCode();
+    return defineClass(name, b, 0, b.length);
+  }
+}
+
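+// Forwards to the standard file manager but answers input and output file
+// lookups with the single in-memory file object.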
+class MemoryFileManager extends ForwardingJavaFileManager<JavaFileManager> {
+
+  private final JavaFileObject jfo;
+
+  public MemoryFileManager(StandardJavaFileManager jfm, JavaFileObject jfo) {
+    super(jfm);
+    this.jfo = jfo;
+  }
+
+  @Override
+  public FileObject getFileForInput(Location location, String packageName,
+                                    String relativeName) throws IOException {
+    return jfo;
+  }
+
+  @Override
+  public JavaFileObject getJavaFileForOutput(Location location, String qualifiedName,
+                                             Kind kind, FileObject outputFile) throws IOException {
+    return jfo;
+  }
+
+}