# HG changeset patch
# User dcubed
# Date 1273102787 25200
# Node ID 3fca8e9cd36a479dba886c05ab695d19eeee5fd9
# Parent  befdf73d6b8258dc11a48cea7939803f734b4d9b
# Parent  f43b5e9f788119bc93fe79bcfbfe164b391d013a
Merge

diff -r f43b5e9f7881 -r 3fca8e9cd36a .hgtags
--- a/.hgtags	Wed May 05 09:28:13 2010 -0400
+++ b/.hgtags	Wed May 05 16:39:47 2010 -0700
@@ -86,3 +86,6 @@
 bf823ef06b4f211e66988d76a2e2669be5c0820e jdk7-b86
 07226e9eab8f74b37346b32715f829a2ef2c3188 hs18-b01
 e7e7e36ccdb5d56edd47e5744351202d38f3b7ad jdk7-b87
+4b60f23c42231f7ecd62ad1fcb6a9ca26fa57d1b jdk7-b88
+15836273ac2494f36ef62088bc1cb6f3f011f565 jdk7-b89
+4b60f23c42231f7ecd62ad1fcb6a9ca26fa57d1b hs18-b02
diff -r f43b5e9f7881 -r 3fca8e9cd36a agent/src/os/linux/ps_core.c
--- a/agent/src/os/linux/ps_core.c	Wed May 05 09:28:13 2010 -0400
+++ b/agent/src/os/linux/ps_core.c	Wed May 05 16:39:47 2010 -0700
@@ -884,9 +884,12 @@
    }

    // read name of the shared object
-   if (read_string(ph, (uintptr_t) lib_name_addr, lib_name, sizeof(lib_name)) != true) {
+   lib_name[0] = '\0';
+   if (lib_name_addr != 0 &&
+       read_string(ph, (uintptr_t) lib_name_addr, lib_name, sizeof(lib_name)) != true) {
       print_debug("can't read shared object name\n");
-      return false;
+      // don't let failure to read the name stop opening the file. If something is really wrong
+      // it will fail later.
    }

    if (lib_name[0] != '\0') {
diff -r f43b5e9f7881 -r 3fca8e9cd36a agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java
--- a/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Wed May 05 09:28:13 2010 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Wed May 05 16:39:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2005-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -494,6 +494,68 @@
             }
         }
     },
+    new Command("revptrs", "revptrs address", false) {
+        public void doit(Tokens t) {
+            int tokens = t.countTokens();
+            if (tokens != 1 && (tokens != 2 || !t.nextToken().equals("-c"))) {
+                usage();
+                return;
+            }
+            boolean chase = tokens == 2;
+            ReversePtrs revptrs = VM.getVM().getRevPtrs();
+            if (revptrs == null) {
+                out.println("Computing reverse pointers...");
+                ReversePtrsAnalysis analysis = new ReversePtrsAnalysis();
+                final boolean[] complete = new boolean[1];
+                HeapProgressThunk thunk = new HeapProgressThunk() {
+                    public void heapIterationFractionUpdate(double d) {}
+                    public synchronized void heapIterationComplete() {
+                        complete[0] = true;
+                        notify();
+                    }
+                };
+                analysis.setHeapProgressThunk(thunk);
+                analysis.run();
+                while (!complete[0]) {
+                    synchronized (thunk) {
+                        try {
+                            thunk.wait();
+                        } catch (Exception e) {
+                        }
+                    }
+                }
+                revptrs = VM.getVM().getRevPtrs();
+                out.println("Done.");
+            }
+            Address a = VM.getVM().getDebugger().parseAddress(t.nextToken());
+            if (VM.getVM().getUniverse().heap().isInReserved(a)) {
+                OopHandle handle = a.addOffsetToAsOopHandle(0);
+                Oop oop = VM.getVM().getObjectHeap().newOop(handle);
+                ArrayList ptrs = revptrs.get(oop);
+                if (ptrs == null) {
+                    out.println("no live references to " + a);
+                } else {
+                    if (chase) {
+                        while (ptrs.size() == 1) {
+                            LivenessPathElement e = (LivenessPathElement)ptrs.get(0);
+                            ByteArrayOutputStream bos = new ByteArrayOutputStream();
+                            Oop.printOopValueOn(e.getObj(), new PrintStream(bos));
+                            out.println(bos.toString());
+                            ptrs = revptrs.get(e.getObj());
+                        }
+                    } else {
+                        for (int i = 0; i < ptrs.size(); i++) {
+                            LivenessPathElement e = (LivenessPathElement)ptrs.get(i);
+                            ByteArrayOutputStream bos = new ByteArrayOutputStream();
+                            Oop.printOopValueOn(e.getObj(), new PrintStream(bos));
+                            out.println(bos.toString());
+                            oop = e.getObj();
+                        }
+                    }
+                }
+            }
+        }
+    },
     new Command("inspect", "inspect expression", false) {
         public void doit(Tokens t) {
             if (t.countTokens() != 1) {
@@ -816,8 +878,24 @@
                 dumpType(type);
             } else {
                 Iterator i = agent.getTypeDataBase().getTypes();
+                // Make sure the types are emitted in an order that can be read back in
+                HashSet emitted = new HashSet();
+                Stack pending = new Stack();
                 while (i.hasNext()) {
-                    dumpType((Type)i.next());
+                    Type n = (Type)i.next();
+                    if (emitted.contains(n.getName())) {
+                        continue;
+                    }
+
+                    while (n != null && !emitted.contains(n.getName())) {
+                        pending.push(n);
+                        n = n.getSuperclass();
+                    }
+                    while (!pending.empty()) {
+                        n = (Type)pending.pop();
+                        dumpType(n);
+                        emitted.add(n.getName());
+                    }
                 }
             }
         }
@@ -846,83 +924,105 @@
         }
     },
-    new Command("search", "search [ heap | codecache | threads ] value", false) {
+    new Command("search", "search [ heap | perm | rawheap | codecache | threads ] value", false) {
         public void doit(Tokens t) {
             if (t.countTokens() != 2) {
                 usage();
-            } else {
-                String type = t.nextToken();
-                final Address value = VM.getVM().getDebugger().parseAddress(t.nextToken());
-                final long stride = VM.getVM().getAddressSize();
-                if (type.equals("threads")) {
-                    Threads threads = VM.getVM().getThreads();
-                    for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
-                        Address base = thread.getBaseOfStackPointer();
-                        Address end = thread.getLastJavaSP();
-                        if (end == null) continue;
-                        if (end.lessThan(base)) {
-                            Address tmp = base;
-                            base = end;
-                            end = tmp;
+                return;
+            }
+            String type = t.nextToken();
+            final Address value = VM.getVM().getDebugger().parseAddress(t.nextToken());
+            final long stride = VM.getVM().getAddressSize();
+            if (type.equals("threads")) {
+                Threads threads = VM.getVM().getThreads();
+                for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
+                    Address base = thread.getBaseOfStackPointer();
+                    Address end = thread.getLastJavaSP();
+                    if (end == null) continue;
+                    if (end.lessThan(base)) {
+                        Address tmp = base;
+                        base = end;
+                        end = tmp;
+                    }
+                    out.println("Searching " + base + " " + end);
+                    while (base != null && base.lessThan(end)) {
+                        Address val = base.getAddressAt(0);
+                        if (AddressOps.equal(val, value)) {
+                            out.println(base);
+                        }
+                        base = base.addOffsetTo(stride);
+                    }
+                }
+            } else if (type.equals("rawheap")) {
+                RawHeapVisitor iterator = new RawHeapVisitor() {
+                    public void prologue(long used) {
-                        out.println("Searching " + base + " " + end);
-                        while (base != null && base.lessThan(end)) {
-                            Address val = base.getAddressAt(0);
+
+                    public void visitAddress(Address addr) {
+                        Address val = addr.getAddressAt(0);
                         if (AddressOps.equal(val, value)) {
-                            out.println(base);
+                            out.println("found at " + addr);
                         }
-                            base = base.addOffsetTo(stride);
+                    }
+                    public void visitCompOopAddress(Address addr) {
+                        Address val = addr.getCompOopAddressAt(0);
+                        if (AddressOps.equal(val, value)) {
+                            out.println("found at " + addr);
+                        }
                     }
-                    }
-                } else if (type.equals("heap")) {
-                    RawHeapVisitor iterator = new RawHeapVisitor() {
-                        public void prologue(long used) {
-                        }
-
-                        public void visitAddress(Address addr) {
-                            Address val = addr.getAddressAt(0);
+                    public void epilogue() {
+                    }
+                };
+                VM.getVM().getObjectHeap().iterateRaw(iterator);
+            } else if (type.equals("heap") || type.equals("perm")) {
+                HeapVisitor iterator = new DefaultHeapVisitor() {
+                    public boolean doObj(Oop obj) {
+                        int index = 0;
+                        Address start = obj.getHandle();
+                        long end = obj.getObjectSize();
+                        while (index < end) {
+                            Address val = start.getAddressAt(index);
                             if (AddressOps.equal(val, value)) {
-                                out.println("found at " + addr);
+                                out.println("found in " + obj.getHandle());
+                                break;
                             }
-                        }
-                        public void visitCompOopAddress(Address addr) {
-                            Address val = addr.getCompOopAddressAt(0);
-                            if (AddressOps.equal(val, value)) {
-                                out.println("found at " + addr);
-                            }
-                        }
-                        public void epilogue() {
-                        }
-                    };
-                    VM.getVM().getObjectHeap().iterateRaw(iterator);
-                } else if (type.equals("codecache")) {
-                    CodeCacheVisitor v = new CodeCacheVisitor() {
-                        public void prologue(Address start, Address end) {
+                            index += 4;
                         }
-                        public void visit(CodeBlob blob) {
-                            boolean printed = false;
-                            Address base = blob.getAddress();
-                            Address end = base.addOffsetTo(blob.getSize());
-                            while (base != null && base.lessThan(end)) {
-                                Address val = base.getAddressAt(0);
-                                if (AddressOps.equal(val, value)) {
-                                    if (!printed) {
-                                        printed = true;
-                                        blob.printOn(out);
-                                    }
-                                    out.println("found at " + base + "\n");
+                        return false;
+                    }
+                };
+                if (type.equals("heap")) {
+                    VM.getVM().getObjectHeap().iterate(iterator);
+                } else {
+                    VM.getVM().getObjectHeap().iteratePerm(iterator);
+                }
+            } else if (type.equals("codecache")) {
+                CodeCacheVisitor v = new CodeCacheVisitor() {
+                    public void prologue(Address start, Address end) {
+                    }
+                    public void visit(CodeBlob blob) {
+                        boolean printed = false;
+                        Address base = blob.getAddress();
+                        Address end = base.addOffsetTo(blob.getSize());
+                        while (base != null && base.lessThan(end)) {
+                            Address val = base.getAddressAt(0);
+                            if (AddressOps.equal(val, value)) {
+                                if (!printed) {
+                                    printed = true;
+                                    blob.printOn(out);
                                 }
-                                base = base.addOffsetTo(stride);
+                                out.println("found at " + base + "\n");
                             }
+                            base = base.addOffsetTo(stride);
                         }
-                        public void epilogue() {
-                        }
+                    }
+                    public void epilogue() {
+                    }

-                    };
-                    VM.getVM().getCodeCache().iterate(v);
+                };
+                VM.getVM().getCodeCache().iterate(v);

-                }
             }
         }
     },
@@ -957,12 +1057,19 @@
             Threads threads = VM.getVM().getThreads();
             boolean all = name.equals("-a");
             for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
-                StringWriter sw = new StringWriter();
                 ByteArrayOutputStream bos = new ByteArrayOutputStream();
                 thread.printThreadIDOn(new PrintStream(bos));
                 if (all || bos.toString().equals(name)) {
+                    out.println(bos.toString() + " = " + thread.getAddress());
                     HTMLGenerator gen = new HTMLGenerator(false);
-                    out.println(gen.genHTMLForJavaStackTrace(thread));
+                    try {
+                        out.println(gen.genHTMLForJavaStackTrace(thread));
+                    } catch (Exception e) {
+                        err.println("Error: " + e);
+                        if (verboseExceptions) {
+                            e.printStackTrace(err);
+                        }
+                    }
                     if (!all) return;
                 }
             }
@@ -970,6 +1077,26 @@
             }
         }
     },
+    new Command("thread", "thread { -a | id }", false) {
+        public void doit(Tokens t) {
+            if (t.countTokens() != 1) {
+                usage();
+            } else {
+                String name = t.nextToken();
+                Threads threads = VM.getVM().getThreads();
+                boolean all = name.equals("-a");
+                for (JavaThread thread = threads.first(); thread != null; thread = thread.next()) {
+                    ByteArrayOutputStream bos = new ByteArrayOutputStream();
+                    thread.printThreadIDOn(new PrintStream(bos));
+                    if (all || bos.toString().equals(name)) {
+                        out.println(bos.toString() + " = " + thread.getAddress());
+                        if (!all) return;
+                    }
+                }
+                out.println("Couldn't find thread " + name);
+            }
+        }
+    },

     new Command("threads", false) {
         public void doit(Tokens t) {
@@ -1161,7 +1288,7 @@
         }
     }

-    static Pattern historyPattern = Pattern.compile("((!\\*)|(!\\$)|(!!-?)|(!-?[0-9][0-9]*))");
+    static Pattern historyPattern = Pattern.compile("((!\\*)|(!\\$)|(!!-?)|(!-?[0-9][0-9]*)|(![a-zA-Z][^ ]*))");

     public void executeCommand(String ln) {
         if (ln.indexOf('!') != -1) {
@@ -1195,14 +1322,37 @@
                     result.append(item.at(item.countTokens() - 1));
                 } else {
                     String tail = cmd.substring(1);
-                    int index = Integer.parseInt(tail);
-                    if (index < 0) {
-                        index = history.size() + index;
+                    switch (tail.charAt(0)) {
+                    case '0':
+                    case '1':
+                    case '2':
+                    case '3':
+                    case '4':
+                    case '5':
+                    case '6':
+                    case '7':
+                    case '8':
+                    case '9':
+                    case '-': {
+                        int index = Integer.parseInt(tail);
+                        if (index < 0) {
+                            index = history.size() + index;
+                        }
+                        if (index > size) {
+                            err.println("No such history item");
+                        } else {
+                            result.append((String)history.get(index));
+                        }
+                        break;
                     }
-                    if (index > size) {
-                        err.println("No such history item");
-                    } else {
-                        result.append((String)history.get(index));
+                    default: {
+                        for (int i = history.size() - 1; i >= 0; i--) {
+                            String s = (String)history.get(i);
+                            if (s.startsWith(tail)) {
+                                result.append(s);
+                            }
+                        }
+                    }
                     }
                 }
             }
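The historyPattern change above adds a csh-style "!prefix" form alongside the numeric "!n", "!-n", and "!!" history expansions. A minimal standalone Java sketch of the prefix form (illustrative names, not the CommandProcessor source; note that the patched default: case appends every matching history entry, newest first, while this sketch stops at the newest match):

    import java.util.Arrays;
    import java.util.List;

    public class HistoryExpansion {
        // Expand a "!prefix" word against command history, newest first.
        static String expand(List<String> history, String bangWord) {
            String tail = bangWord.substring(1);   // text after the '!'
            for (int i = history.size() - 1; i >= 0; i--) {
                String s = history.get(i);
                if (s.startsWith(tail)) {
                    return s;                      // newest match wins
                }
            }
            return null;                           // no matching history entry
        }

        public static void main(String[] args) {
            List<String> h = Arrays.asList("universe", "inspect 0x0", "threads");
            System.out.println(expand(h, "!in"));  // prints "inspect 0x0"
        }
    }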
diff -r f43b5e9f7881 -r 3fca8e9cd36a agent/src/share/classes/sun/jvm/hotspot/HSDB.java
--- a/agent/src/share/classes/sun/jvm/hotspot/HSDB.java	Wed May 05 09:28:13 2010 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/HSDB.java	Wed May 05 16:39:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -985,6 +985,12 @@
                     annoPanel.addAnnotation(new Annotation(curFrame.addressOfInterpreterFrameExpressionStack(),
                                                            curFrame.addressOfInterpreterFrameTOS(),
                                                            "Interpreter expression stack"));
+                    Address monBegin = curFrame.interpreterFrameMonitorBegin().address();
+                    Address monEnd = curFrame.interpreterFrameMonitorEnd().address();
+                    if (!monBegin.equals(monEnd)) {
+                        annoPanel.addAnnotation(new Annotation(monBegin, monEnd,
+                                                               "BasicObjectLocks"));
+                    }
                     if (interpreterFrameMethod != null) {
                         // The offset is just to get the right stack slots highlighted in the output
                         int offset = 1;
diff -r f43b5e9f7881 -r 3fca8e9cd36a agent/src/share/classes/sun/jvm/hotspot/bugspot/BugSpot.java
--- a/agent/src/share/classes/sun/jvm/hotspot/bugspot/BugSpot.java	Wed May 05 09:28:13 2010 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/bugspot/BugSpot.java	Wed May 05 16:39:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2003 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -294,7 +294,7 @@
         attachDialog.setSize(400, 300);
         GraphicsUtilities.centerInContainer(attachDialog.getComponent(),
                                             getParentDimension(attachDialog.getComponent()));
-        attachDialog.show();
+        attachDialog.setVisible(true);
     }

     public void showThreadsDialog() {
@@ -321,7 +321,7 @@
                                             getParentDimension(threadsDialog.getComponent()));
         GraphicsUtilities.centerInContainer(threadsDialog.getComponent(),
                                             getParentDimension(threadsDialog.getComponent()));
-        threadsDialog.show();
+        threadsDialog.setVisible(true);
     }

     public void showMemoryDialog() {
@@ -341,7 +341,7 @@
                                             getParentDimension(memoryDialog.getComponent()));
         GraphicsUtilities.centerInContainer(memoryDialog.getComponent(),
                                             getParentDimension(memoryDialog.getComponent()));
-        memoryDialog.show();
+        memoryDialog.setVisible(true);
     }

     /** Changes the editor factory this debugger uses to display source
@@ -530,7 +530,7 @@
             addFrame(stackFrame);
             stackFrame.setSize(400, 200);
             GraphicsUtilities.moveToInContainer(stackFrame.getComponent(), 0.0f, 1.0f, 0, 20);
-            stackFrame.show();
+            stackFrame.setVisible(true);

             // Create register panel
             registerPanel = new RegisterPanel();
@@ -544,7 +544,7 @@
             registerFrame.setSize(225, 200);
             GraphicsUtilities.moveToInContainer(registerFrame.getComponent(),
                                                 1.0f, 0.0f, 0, 0);
-            registerFrame.show();
+            registerFrame.setVisible(true);

             resetCurrentThread();
         } catch (DebuggerException e) {
@@ -979,7 +979,7 @@
                                                      1.0f,
                                                      0.85f,
                                                      getParentDimension(editorFrame.getComponent()));
-                editorFrame.show();
+                editorFrame.setVisible(true);
                 shown = true;
             }
             code.showLineNumber(lineNo);
diff -r f43b5e9f7881 -r 3fca8e9cd36a agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeDisassembler.java
--- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeDisassembler.java	Wed May 05 09:28:13 2010 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeDisassembler.java	Wed May 05 16:39:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -96,10 +96,6 @@
         addBytecodeClass(Bytecodes._dstore, BytecodeStore.class);
         addBytecodeClass(Bytecodes._astore, BytecodeStore.class);
         addBytecodeClass(Bytecodes._tableswitch, BytecodeTableswitch.class);
-
-        // only special fast_xxx cases. others are handled differently.
-        addBytecodeClass(Bytecodes._fast_iaccess_0, BytecodeFastAAccess0.class);
-        addBytecodeClass(Bytecodes._fast_aaccess_0, BytecodeFastIAccess0.class);
     }

     public BytecodeDisassembler(Method method) {
diff -r f43b5e9f7881 -r 3fca8e9cd36a agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Wed May 05 09:28:13 2010 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Wed May 05 16:39:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -263,11 +263,12 @@
     case JVM_CONSTANT_NameAndType:        return "JVM_CONSTANT_NameAndType";
     case JVM_CONSTANT_Invalid:            return "JVM_CONSTANT_Invalid";
     case JVM_CONSTANT_UnresolvedClass:    return "JVM_CONSTANT_UnresolvedClass";
+    case JVM_CONSTANT_UnresolvedClassInError: return "JVM_CONSTANT_UnresolvedClassInError";
     case JVM_CONSTANT_ClassIndex:         return "JVM_CONSTANT_ClassIndex";
     case JVM_CONSTANT_UnresolvedString:   return "JVM_CONSTANT_UnresolvedString";
     case JVM_CONSTANT_StringIndex:        return "JVM_CONSTANT_StringIndex";
     }
-    throw new InternalError("unknown tag");
+    throw new InternalError("Unknown tag: " + tag);
   }

   public void iterateFields(OopVisitor visitor, boolean doVMFields) {
@@ -304,6 +305,7 @@
         index++;
         break;

+      case JVM_CONSTANT_UnresolvedClassInError:
       case JVM_CONSTANT_UnresolvedClass:
       case JVM_CONSTANT_Class:
       case JVM_CONSTANT_UnresolvedString:
@@ -409,6 +411,7 @@
           }

           // case JVM_CONSTANT_ClassIndex:
+          case JVM_CONSTANT_UnresolvedClassInError:
           case JVM_CONSTANT_UnresolvedClass: {
               dos.writeByte(JVM_CONSTANT_Class);
               String klassName = getSymbolAt(ci).asString();
@@ -464,6 +467,8 @@
                            + ", type = " + signatureIndex);
               break;
           }
+          default:
+              throw new InternalError("unknown tag: " + cpConstType);
       } // switch
     }
     dos.flush();
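The ConstantPool writer above routes the new JVM_CONSTANT_UnresolvedClassInError tag through the same case as JVM_CONSTANT_UnresolvedClass. A hedged sketch of that re-tagging idea (hypothetical names, not the SA source): both tags are written back to the classfile as a plain CONSTANT_Class entry, since the resolved-in-error state is VM-internal and has no classfile encoding.

    import java.io.DataOutputStream;
    import java.io.IOException;

    class ConstantPoolDumpSketch {
        static final int JVM_CONSTANT_Class = 7;   // classfile tag value

        // Emit an unresolved (or unresolved-in-error) class entry as a
        // canonical CONSTANT_Class_info structure: tag byte + name index.
        static void writeClassEntry(DataOutputStream dos, int nameIndex)
                throws IOException {
            dos.writeByte(JVM_CONSTANT_Class);
            dos.writeShort(nameIndex);
        }
    }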
diff -r f43b5e9f7881 -r 3fca8e9cd36a agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java	Wed May 05 09:28:13 2010 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java	Wed May 05 16:39:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,6 +58,9 @@
     // Temporary tag while constructing constant pool
     public static final int JVM_CONSTANT_StringIndex        = 103;

+    // Temporary tag while constructing constant pool
+    public static final int JVM_CONSTANT_UnresolvedClassInError = 104;
+
     // 1.5 major/minor version numbers from JVM spec. 3rd edition
     public static final short MAJOR_VERSION = 49;
     public static final short MINOR_VERSION = 0;
diff -r f43b5e9f7881 -r 3fca8e9cd36a agent/src/share/classes/sun/jvm/hotspot/runtime/SignatureIterator.java
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/SignatureIterator.java	Wed May 05 09:28:13 2010 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/SignatureIterator.java	Wed May 05 16:39:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -108,7 +108,7 @@
         return BasicTypeSize.getTArraySize();
       }
     }
-    throw new RuntimeException("Should not reach here");
+    throw new RuntimeException("Should not reach here: char " + (char)_signature.getByteAt(_index) + " @ " + _index + " in " + _signature.asString());
   }
   protected void checkSignatureEnd() {
     if (_index < _signature.getLength()) {
diff -r f43b5e9f7881 -r 3fca8e9cd36a agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java	Wed May 05 09:28:13 2010 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java	Wed May 05 16:39:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -238,6 +238,7 @@
              }

              // case JVM_CONSTANT_ClassIndex:
+             case JVM_CONSTANT_UnresolvedClassInError:
              case JVM_CONSTANT_UnresolvedClass: {
                   dos.writeByte(JVM_CONSTANT_Class);
                   String klassName = cpool.getSymbolAt(ci).asString();
@@ -296,6 +297,8 @@
                            + ", type = " + signatureIndex);
                  break;
              }
+             default:
+                 throw new InternalError("Unknown tag: " + cpConstType);
         } // switch
     }
 }
diff -r f43b5e9f7881 -r 3fca8e9cd36a agent/src/share/classes/sun/jvm/hotspot/ui/FrameWrapper.java
--- a/agent/src/share/classes/sun/jvm/hotspot/ui/FrameWrapper.java	Wed May 05 09:28:13 2010 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/ui/FrameWrapper.java	Wed May 05 16:39:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,6 @@
   public void    setVisible(boolean visible);
   public void    setSize(int x, int y);
   public void    pack();
-  public void    show();
   public void    dispose();
   public void    setBackground(Color color);
   public void    setResizable(boolean resizable);
diff -r f43b5e9f7881 -r 3fca8e9cd36a agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java
--- a/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Wed May 05 09:28:13 2010 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java	Wed May 05 16:39:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -95,8 +95,10 @@

    // list tags
    void beginList()  { beginTag("ul"); nl(); }
+   void endList()    { endTag("ul"); nl(); }
+   void beginListItem() { beginTag("li"); }
+   void endListItem()   { endTag("li"); nl(); }
    void li(String s) { wrap("li", s); nl(); }
-   void endList()    { endTag("ul"); nl(); }

    // table tags
    void beginTable(int border) {
@@ -505,6 +507,11 @@
          buf.cell(cpool.getSymbolAt(index).asString());
          break;

+      case JVM_CONSTANT_UnresolvedClassInError:
+         buf.cell("JVM_CONSTANT_UnresolvedClassInError");
+         buf.cell(cpool.getSymbolAt(index).asString());
+         break;
+
       case JVM_CONSTANT_Class:
          buf.cell("JVM_CONSTANT_Class");
          Klass klass = (Klass) cpool.getObjAt(index);
@@ -564,6 +571,9 @@
          buf.cell("JVM_CONSTANT_StringIndex");
          buf.cell(Integer.toString(cpool.getIntAt(index)));
          break;
+
+      default:
+         throw new InternalError("unknown tag: " + ctag);
       }

       buf.endTag("tr");
@@ -671,7 +681,16 @@
          buf.cell(Integer.toString(curBci) + spaces);

          buf.beginTag("td");
-         String instrStr = escapeHTMLSpecialChars(instr.toString());
+         String instrStr = null;
+         try {
+             instrStr = escapeHTMLSpecialChars(instr.toString());
+         } catch (RuntimeException re) {
+             buf.append("exception during bytecode processing");
+             buf.endTag("td");
+             buf.endTag("tr");
+             re.printStackTrace();
+             return;
+         }

          if (instr instanceof BytecodeNew) {
             BytecodeNew newBytecode = (BytecodeNew) instr;
@@ -1396,9 +1415,7 @@
       final SymbolFinder symFinder = createSymbolFinder();
       final Disassembler disasm = createDisassembler(startPc, code);
       class NMethodVisitor implements InstructionVisitor {
-         boolean prevWasCall;
          public void prologue() {
-            prevWasCall = false;
          }

          public void visit(long currentPc, Instruction instr) {
@@ -1418,8 +1435,7 @@

             PCDesc pcDesc = (PCDesc) safepoints.get(longToAddress(currentPc));

-            boolean isSafepoint = (pcDesc != null);
-            if (isSafepoint && prevWasCall) {
+            if (pcDesc != null) {
                buf.append(genSafepointInfo(nmethod, pcDesc));
             }

@@ -1435,11 +1451,6 @@
             }

             buf.br();
-            if (isSafepoint && !prevWasCall) {
-               buf.append(genSafepointInfo(nmethod, pcDesc));
-            }
-
-            prevWasCall = instr.isCall();
          }

          public void epilogue() {
@@ -1783,22 +1794,20 @@
          buf.h3("Fields");
          buf.beginList();
          for (int f = 0; f < numFields; f += InstanceKlass.NEXT_OFFSET) {
-           int nameIndex = fields.getShortAt(f + InstanceKlass.NAME_INDEX_OFFSET);
-           int sigIndex = fields.getShortAt(f + InstanceKlass.SIGNATURE_INDEX_OFFSET);
-           int genSigIndex = fields.getShortAt(f + InstanceKlass.GENERIC_SIGNATURE_INDEX_OFFSET);
-           Symbol f_name = cp.getSymbolAt(nameIndex);
-           Symbol f_sig = cp.getSymbolAt(sigIndex);
-           Symbol f_genSig = (genSigIndex != 0)? cp.getSymbolAt(genSigIndex) : null;
-           AccessFlags acc = new AccessFlags(fields.getShortAt(f + InstanceKlass.ACCESS_FLAGS_OFFSET));
+           sun.jvm.hotspot.oops.Field field = klass.getFieldByIndex(f);
+           String f_name = ((NamedFieldIdentifier)field.getID()).getName();
+           Symbol f_sig = field.getSignature();
+           Symbol f_genSig = field.getGenericSignature();
+           AccessFlags acc = field.getAccessFlagsObj();

-           buf.beginTag("li");
+           buf.beginListItem();
            buf.append(genFieldModifierString(acc));
            buf.append(' ');
            Formatter sigBuf = new Formatter(genHTML);
            new SignatureConverter(f_sig, sigBuf.getBuffer()).dispatchField();
            buf.append(sigBuf.toString().replace('/', '.'));
            buf.append(' ');
-           buf.append(f_name.asString());
+           buf.append(f_name);
            buf.append(';');

            // is it generic?
            if (f_genSig != null) {
@@ -1806,7 +1815,8 @@
               buf.append(escapeHTMLSpecialChars(f_genSig.asString()));
               buf.append("] ");
            }
-           buf.endTag("li");
+           buf.append(" (offset = " + field.getOffset() + ")");
+           buf.endListItem();
          }
          buf.endList();
       }
diff -r f43b5e9f7881 -r 3fca8e9cd36a agent/src/share/classes/sun/jvm/hotspot/utilities/Assert.java
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/Assert.java	Wed May 05 09:28:13 2010 -0400
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/Assert.java	Wed May 05 16:39:47 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
   public static boolean ASSERTS_ENABLED = true;

   public static void that(boolean test, String message) {
-    if (!test) {
+    if (ASSERTS_ENABLED && !test) {
       throw new AssertionFailure(message);
     }
   }
diff -r f43b5e9f7881 -r 3fca8e9cd36a make/hotspot_version
--- a/make/hotspot_version	Wed May 05 09:28:13 2010 -0400
+++ b/make/hotspot_version	Wed May 05 16:39:47 2010 -0700
@@ -35,7 +35,7 @@

 HS_MAJOR_VER=18
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=02
+HS_BUILD_NUMBER=03

 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
diff -r f43b5e9f7881 -r 3fca8e9cd36a make/linux/makefiles/adlc.make
--- a/make/linux/makefiles/adlc.make	Wed May 05 09:28:13 2010 -0400
+++ b/make/linux/makefiles/adlc.make	Wed May 05 16:39:47 2010 -0700
@@ -127,6 +127,9 @@
 # Note that product files are updated via "mv", which is atomic.
 TEMPDIR := $(OUTDIR)/mktmp$(shell echo $$$$)

+# Debuggable by default
+CFLAGS += -g
+
 # Pass -D flags into ADLC.
 ADLCFLAGS += $(SYSDEFS)

@@ -135,7 +138,7 @@

 # Normally, debugging is done directly on the ad_*.cpp files.
 # But -g will put #line directives in those files pointing back to .ad.
-#ADLCFLAGS += -g
+ADLCFLAGS += -g

 ifdef LP64
 ADLCFLAGS += -D_LP64
diff -r f43b5e9f7881 -r 3fca8e9cd36a make/solaris/makefiles/adlc.make
--- a/make/solaris/makefiles/adlc.make	Wed May 05 09:28:13 2010 -0400
+++ b/make/solaris/makefiles/adlc.make	Wed May 05 16:39:47 2010 -0700
@@ -147,6 +147,9 @@
 # Note that product files are updated via "mv", which is atomic.
 TEMPDIR := $(OUTDIR)/mktmp$(shell echo $$$$)

+# Debuggable by default
+CFLAGS += -g
+
 # Pass -D flags into ADLC.
 ADLCFLAGS += $(SYSDEFS)

@@ -155,7 +158,7 @@

 # Normally, debugging is done directly on the ad_*.cpp files.
 # But -g will put #line directives in those files pointing back to .ad.
-#ADLCFLAGS += -g
+ADLCFLAGS += -g

 ifdef LP64
 ADLCFLAGS += -D_LP64
diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/sparc/vm/assembler_sparc.cpp
--- a/src/cpu/sparc/vm/assembler_sparc.cpp	Wed May 05 09:28:13 2010 -0400
+++ b/src/cpu/sparc/vm/assembler_sparc.cpp	Wed May 05 16:39:47 2010 -0700
@@ -4082,7 +4082,7 @@
 // make it work.
 static void check_index(int ind) {
   assert(0 <= ind && ind <= 64*K && ((ind % oopSize) == 0),
-         "Invariants.")
+         "Invariants.");
 }

 static void generate_satb_log_enqueue(bool with_frame) {
diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/sparc/vm/assembler_sparc.hpp
--- a/src/cpu/sparc/vm/assembler_sparc.hpp	Wed May 05 09:28:13 2010 -0400
+++ b/src/cpu/sparc/vm/assembler_sparc.hpp	Wed May 05 16:39:47 2010 -0700
@@ -661,9 +661,6 @@
     stx_op3      = 0x0e,
     swap_op3     = 0x0f,

-    lduwa_op3    = 0x10,
-    ldxa_op3     = 0x1b,
-
     stwa_op3     = 0x14,
     stxa_op3     = 0x1e,

@@ -1065,7 +1062,7 @@
   }
   void assert_not_delayed(const char* msg) {
 #ifdef CHECK_DELAY
-    assert_msg ( delay_state == no_delay, msg);
+    assert(delay_state == no_delay, msg);
 #endif
   }
diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Wed May 05 09:28:13 2010 -0400
+++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Wed May 05 16:39:47 2010 -0700
@@ -388,6 +388,60 @@
 }

+// Emit the code to remove the frame from the stack in the exception
+// unwind path.
+int LIR_Assembler::emit_unwind_handler() {
+#ifndef PRODUCT
+  if (CommentedAssembly) {
+    _masm->block_comment("Unwind handler");
+  }
+#endif
+
+  int offset = code_offset();
+
+  // Fetch the exception from TLS and clear out exception related thread state
+  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0);
+  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
+  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
+
+  __ bind(_unwind_handler_entry);
+  __ verify_not_null_oop(O0);
+  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
+    __ mov(O0, I0);  // Preserve the exception
+  }
+
+  // Perform needed unlocking
+  MonitorExitStub* stub = NULL;
+  if (method()->is_synchronized()) {
+    monitor_address(0, FrameMap::I1_opr);
+    stub = new MonitorExitStub(FrameMap::I1_opr, true, 0);
+    __ unlock_object(I3, I2, I1, *stub->entry());
+    __ bind(*stub->continuation());
+  }
+
+  if (compilation()->env()->dtrace_method_probes()) {
+    jobject2reg(method()->constant_encoding(), O0);
+    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type);
+    __ delayed()->nop();
+  }
+
+  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
+    __ mov(I0, O0);  // Restore the exception
+  }
+
+  // dispatch to the unwind logic
+  __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
+  __ delayed()->nop();
+
+  // Emit the slow path assembly
+  if (stub != NULL) {
+    stub->emit_code(this);
+  }
+
+  return offset;
+}
+
+
 int LIR_Assembler::emit_deopt_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
@@ -1728,9 +1782,13 @@
       ShouldNotReachHere();
     }
   } else if (code == lir_cmp_l2i) {
+#ifdef _LP64
+    __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
+#else
     __ lcmp(left->as_register_hi(),  left->as_register_lo(),
             right->as_register_hi(), right->as_register_lo(),
             dst->as_register());
+#endif
   } else {
     ShouldNotReachHere();
   }
@@ -2046,26 +2104,29 @@
 }

-void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind) {
+void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
   assert(exceptionOop->as_register() == Oexception, "should match");
match"); + assert(exceptionPC->as_register() == Oissuing_pc, "should match"); info->add_register_oop(exceptionOop); - if (unwind) { - __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type); - __ delayed()->nop(); - } else { - // reuse the debug info from the safepoint poll for the throw op itself - address pc_for_athrow = __ pc(); - int pc_for_athrow_offset = __ offset(); - RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow); - __ set(pc_for_athrow, Oissuing_pc, rspec); - add_call_info(pc_for_athrow_offset, info); // for exception handler - - __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type); - __ delayed()->nop(); - } + // reuse the debug info from the safepoint poll for the throw op itself + address pc_for_athrow = __ pc(); + int pc_for_athrow_offset = __ offset(); + RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow); + __ set(pc_for_athrow, Oissuing_pc, rspec); + add_call_info(pc_for_athrow_offset, info); // for exception handler + + __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type); + __ delayed()->nop(); +} + + +void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { + assert(exceptionOop->as_register() == Oexception, "should match"); + + __ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry); + __ delayed()->nop(); } @@ -2354,7 +2415,7 @@ if (UseSlowPath || (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) || (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) { - __ br(Assembler::always, false, Assembler::pn, *op->stub()->entry()); + __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry()); __ delayed()->nop(); } else { __ allocate_array(op->obj()->as_register(), @@ -2849,7 +2910,7 @@ void LIR_Assembler::align_backward_branch_target() { - __ align(16); + __ align(OptoLoopAlignment); } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/sparc/vm/c2_globals_sparc.hpp --- a/src/cpu/sparc/vm/c2_globals_sparc.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/cpu/sparc/vm/c2_globals_sparc.hpp Wed May 05 16:39:47 2010 -0700 @@ -60,9 +60,6 @@ define_pd_global(intx, INTPRESSURE, 48); // large register set define_pd_global(intx, InteriorEntryAlignment, 16); // = CodeEntryAlignment define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K)); -// The default setting 16/16 seems to work best. -// (For _228_jack 16/16 is 2% better than 4/4, 16/4, 32/32, 32/16, or 16/32.) -define_pd_global(intx, OptoLoopAlignment, 16); // = 4*wordSize define_pd_global(intx, RegisterCostAreaRatio, 12000); define_pd_global(bool, UseTLAB, true); define_pd_global(bool, ResizeTLAB, true); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/sparc/vm/globals_sparc.hpp --- a/src/cpu/sparc/vm/globals_sparc.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/cpu/sparc/vm/globals_sparc.hpp Wed May 05 16:39:47 2010 -0700 @@ -40,6 +40,9 @@ define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs past to check cast define_pd_global(intx, CodeEntryAlignment, 32); +// The default setting 16/16 seems to work best. +// (For _228_jack 16/16 is 2% better than 4/4, 16/4, 32/32, 32/16, or 16/32.) 
+define_pd_global(intx, OptoLoopAlignment,     16);  // = 4*wordSize
 define_pd_global(intx, InlineFrequencyCount,  50);  // we can use more inlining on the SPARC
 define_pd_global(intx, InlineSmallCode,       1500);
 #ifdef _LP64
diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/sparc/vm/sparc.ad
--- a/src/cpu/sparc/vm/sparc.ad	Wed May 05 09:28:13 2010 -0400
+++ b/src/cpu/sparc/vm/sparc.ad	Wed May 05 16:39:47 2010 -0700
@@ -471,6 +471,9 @@
 source %{
 #define __ _masm.

+// Block initializing store
+#define ASI_BLK_INIT_QUAD_LDD_P 0xE2
+
 // tertiary op of a LoadP or StoreP encoding
 #define REGP_OP true

@@ -920,38 +923,6 @@
 #endif
 }

-void emit_form3_mem_reg_asi(CodeBuffer &cbuf, const MachNode* n, int primary, int tertiary,
-                            int src1_enc, int disp32, int src2_enc, int dst_enc, int asi) {
-
-  uint instr;
-  instr = (Assembler::ldst_op << 30)
-        | (dst_enc        << 25)
-        | (primary        << 19)
-        | (src1_enc       << 14);
-
-  int disp = disp32;
-  int index    = src2_enc;
-
-  if (src1_enc == R_SP_enc || src1_enc == R_FP_enc)
-    disp += STACK_BIAS;
-
-  // We should have a compiler bailout here rather than a guarantee.
-  // Better yet would be some mechanism to handle variable-size matches correctly.
-  guarantee(Assembler::is_simm13(disp), "Do not match large constant offsets" );
-
-  if( disp != 0 ) {
-    // use reg-reg form
-    // set src2=R_O7 contains offset
-    index = R_O7_enc;
-    emit3_simm13( cbuf, Assembler::arith_op, index, Assembler::or_op3, 0, disp);
-  }
-  instr |= (asi << 5);
-  instr |= index;
-  uint *code = (uint*)cbuf.code_end();
-  *code = instr;
-  cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord);
-}
-
 void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocType rtype, bool preserve_g2 = false, bool force_far_call = false) {
   // The method which records debug information at every safepoint
   // expects the call to be the first instruction in the snippet as
@@ -1951,11 +1922,6 @@
                      $mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
   %}

-  enc_class form3_mem_reg_little( memory mem, iRegI dst) %{
-    emit_form3_mem_reg_asi(cbuf, this, $primary, -1,
-                     $mem$$base, $mem$$disp, $mem$$index, $dst$$reg, Assembler::ASI_PRIMARY_LITTLE);
-  %}
-
   enc_class form3_mem_prefetch_read( memory mem ) %{
     emit_form3_mem_reg(cbuf, this, $primary, -1,
                      $mem$$base, $mem$$disp, $mem$$index, 0/*prefetch function many-reads*/);
@@ -4308,8 +4274,8 @@
 // instructions for every form of operand when the instruction accepts
 // multiple operand types with the same basic encoding and format. The classic
 // case of this is memory operands.
-// Indirect is not included since its use is limited to Compare & Swap
 opclass memory( indirect, indOffset13, indIndex );
+opclass indIndexMemory( indIndex );

 //----------PIPELINE-----------------------------------------------------------
 pipeline %{
@@ -6147,6 +6113,7 @@
 %}

 instruct prefetchw( memory mem ) %{
+  predicate(AllocatePrefetchStyle != 3 );
   match( PrefetchWrite mem );
   ins_cost(MEMORY_REF_COST);

@@ -6156,6 +6123,23 @@
   ins_pipe(iload_mem);
 %}

+// Use BIS instruction to prefetch.
+instruct prefetchw_bis( memory mem ) %{
+  predicate(AllocatePrefetchStyle == 3);
+  match( PrefetchWrite mem );
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "STXA   G0,$mem\t! // Block initializing store" %}
+  ins_encode %{
+    Register base = as_Register($mem$$base);
+    int disp = $mem$$disp;
+    if (disp != 0) {
+      __ add(base, AllocatePrefetchStepSize, base);
+    }
+    __ stxa(G0, base, G0, ASI_BLK_INIT_QUAD_LDD_P);
+  %}
+  ins_pipe(istore_mem_reg);
+%}

 //----------Store Instructions-------------------------------------------------
 // Store Byte
@@ -9645,84 +9629,179 @@

 instruct bytes_reverse_int(iRegI dst, stackSlotI src) %{
   match(Set dst (ReverseBytesI src));
-  effect(DEF dst, USE src);
+
+  // Op cost is artificially doubled to make sure that load or store
+  // instructions are preferred over this one which requires a spill
+  // onto a stack slot.
+  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
+  format %{ "LDUWA  $src, $dst\t!asi=primary_little" %}
+
+  ins_encode %{
+    __ set($src$$disp + STACK_BIAS, O7);
+    __ lduwa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
+  ins_pipe( iload_mem );
+%}
+
+instruct bytes_reverse_long(iRegL dst, stackSlotL src) %{
+  match(Set dst (ReverseBytesL src));

   // Op cost is artificially doubled to make sure that load or store
   // instructions are preferred over this one which requires a spill
   // onto a stack slot.
   ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
-  size(8);
-  format %{ "LDUWA  $src, $dst\t!asi=primary_little" %}
-
-  opcode(Assembler::lduwa_op3);
-  ins_encode( form3_mem_reg_little(src, dst) );
+  format %{ "LDXA   $src, $dst\t!asi=primary_little" %}
+
+  ins_encode %{
+    __ set($src$$disp + STACK_BIAS, O7);
+    __ ldxa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
   ins_pipe( iload_mem );
 %}

-instruct bytes_reverse_long(iRegL dst, stackSlotL src) %{
-  match(Set dst (ReverseBytesL src));
-  effect(DEF dst, USE src);
+instruct bytes_reverse_unsigned_short(iRegI dst, stackSlotI src) %{
+  match(Set dst (ReverseBytesUS src));
+
+  // Op cost is artificially doubled to make sure that load or store
+  // instructions are preferred over this one which requires a spill
+  // onto a stack slot.
+  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
+  format %{ "LDUHA  $src, $dst\t!asi=primary_little\n\t" %}
+
+  ins_encode %{
+    // the value was spilled as an int so bias the load
+    __ set($src$$disp + STACK_BIAS + 2, O7);
+    __ lduha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
+  ins_pipe( iload_mem );
+%}
+
+instruct bytes_reverse_short(iRegI dst, stackSlotI src) %{
+  match(Set dst (ReverseBytesS src));

   // Op cost is artificially doubled to make sure that load or store
   // instructions are preferred over this one which requires a spill
   // onto a stack slot.
   ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
-  size(8);
-  format %{ "LDXA   $src, $dst\t!asi=primary_little" %}
-
-  opcode(Assembler::ldxa_op3);
-  ins_encode( form3_mem_reg_little(src, dst) );
+  format %{ "LDSHA  $src, $dst\t!asi=primary_little\n\t" %}
+
+  ins_encode %{
+    // the value was spilled as an int so bias the load
+    __ set($src$$disp + STACK_BIAS + 2, O7);
+    __ ldsha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
   ins_pipe( iload_mem );
 %}

 // Load Integer reversed byte order
-instruct loadI_reversed(iRegI dst, memory src) %{
+instruct loadI_reversed(iRegI dst, indIndexMemory src) %{
   match(Set dst (ReverseBytesI (LoadI src)));

   ins_cost(DEFAULT_COST + MEMORY_REF_COST);
-  size(8);
+  size(4);
   format %{ "LDUWA  $src, $dst\t!asi=primary_little" %}

-  opcode(Assembler::lduwa_op3);
-  ins_encode( form3_mem_reg_little( src, dst) );
+  ins_encode %{
+    __ lduwa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
   ins_pipe(iload_mem);
 %}

 // Load Long - aligned and reversed
-instruct loadL_reversed(iRegL dst, memory src) %{
+instruct loadL_reversed(iRegL dst, indIndexMemory src) %{
   match(Set dst (ReverseBytesL (LoadL src)));

-  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
-  size(8);
+  ins_cost(MEMORY_REF_COST);
+  size(4);
   format %{ "LDXA   $src, $dst\t!asi=primary_little" %}

-  opcode(Assembler::ldxa_op3);
-  ins_encode( form3_mem_reg_little( src, dst ) );
+  ins_encode %{
+    __ ldxa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
+  ins_pipe(iload_mem);
+%}
+
+// Load unsigned short / char reversed byte order
+instruct loadUS_reversed(iRegI dst, indIndexMemory src) %{
+  match(Set dst (ReverseBytesUS (LoadUS src)));
+
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+  format %{ "LDUHA  $src, $dst\t!asi=primary_little" %}
+
+  ins_encode %{
+    __ lduha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
+  ins_pipe(iload_mem);
+%}
+
+// Load short reversed byte order
+instruct loadS_reversed(iRegI dst, indIndexMemory src) %{
+  match(Set dst (ReverseBytesS (LoadS src)));
+
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+  format %{ "LDSHA  $src, $dst\t!asi=primary_little" %}
+
+  ins_encode %{
+    __ ldsha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
+  %}
   ins_pipe(iload_mem);
 %}

 // Store Integer reversed byte order
-instruct storeI_reversed(memory dst, iRegI src) %{
+instruct storeI_reversed(indIndexMemory dst, iRegI src) %{
   match(Set dst (StoreI dst (ReverseBytesI src)));

   ins_cost(MEMORY_REF_COST);
-  size(8);
+  size(4);
   format %{ "STWA   $src, $dst\t!asi=primary_little" %}

-  opcode(Assembler::stwa_op3);
-  ins_encode( form3_mem_reg_little( dst, src) );
+  ins_encode %{
+    __ stwa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
+  %}
   ins_pipe(istore_mem_reg);
 %}

 // Store Long reversed byte order
-instruct storeL_reversed(memory dst, iRegL src) %{
+instruct storeL_reversed(indIndexMemory dst, iRegL src) %{
   match(Set dst (StoreL dst (ReverseBytesL src)));

   ins_cost(MEMORY_REF_COST);
-  size(8);
+  size(4);
   format %{ "STXA   $src, $dst\t!asi=primary_little" %}

-  opcode(Assembler::stxa_op3);
-  ins_encode( form3_mem_reg_little( dst, src) );
+  ins_encode %{
+    __ stxa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
+
+// Store unsigned short/char reversed byte order
+instruct storeUS_reversed(indIndexMemory dst, iRegI src) %{
+  match(Set dst (StoreC dst (ReverseBytesUS src)));
+
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+  format %{ "STHA   $src, $dst\t!asi=primary_little" %}
+
+  ins_encode %{
+    __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
+
+// Store short reversed byte order
+instruct storeS_reversed(indIndexMemory dst, iRegI src) %{
+  match(Set dst (StoreC dst (ReverseBytesS src)));
+
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+  format %{ "STHA   $src, $dst\t!asi=primary_little" %}
+
+  ins_encode %{
+    __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
+  %}
   ins_pipe(istore_mem_reg);
 %}
diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/sparc/vm/stubGenerator_sparc.cpp
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed May 05 09:28:13 2010 -0400
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed May 05 16:39:47 2010 -0700
@@ -1148,7 +1148,7 @@
       __ andn(from, 7, from);     // Align address
       __ ldx(from, 0, O3);
       __ inc(from, 8);
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_loop);
       __ ldx(from, 0, O4);
       __ deccc(count, count_dec); // Can we do next iteration after this one?
@@ -1220,7 +1220,7 @@
       //
       __ andn(end_from, 7, end_from);     // Align address
       __ ldx(end_from, 0, O3);
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_loop);
       __ ldx(end_from, -8, O4);
       __ deccc(count, count_dec); // Can we do next iteration after this one?
@@ -1349,7 +1349,7 @@
     __ BIND(L_copy_byte);
       __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
       __ delayed()->nop();
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_copy_byte_loop);
       __ ldub(from, offset, O3);
       __ deccc(count);
@@ -1445,7 +1445,7 @@
                                         L_aligned_copy, L_copy_byte);
     }
     // copy 4 elements (16 bytes) at a time
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_aligned_copy);
       __ dec(end_from, 16);
       __ ldx(end_from, 8, O3);
@@ -1461,7 +1461,7 @@
     __ BIND(L_copy_byte);
       __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
       __ delayed()->nop();
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_copy_byte_loop);
       __ dec(end_from);
      __ dec(end_to);
@@ -1577,7 +1577,7 @@
     __ BIND(L_copy_2_bytes);
       __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
       __ delayed()->nop();
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_copy_2_bytes_loop);
       __ lduh(from, offset, O3);
       __ deccc(count);
@@ -1684,7 +1684,7 @@
                                         L_aligned_copy, L_copy_2_bytes);
     }
     // copy 4 elements (16 bytes) at a time
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_aligned_copy);
       __ dec(end_from, 16);
       __ ldx(end_from, 8, O3);
@@ -1781,7 +1781,7 @@
     // copy with shift 4 elements (16 bytes) at a time
     __ dec(count, 4);   // The cmp at the beginning guaranty count >= 4
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_copy_16_bytes);
       __ ldx(from, 4, O4);
       __ deccc(count, 4); // Can we do next iteration after this one?
@@ -1907,7 +1907,7 @@
     // to form 2 aligned 8-bytes chunks to store.
     //
     __ ldx(end_from, -4, O3);
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_copy_16_bytes);
       __ ldx(end_from, -12, O4);
       __ deccc(count, 4);
@@ -1929,7 +1929,7 @@
       __ delayed()->inc(count, 4);

     // copy 4 elements (16 bytes) at a time
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_aligned_copy);
       __ dec(end_from, 16);
       __ ldx(end_from, 8, O3);
@@ -2000,6 +2000,27 @@
     //      to:    O1
     //      count: O2 treated as signed
     //
+    //  count -= 2;
+    //  if ( count >= 0 ) { // >= 2 elements
+    //    if ( count > 6) { // >= 8 elements
+    //      count -= 6; // original count - 8
+    //      do {
+    //        copy_8_elements;
+    //        count -= 8;
+    //      } while ( count >= 0 );
+    //      count += 6;
+    //    }
+    //    if ( count >= 0 ) { // >= 2 elements
+    //      do {
+    //        copy_2_elements;
+    //      } while ( (count=count-2) >= 0 );
+    //    }
+    //  }
+    //  count += 2;
+    //  if ( count != 0 ) { // 1 element left
+    //    copy_1_element;
+    //  }
+    //
   void generate_disjoint_long_copy_core(bool aligned) {
     Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
     const Register from    = O0;  // source array address
@@ -2012,7 +2033,39 @@
     __ mov(G0, offset0);   // offset from start of arrays (0)
     __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
     __ delayed()->add(offset0, 8, offset8);
-      __ align(16);
+
+    // Copy by 64 bytes chunks
+    Label L_copy_64_bytes;
+    const Register from64 = O3;  // source address
+    const Register to64   = G3;  // destination address
+    __ subcc(count, 6, O3);
+    __ brx(Assembler::negative, false, Assembler::pt, L_copy_16_bytes );
+    __ delayed()->mov(to, to64);
+    // Now we can use O4(offset0), O5(offset8) as temps
+    __ mov(O3, count);
+    __ mov(from, from64);
+
+    __ align(OptoLoopAlignment);
+  __ BIND(L_copy_64_bytes);
+    for( int off = 0; off < 64; off += 16 ) {
+      __ ldx(from64,  off+0, O4);
+      __ ldx(from64,  off+8, O5);
+      __ stx(O4, to64,  off+0);
+      __ stx(O5, to64,  off+8);
+    }
+    __ deccc(count, 8);
+    __ inc(from64, 64);
+    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_64_bytes);
+    __ delayed()->inc(to64, 64);
+
+    // Restore O4(offset0), O5(offset8)
+    __ sub(from64, from, offset0);
+    __ inccc(count, 6);
+    __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
+    __ delayed()->add(offset0, 8, offset8);
+
+    // Copy by 16 bytes chunks
+    __ align(OptoLoopAlignment);
   __ BIND(L_copy_16_bytes);
     __ ldx(from, offset0, O3);
     __ ldx(from, offset8, G3);
@@ -2023,6 +2076,7 @@
     __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
     __ delayed()->inc(offset8, 16);

+    // Copy last 8 bytes
   __ BIND(L_copy_8_bytes);
     __ inccc(count, 2);
     __ brx(Assembler::zero, true, Assembler::pn, L_exit );
@@ -2085,7 +2139,7 @@
     __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_8_bytes );
     __ delayed()->sllx(count, LogBytesPerLong, offset8);
     __ sub(offset8, 8, offset0);
-      __ align(16);
+    __ align(OptoLoopAlignment);
   __ BIND(L_copy_16_bytes);
     __ ldx(from, offset8, O2);
     __ ldx(from, offset0, O3);
@@ -2351,7 +2405,7 @@
     //   (O5 = 0; ; O5 += wordSize) --- offset from src, dest arrays
     //   (O2 = len; O2 != 0; O2--) --- number of oops *remaining*
     //   G3, G4, G5 --- current oop, oop.klass, oop.klass.super
-    __ align(16);
+    __ align(OptoLoopAlignment);

   __ BIND(store_element);
     __ deccc(G1_remain);       // decrement the count
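The pseudocode comment added to generate_disjoint_long_copy_core above documents the loop nest the stub implements. A plain-Java rendering of the same control flow (illustrative only, not HotSpot code): an unrolled eight-element main loop, a two-element cleanup loop, then at most one trailing element.

    public class DisjointCopySketch {
        static void copy(long[] from, long[] to, int count) {
            int i = 0;
            count -= 2;
            if (count >= 0) {              // >= 2 elements
                if (count > 6) {           // >= 8 elements
                    count -= 6;            // original count - 8
                    do {                   // copy_8_elements
                        for (int k = 0; k < 8; k++) {
                            to[i + k] = from[i + k];
                        }
                        i += 8;
                        count -= 8;
                    } while (count >= 0);
                    count += 6;
                }
                if (count >= 0) {          // >= 2 elements
                    do {                   // copy_2_elements
                        to[i] = from[i];
                        to[i + 1] = from[i + 1];
                        i += 2;
                    } while ((count = count - 2) >= 0);
                }
            }
            count += 2;
            if (count != 0) {              // 1 element left
                to[i] = from[i];           // copy_1_element
            }
        }
    }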
diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/sparc/vm/vm_version_sparc.cpp
--- a/src/cpu/sparc/vm/vm_version_sparc.cpp	Wed May 05 09:28:13 2010 -0400
+++ b/src/cpu/sparc/vm/vm_version_sparc.cpp	Wed May 05 16:39:47 2010 -0700
@@ -86,14 +86,24 @@
     if (FLAG_IS_DEFAULT(InteriorEntryAlignment)) {
       FLAG_SET_DEFAULT(InteriorEntryAlignment, 4);
     }
+    if (is_niagara1_plus()) {
+      if (AllocatePrefetchStyle > 0 && FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
+        // Use BIS instruction for allocation prefetch.
+        FLAG_SET_DEFAULT(AllocatePrefetchStyle, 3);
+        if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
+          // Use smaller prefetch distance on N2 with BIS
+          FLAG_SET_DEFAULT(AllocatePrefetchDistance, 64);
+        }
+      }
+      if (AllocatePrefetchStyle != 3 && FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
+        // Use different prefetch distance without BIS
+        FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256);
+      }
+    }
+#endif
     if (FLAG_IS_DEFAULT(OptoLoopAlignment)) {
       FLAG_SET_DEFAULT(OptoLoopAlignment, 4);
     }
-    if (is_niagara1_plus() && FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
-      // Use smaller prefetch distance on N2
-      FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256);
-    }
-#endif
   }

   // Use hardware population count instruction if available.
diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/x86/vm/assembler_x86.cpp
--- a/src/cpu/x86/vm/assembler_x86.cpp	Wed May 05 09:28:13 2010 -0400
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Wed May 05 16:39:47 2010 -0700
@@ -3365,6 +3365,13 @@

 #else // LP64

+void Assembler::set_byte_if_not_zero(Register dst) {
+  int enc = prefix_and_encode(dst->encoding(), true);
+  emit_byte(0x0F);
+  emit_byte(0x95);
+  emit_byte(0xE0 | enc);
+}
+
 // 64bit only pieces of the assembler
 // This should only be used by 64bit instructions that can use rip-relative
 // it cannot be used by instructions that want an immediate value.
@@ -7968,7 +7975,7 @@
   case 2: return "special";
   case 3: return "empty";
   }
-  ShouldNotReachHere()
+  ShouldNotReachHere();
   return NULL;
 }
diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed May 05 09:28:13 2010 -0400
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed May 05 16:39:47 2010 -0700
@@ -455,6 +455,60 @@
 }

+// Emit the code to remove the frame from the stack in the exception
+// unwind path.
+int LIR_Assembler::emit_unwind_handler() {
+#ifndef PRODUCT
+  if (CommentedAssembly) {
+    _masm->block_comment("Unwind handler");
+  }
+#endif
+
+  int offset = code_offset();
+
+  // Fetch the exception from TLS and clear out exception related thread state
+  __ get_thread(rsi);
+  __ movptr(rax, Address(rsi, JavaThread::exception_oop_offset()));
+  __ movptr(Address(rsi, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
+  __ movptr(Address(rsi, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
+
+  __ bind(_unwind_handler_entry);
+  __ verify_not_null_oop(rax);
+  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
+    __ mov(rsi, rax);  // Preserve the exception
+  }
+
+  // Perform needed unlocking
+  MonitorExitStub* stub = NULL;
+  if (method()->is_synchronized()) {
+    monitor_address(0, FrameMap::rax_opr);
+    stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
+    __ unlock_object(rdi, rbx, rax, *stub->entry());
+    __ bind(*stub->continuation());
+  }
+
+  if (compilation()->env()->dtrace_method_probes()) {
+    __ movoop(Address(rsp, 0), method()->constant_encoding());
+    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
+  }
+
+  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
+    __ mov(rax, rsi);  // Restore the exception
+  }
+
+  // remove the activation and dispatch to the unwind handler
+  __ remove_frame(initial_frame_size_in_bytes());
+  __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
+
+  // Emit the slow path assembly
+  if (stub != NULL) {
+    stub->emit_code(this);
+  }
+
+  return offset;
+}
+
+
 int LIR_Assembler::emit_deopt_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
@@ -1190,8 +1244,7 @@
       break;
 #endif // _LP64

     case T_INT:
-      // %%% could this be a movl? this is safer but longer instruction
-      __ movl2ptr(dest->as_register(), from_addr);
+      __ movl(dest->as_register(), from_addr);
       break;

     case T_LONG: {
@@ -1249,7 +1302,6 @@
         __ shll(dest_reg, 24);
         __ sarl(dest_reg, 24);
       }
-      // These are unsigned so the zero extension on 64bit is just what we need
       break;
     }

@@ -1261,8 +1313,6 @@
       } else {
         __ movw(dest_reg, from_addr);
       }
-      // This is unsigned so the zero extension on 64bit is just what we need
-      // __ movl2ptr(dest_reg, dest_reg);
       break;
     }

@@ -1275,8 +1325,6 @@
         __ shll(dest_reg, 16);
         __ sarl(dest_reg, 16);
       }
-      // Might not be needed in 64bit but certainly doesn't hurt (except for code size)
-      __ movl2ptr(dest_reg, dest_reg);
       break;
     }

@@ -2690,19 +2738,14 @@
   } else {
     assert(code == lir_cmp_l2i, "check");
 #ifdef _LP64
-    Register dest = dst->as_register();
-    __ xorptr(dest, dest);
-    Label high, done;
-    __ cmpptr(left->as_register_lo(), right->as_register_lo());
-    __ jcc(Assembler::equal, done);
-    __ jcc(Assembler::greater, high);
-    __ decrement(dest);
-    __ jmp(done);
-    __ bind(high);
-    __ increment(dest);
-
-    __ bind(done);
-
+    Label done;
+    Register dest = dst->as_register();
+    __ cmpptr(left->as_register_lo(), right->as_register_lo());
+    __ movl(dest, -1);
+    __ jccb(Assembler::less, done);
+    __ set_byte_if_not_zero(dest);
+    __ movzbl(dest, dest);
+    __ bind(done);
 #else
     __ lcmp2int(left->as_register_hi(),
                 left->as_register_lo(),
@@ -2795,47 +2838,48 @@
   // On 64bit this will die since it will take a movq & jmp, must be only a jmp
   __ jump(RuntimeAddress(__ pc()));

-  assert(__ offset() - start <= call_stub_size, "stub too big")
+  assert(__ offset() - start <= call_stub_size, "stub too big");
   __ end_a_stub();
 }

-void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind) {
+void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
   assert(exceptionOop->as_register() == rax, "must match");
-  assert(unwind || exceptionPC->as_register() == rdx, "must match");
+  assert(exceptionPC->as_register() == rdx, "must match");

   // exception object is not added to oop map by LinearScan
   // (LinearScan assumes that no oops are in fixed registers)
   info->add_register_oop(exceptionOop);
   Runtime1::StubID unwind_id;

-  if (!unwind) {
-    // get current pc information
-    // pc is only needed if the method has an exception handler, the unwind code does not need it.
-    int pc_for_athrow_offset = __ offset();
-    InternalAddress pc_for_athrow(__ pc());
-    __ lea(exceptionPC->as_register(), pc_for_athrow);
-    add_call_info(pc_for_athrow_offset, info); // for exception handler
-
-    __ verify_not_null_oop(rax);
-    // search an exception handler (rax: exception oop, rdx: throwing pc)
-    if (compilation()->has_fpu_code()) {
-      unwind_id = Runtime1::handle_exception_id;
-    } else {
-      unwind_id = Runtime1::handle_exception_nofpu_id;
-    }
-    __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
+  // get current pc information
+  // pc is only needed if the method has an exception handler, the unwind code does not need it.
+ int pc_for_athrow_offset = __ offset(); + InternalAddress pc_for_athrow(__ pc()); + __ lea(exceptionPC->as_register(), pc_for_athrow); + add_call_info(pc_for_athrow_offset, info); // for exception handler + + __ verify_not_null_oop(rax); + // search an exception handler (rax: exception oop, rdx: throwing pc) + if (compilation()->has_fpu_code()) { + unwind_id = Runtime1::handle_exception_id; } else { - // remove the activation - __ remove_frame(initial_frame_size_in_bytes()); - __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id))); + unwind_id = Runtime1::handle_exception_nofpu_id; } + __ call(RuntimeAddress(Runtime1::entry_for(unwind_id))); // enough room for two byte trap __ nop(); } +void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { + assert(exceptionOop->as_register() == rax, "must match"); + + __ jmp(_unwind_handler_entry); +} + + void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { // optimized version for linear scan: diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/x86/vm/c1_Runtime1_x86.cpp --- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp Wed May 05 16:39:47 2010 -0700 @@ -781,7 +781,7 @@ // Restore SP from BP if the exception PC is a MethodHandle call site. NOT_LP64(__ get_thread(thread);) - __ cmpl(Address(thread, JavaThread::is_method_handle_exception_offset()), 0); + __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0); __ cmovptr(Assembler::notEqual, rsp, rbp); // continue at exception handler (return address removed) diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/x86/vm/c2_globals_x86.hpp --- a/src/cpu/x86/vm/c2_globals_x86.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/cpu/x86/vm/c2_globals_x86.hpp Wed May 05 16:39:47 2010 -0700 @@ -80,7 +80,6 @@ // Ergonomics related flags define_pd_global(uint64_t,MaxRAM, 4ULL*G); #endif // AMD64 -define_pd_global(intx, OptoLoopAlignment, 16); define_pd_global(intx, RegisterCostAreaRatio, 16000); // Peephole and CISC spilling both break the graph, and so makes the diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/x86/vm/frame_x86.cpp --- a/src/cpu/x86/vm/frame_x86.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/cpu/x86/vm/frame_x86.cpp Wed May 05 16:39:47 2010 -0700 @@ -291,8 +291,8 @@ BasicObjectLock* frame::interpreter_frame_monitor_end() const { BasicObjectLock* result = (BasicObjectLock*) *addr_at(interpreter_frame_monitor_block_top_offset); // make sure the pointer points inside the frame - assert((intptr_t) fp() > (intptr_t) result, "result must < than frame pointer"); - assert((intptr_t) sp() <= (intptr_t) result, "result must >= than stack pointer"); + assert(sp() <= (intptr_t*) result, "monitor end should be above the stack pointer"); + assert((intptr_t*) result < fp(), "monitor end should be strictly below the frame pointer"); return result; } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/x86/vm/globals_x86.hpp --- a/src/cpu/x86/vm/globals_x86.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/cpu/x86/vm/globals_x86.hpp Wed May 05 16:39:47 2010 -0700 @@ -45,6 +45,7 @@ #else define_pd_global(intx, CodeEntryAlignment, 16); #endif // COMPILER2 +define_pd_global(intx, OptoLoopAlignment, 16); define_pd_global(intx, InlineFrequencyCount, 100); define_pd_global(intx, InlineSmallCode, 1000); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/x86/vm/runtime_x86_32.cpp --- a/src/cpu/x86/vm/runtime_x86_32.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/cpu/x86/vm/runtime_x86_32.cpp Wed May 05 16:39:47 2010 -0700 
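The recurring change in this file and the x86 files that follow renames JavaThread::is_method_handle_exception_offset() to is_method_handle_return_offset(); the logic is unchanged: SP is restored from BP only when the exception PC is a MethodHandle call site. A minimal sketch of that selection, with hypothetical names (ThreadModel and select_unwind_sp are illustrative, not HotSpot code):

    #include <cstdint>

    struct ThreadModel {
      int is_method_handle_return;  // models the flag tested via cmpl(..., 0)
    };

    // Mirrors "cmovptr(notEqual, rsp, rbp)": keep SP unless the flag is set.
    static inline uintptr_t select_unwind_sp(const ThreadModel& t,
                                             uintptr_t sp, uintptr_t bp) {
      return t.is_method_handle_return != 0 ? bp : sp;
    }
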
@@ -115,8 +115,8 @@ // rax: exception handler for given - // Restore SP from BP if the exception PC is a MethodHandle call. - __ cmpl(Address(rcx, JavaThread::is_method_handle_exception_offset()), 0); + // Restore SP from BP if the exception PC is a MethodHandle call site. + __ cmpl(Address(rcx, JavaThread::is_method_handle_return_offset()), 0); __ cmovptr(Assembler::notEqual, rsp, rbp); // We have a handler in rax, (could be deopt blob) diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/x86/vm/sharedRuntime_x86_64.cpp --- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Wed May 05 16:39:47 2010 -0700 @@ -3328,8 +3328,8 @@ // rax: exception handler - // Restore SP from BP if the exception PC is a MethodHandle call. - __ cmpl(Address(r15_thread, JavaThread::is_method_handle_exception_offset()), 0); + // Restore SP from BP if the exception PC is a MethodHandle call site. + __ cmpl(Address(r15_thread, JavaThread::is_method_handle_return_offset()), 0); __ cmovptr(Assembler::notEqual, rsp, rbp); // We have a handler in rax (could be deopt blob). diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/x86/vm/stubGenerator_x86_32.cpp --- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp Wed May 05 16:39:47 2010 -0700 @@ -430,7 +430,7 @@ __ verify_oop(exception_oop); // Restore SP from BP if the exception PC is a MethodHandle call site. - __ cmpl(Address(thread, JavaThread::is_method_handle_exception_offset()), 0); + __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0); __ cmovptr(Assembler::notEqual, rsp, rbp); // continue at exception handler (return address removed) @@ -812,7 +812,7 @@ Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit; // Copy 64-byte chunks __ jmpb(L_copy_64_bytes); - __ align(16); + __ align(OptoLoopAlignment); __ BIND(L_copy_64_bytes_loop); if(UseUnalignedLoadStores) { @@ -874,7 +874,7 @@ Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit; // Copy 64-byte chunks __ jmpb(L_copy_64_bytes); - __ align(16); + __ align(OptoLoopAlignment); __ BIND(L_copy_64_bytes_loop); __ movq(mmx0, Address(from, 0)); __ movq(mmx1, Address(from, 8)); @@ -1144,7 +1144,7 @@ __ movl(Address(to, count, sf, 0), rdx); __ jmpb(L_copy_8_bytes); - __ align(16); + __ align(OptoLoopAlignment); // Move 8 bytes __ BIND(L_copy_8_bytes_loop); if (UseXMMForArrayCopy) { @@ -1235,7 +1235,7 @@ } } else { __ jmpb(L_copy_8_bytes); - __ align(16); + __ align(OptoLoopAlignment); __ BIND(L_copy_8_bytes_loop); __ fild_d(Address(from, 0)); __ fistp_d(Address(from, to_from, Address::times_1)); @@ -1282,7 +1282,7 @@ __ jmpb(L_copy_8_bytes); - __ align(16); + __ align(OptoLoopAlignment); __ BIND(L_copy_8_bytes_loop); if (VM_Version::supports_mmx()) { if (UseXMMForArrayCopy) { @@ -1454,7 +1454,7 @@ // Loop control: // for (count = -count; count != 0; count++) // Base pointers src, dst are biased by 8*count,to last element. 
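The align(16) to align(OptoLoopAlignment) substitutions above and below make the copy-loop alignment follow the flag now defined per platform in the globals_*.hpp files, rather than a hard-coded 16. The alignment operation itself is the usual power-of-two round-up; a small sketch, where align_up is an assumed free-standing helper:

    #include <cassert>
    #include <cstdint>

    // Round pc up to the next multiple of 'alignment' (a power of two),
    // which is what the assembler's align() achieves by padding with nops.
    static inline uintptr_t align_up(uintptr_t pc, uintptr_t alignment) {
      assert((alignment & (alignment - 1)) == 0 && "power of two expected");
      return (pc + alignment - 1) & ~(alignment - 1);
    }
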
- __ align(16); + __ align(OptoLoopAlignment); __ BIND(L_store_element); __ movptr(to_element_addr, elem); // store the oop diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/x86/vm/stubGenerator_x86_64.cpp --- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp Wed May 05 16:39:47 2010 -0700 @@ -871,9 +871,8 @@ } address generate_fp_mask(const char *stub_name, int64_t mask) { + __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", stub_name); - - __ align(16); address start = __ pc(); __ emit_data64( mask, relocInfo::none ); @@ -1268,7 +1267,7 @@ Label& L_copy_32_bytes, Label& L_copy_8_bytes) { DEBUG_ONLY(__ stop("enter at entry label, not here")); Label L_loop; - __ align(16); + __ align(OptoLoopAlignment); __ BIND(L_loop); if(UseUnalignedLoadStores) { __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24)); @@ -1309,7 +1308,7 @@ Label& L_copy_32_bytes, Label& L_copy_8_bytes) { DEBUG_ONLY(__ stop("enter at entry label, not here")); Label L_loop; - __ align(16); + __ align(OptoLoopAlignment); __ BIND(L_loop); if(UseUnalignedLoadStores) { __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16)); @@ -2229,7 +2228,7 @@ // Loop control: // for (count = -count; count != 0; count++) // Base pointers src, dst are biased by 8*(count-1),to last element. - __ align(16); + __ align(OptoLoopAlignment); __ BIND(L_store_element); __ store_heap_oop(to_element_addr, rax_oop); // store the oop diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/x86/vm/x86_32.ad --- a/src/cpu/x86/vm/x86_32.ad Wed May 05 09:28:13 2010 -0400 +++ b/src/cpu/x86/vm/x86_32.ad Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ // -// Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. +// Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -6272,6 +6272,30 @@ ins_pipe( ialu_reg_reg); %} +instruct bytes_reverse_unsigned_short(eRegI dst) %{ + match(Set dst (ReverseBytesUS dst)); + + format %{ "BSWAP $dst\n\t" + "SHR $dst,16\n\t" %} + ins_encode %{ + __ bswapl($dst$$Register); + __ shrl($dst$$Register, 16); + %} + ins_pipe( ialu_reg ); +%} + +instruct bytes_reverse_short(eRegI dst) %{ + match(Set dst (ReverseBytesS dst)); + + format %{ "BSWAP $dst\n\t" + "SAR $dst,16\n\t" %} + ins_encode %{ + __ bswapl($dst$$Register); + __ sarl($dst$$Register, 16); + %} + ins_pipe( ialu_reg ); +%} + //---------- Zeros Count Instructions ------------------------------------------ diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/x86/vm/x86_64.ad --- a/src/cpu/x86/vm/x86_64.ad Wed May 05 09:28:13 2010 -0400 +++ b/src/cpu/x86/vm/x86_64.ad Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ // -// Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved. +// Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
// // This code is free software; you can redistribute it and/or modify it @@ -7371,6 +7371,30 @@ ins_pipe( ialu_reg); %} +instruct bytes_reverse_unsigned_short(rRegI dst) %{ + match(Set dst (ReverseBytesUS dst)); + + format %{ "bswapl $dst\n\t" + "shrl $dst,16\n\t" %} + ins_encode %{ + __ bswapl($dst$$Register); + __ shrl($dst$$Register, 16); + %} + ins_pipe( ialu_reg ); +%} + +instruct bytes_reverse_short(rRegI dst) %{ + match(Set dst (ReverseBytesS dst)); + + format %{ "bswapl $dst\n\t" + "sar $dst,16\n\t" %} + ins_encode %{ + __ bswapl($dst$$Register); + __ sarl($dst$$Register, 16); + %} + ins_pipe( ialu_reg ); +%} + instruct loadI_reversed(rRegI dst, memory src) %{ match(Set dst (ReverseBytesI (LoadI src))); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/zero/vm/cppInterpreter_zero.cpp --- a/src/cpu/zero/vm/cppInterpreter_zero.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp Wed May 05 16:39:47 2010 -0700 @@ -39,21 +39,9 @@ void CppInterpreter::normal_entry(methodOop method, intptr_t UNUSED, TRAPS) { JavaThread *thread = (JavaThread *) THREAD; - ZeroStack *stack = thread->zero_stack(); - - // Adjust the caller's stack frame to accomodate any additional - // local variables we have contiguously with our parameters. - int extra_locals = method->max_locals() - method->size_of_parameters(); - if (extra_locals > 0) { - if (extra_locals > stack->available_words()) { - Unimplemented(); - } - for (int i = 0; i < extra_locals; i++) - stack->push(0); - } // Allocate and initialize our frame. - InterpreterFrame *frame = InterpreterFrame::build(stack, method, thread); + InterpreterFrame *frame = InterpreterFrame::build(method, CHECK); thread->push_zero_frame(frame); // Execute those bytecodes! @@ -76,12 +64,6 @@ intptr_t *result = NULL; int result_slots = 0; - // Check we're not about to run out of stack - if (stack_overflow_imminent(thread)) { - CALL_VM_NOCHECK(InterpreterRuntime::throw_StackOverflowError(thread)); - goto unwind_and_return; - } - while (true) { // We can set up the frame anchor with everything we want at // this point as we are thread_in_Java and no safepoints can @@ -123,9 +105,9 @@ int monitor_words = frame::interpreter_frame_monitor_size(); // Allocate the space - if (monitor_words > stack->available_words()) { - Unimplemented(); - } + stack->overflow_check(monitor_words, THREAD); + if (HAS_PENDING_EXCEPTION) + break; stack->alloc(monitor_words * wordSize); // Move the expression stack contents @@ -172,8 +154,6 @@ } } - unwind_and_return: - // Unwind the current frame thread->pop_zero_frame(); @@ -193,17 +173,11 @@ ZeroStack *stack = thread->zero_stack(); // Allocate and initialize our frame - InterpreterFrame *frame = InterpreterFrame::build(stack, method, thread); + InterpreterFrame *frame = InterpreterFrame::build(method, CHECK); thread->push_zero_frame(frame); interpreterState istate = frame->interpreter_state(); intptr_t *locals = istate->locals(); - // Check we're not about to run out of stack - if (stack_overflow_imminent(thread)) { - CALL_VM_NOCHECK(InterpreterRuntime::throw_StackOverflowError(thread)); - goto unwind_and_return; - } - // Update the invocation counter if ((UseCompiler || CountCompiledCalls) && !method->is_synchronized()) { InvocationCounter *counter = method->invocation_counter(); @@ -264,9 +238,10 @@ assert(function != NULL, "should be set if signature handler is"); // Build the argument list - if (handler->argument_count() * 2 > stack->available_words()) { - Unimplemented(); - } + 
stack->overflow_check(handler->argument_count() * 2, THREAD); + if (HAS_PENDING_EXCEPTION) + goto unlock_unwind_and_return; + void **arguments; void *mirror; { arguments = @@ -503,9 +478,7 @@ switch (entry->flag_state()) { case ltos: case dtos: - if (stack->available_words() < 1) { - Unimplemented(); - } + stack->overflow_check(1, CHECK); stack->alloc(wordSize); break; } @@ -601,39 +574,30 @@ stack->set_sp(stack->sp() + method->size_of_parameters()); } -bool CppInterpreter::stack_overflow_imminent(JavaThread *thread) { - // How is the ABI stack? - address stack_top = thread->stack_base() - thread->stack_size(); - int free_stack = os::current_stack_pointer() - stack_top; - if (free_stack < StackShadowPages * os::vm_page_size()) { - return true; - } +InterpreterFrame *InterpreterFrame::build(const methodOop method, TRAPS) { + JavaThread *thread = (JavaThread *) THREAD; + ZeroStack *stack = thread->zero_stack(); + + // Calculate the size of the frame we'll build, including + // any adjustments to the caller's frame that we'll make. + int extra_locals = 0; + int monitor_words = 0; + int stack_words = 0; - // How is the Zero stack? - // Throwing a StackOverflowError involves a VM call, which means - // we need a frame on the stack. We should be checking here to - // ensure that methods we call have enough room to install the - // largest possible frame, but that's more than twice the size - // of the entire Zero stack we get by default, so we just check - // we have *some* space instead... - free_stack = thread->zero_stack()->available_words() * wordSize; - if (free_stack < StackShadowPages * os::vm_page_size()) { - return true; + if (!method->is_native()) { + extra_locals = method->max_locals() - method->size_of_parameters(); + stack_words = method->max_stack(); } + if (method->is_synchronized()) { + monitor_words = frame::interpreter_frame_monitor_size(); + } + stack->overflow_check( + extra_locals + header_words + monitor_words + stack_words, CHECK_NULL); - return false; -} - -InterpreterFrame *InterpreterFrame::build(ZeroStack* stack, - const methodOop method, - JavaThread* thread) { - int monitor_words = - method->is_synchronized() ? frame::interpreter_frame_monitor_size() : 0; - int stack_words = method->is_native() ? 0 : method->max_stack(); - - if (header_words + monitor_words + stack_words > stack->available_words()) { - Unimplemented(); - } + // Adjust the caller's stack frame to accomodate any additional + // local variables we have contiguously with our parameters. 
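The Unimplemented() stubs being deleted throughout this file are replaced by ZeroStack::overflow_check(), whose shape (per the new stack_zero.cpp and stack_zero.inline.hpp later in this patch) is a two-part test: headroom on the Zero stack in words, then headroom on the native ABI stack against the shadow-page reserve. A compact model under assumed stand-in names (ZeroStackModel and would_overflow are not the VM types):

    #include <cstddef>

    // Models ZeroStack::overflow_check(): trip the handler if either the
    // interpreter's own stack or the native (ABI) stack is nearly full.
    struct ZeroStackModel {
      size_t available_words;  // words left on the Zero stack
      size_t abi_free_bytes;   // bytes left on the native thread stack
      size_t shadow_bytes;     // StackShadowPages * page size
    };

    static bool would_overflow(const ZeroStackModel& s, size_t required_words) {
      return required_words > s.available_words ||
             s.abi_free_bytes < s.shadow_bytes;
    }
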
+ for (int i = 0; i < extra_locals; i++) + stack->push(0); intptr_t *locals; if (method->is_native()) @@ -812,14 +776,13 @@ // Deoptimization helpers -InterpreterFrame *InterpreterFrame::build(ZeroStack* stack, int size) { +InterpreterFrame *InterpreterFrame::build(int size, TRAPS) { + ZeroStack *stack = ((JavaThread *) THREAD)->zero_stack(); + int size_in_words = size >> LogBytesPerWord; assert(size_in_words * wordSize == size, "unaligned"); assert(size_in_words >= header_words, "too small"); - - if (size_in_words > stack->available_words()) { - Unimplemented(); - } + stack->overflow_check(size_in_words, CHECK_NULL); stack->push(0); // next_frame, filled in later intptr_t *fp = stack->sp(); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/zero/vm/cppInterpreter_zero.hpp --- a/src/cpu/zero/vm/cppInterpreter_zero.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/cpu/zero/vm/cppInterpreter_zero.hpp Wed May 05 16:39:47 2010 -0700 @@ -39,9 +39,5 @@ static void main_loop(int recurse, TRAPS); private: - // Stack overflow checks - static bool stack_overflow_imminent(JavaThread *thread); - - private: // Fast result type determination static BasicType result_type_of(methodOop method); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/zero/vm/entryFrame_zero.hpp --- a/src/cpu/zero/vm/entryFrame_zero.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/cpu/zero/vm/entryFrame_zero.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,6 +1,6 @@ /* * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. - * Copyright 2008 Red Hat, Inc. + * Copyright 2008, 2010 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -47,10 +47,10 @@ }; public: - static EntryFrame *build(ZeroStack* stack, - const intptr_t* parameters, + static EntryFrame *build(const intptr_t* parameters, int parameter_words, - JavaCallWrapper* call_wrapper); + JavaCallWrapper* call_wrapper, + TRAPS); public: JavaCallWrapper *call_wrapper() const { return (JavaCallWrapper *) value_of_word(call_wrapper_off); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/zero/vm/fakeStubFrame_zero.hpp --- a/src/cpu/zero/vm/fakeStubFrame_zero.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/cpu/zero/vm/fakeStubFrame_zero.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,6 +1,6 @@ /* * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. - * Copyright 2008 Red Hat, Inc. + * Copyright 2008, 2010 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,7 +42,7 @@ }; public: - static FakeStubFrame *build(ZeroStack* stack); + static FakeStubFrame *build(TRAPS); public: void identify_word(int frame_index, diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/zero/vm/globals_zero.hpp --- a/src/cpu/zero/vm/globals_zero.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/cpu/zero/vm/globals_zero.hpp Wed May 05 16:39:47 2010 -0700 @@ -35,6 +35,7 @@ define_pd_global(bool, UncommonNullCast, true); define_pd_global(intx, CodeEntryAlignment, 32); +define_pd_global(intx, OptoLoopAlignment, 16); define_pd_global(intx, InlineFrequencyCount, 100); define_pd_global(intx, PreInflateSpin, 10); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/zero/vm/interpreterFrame_zero.hpp --- a/src/cpu/zero/vm/interpreterFrame_zero.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/cpu/zero/vm/interpreterFrame_zero.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,6 +1,6 @@ /* * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. 
- * Copyright 2008 Red Hat, Inc. + * Copyright 2008, 2010 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -55,10 +55,8 @@ }; public: - static InterpreterFrame *build(ZeroStack* stack, - const methodOop method, - JavaThread* thread); - static InterpreterFrame *build(ZeroStack* stack, int size); + static InterpreterFrame *build(const methodOop method, TRAPS); + static InterpreterFrame *build(int size, TRAPS); public: interpreterState interpreter_state() const { diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/zero/vm/interpreterRT_zero.cpp --- a/src/cpu/zero/vm/interpreterRT_zero.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/cpu/zero/vm/interpreterRT_zero.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,6 +1,6 @@ /* * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved. - * Copyright 2007, 2008 Red Hat, Inc. + * Copyright 2007, 2008, 2010 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -140,9 +140,8 @@ int required_words = (align_size_up(sizeof(ffi_cif), wordSize) >> LogBytesPerWord) + (method->is_static() ? 2 : 1) + method->size_of_parameters() + 1; - if (required_words > stack->available_words()) { - Unimplemented(); - } + + stack->overflow_check(required_words, CHECK_NULL); intptr_t *buf = (intptr_t *) stack->alloc(required_words * wordSize); SlowSignatureHandlerGenerator sshg(methodHandle(thread, method), buf); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/zero/vm/stack_zero.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/cpu/zero/vm/stack_zero.cpp Wed May 05 16:39:47 2010 -0700 @@ -0,0 +1,73 @@ +/* + * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2010 Red Hat, Inc. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ + +#include "incls/_precompiled.incl" +#include "incls/_stack_zero.cpp.incl" + +void ZeroStack::handle_overflow(TRAPS) { + JavaThread *thread = (JavaThread *) THREAD; + + // Set up the frame anchor if it isn't already + bool has_last_Java_frame = thread->has_last_Java_frame(); + if (!has_last_Java_frame) { + ZeroFrame *frame = thread->top_zero_frame(); + while (frame) { + if (frame->is_shark_frame()) + break; + + if (frame->is_interpreter_frame()) { + interpreterState istate = + frame->as_interpreter_frame()->interpreter_state(); + if (istate->self_link() == istate) + break; + } + + frame = frame->next(); + } + + if (frame == NULL) + fatal("unrecoverable stack overflow"); + + thread->set_last_Java_frame(frame); + } + + // Throw the exception + switch (thread->thread_state()) { + case _thread_in_Java: + InterpreterRuntime::throw_StackOverflowError(thread); + break; + + case _thread_in_vm: + Exceptions::throw_stack_overflow_exception(thread, __FILE__, __LINE__); + break; + + default: + ShouldNotReachHere(); + } + + // Reset the frame anchor if necessary + if (!has_last_Java_frame) + thread->reset_last_Java_frame(); +} diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/zero/vm/stack_zero.hpp --- a/src/cpu/zero/vm/stack_zero.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/cpu/zero/vm/stack_zero.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,6 +1,6 @@ /* * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. - * Copyright 2008, 2009 Red Hat, Inc. + * Copyright 2008, 2009, 2010 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,9 +29,14 @@ intptr_t *_top; // the word past the end of the stack intptr_t *_sp; // the top word on the stack + private: + int _shadow_pages_size; // how much ABI stack must we keep free? + public: ZeroStack() - : _base(NULL), _top(NULL), _sp(NULL) {} + : _base(NULL), _top(NULL), _sp(NULL) { + _shadow_pages_size = StackShadowPages * os::vm_page_size(); + } bool needs_setup() const { return _base == NULL; @@ -81,6 +86,14 @@ return _sp -= count; } + int shadow_pages_size() const { + return _shadow_pages_size; + } + + public: + void overflow_check(int required_words, TRAPS); + static void handle_overflow(TRAPS); + public: static ByteSize base_offset() { return byte_offset_of(ZeroStack, _base); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/zero/vm/stack_zero.inline.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/cpu/zero/vm/stack_zero.inline.hpp Wed May 05 16:39:47 2010 -0700 @@ -0,0 +1,43 @@ +/* + * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2010 Red Hat, Inc. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + */ + +// This function should match SharkStack::CreateStackOverflowCheck +inline void ZeroStack::overflow_check(int required_words, TRAPS) { + JavaThread *thread = (JavaThread *) THREAD; + + // Check the Zero stack + if (required_words > available_words()) { + handle_overflow(THREAD); + return; + } + + // Check the ABI stack + address stack_top = thread->stack_base() - thread->stack_size(); + int free_stack = ((address) &stack_top) - stack_top; + if (free_stack < shadow_pages_size()) { + handle_overflow(THREAD); + return; + } +} diff -r f43b5e9f7881 -r 3fca8e9cd36a src/cpu/zero/vm/stubGenerator_zero.cpp --- a/src/cpu/zero/vm/stubGenerator_zero.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/cpu/zero/vm/stubGenerator_zero.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,6 +1,6 @@ /* * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. - * Copyright 2007, 2008 Red Hat, Inc. + * Copyright 2007, 2008, 2010 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,38 +60,43 @@ } // Allocate and initialize our frame - thread->push_zero_frame( - EntryFrame::build(stack, parameters, parameter_words, call_wrapper)); + EntryFrame *frame = + EntryFrame::build(parameters, parameter_words, call_wrapper, THREAD); - // Make the call - Interpreter::invoke_method(method, entry_point, THREAD); - - // Store result depending on type if (!HAS_PENDING_EXCEPTION) { - switch (result_type) { - case T_INT: - *(jint *) result = *(jint *) stack->sp(); - break; - case T_LONG: - *(jlong *) result = *(jlong *) stack->sp(); - break; - case T_FLOAT: - *(jfloat *) result = *(jfloat *) stack->sp(); - break; - case T_DOUBLE: - *(jdouble *) result = *(jdouble *) stack->sp(); - break; - case T_OBJECT: - *(oop *) result = *(oop *) stack->sp(); - break; - default: - ShouldNotReachHere(); + // Push the frame + thread->push_zero_frame(frame); + + // Make the call + Interpreter::invoke_method(method, entry_point, THREAD); + + // Store the result + if (!HAS_PENDING_EXCEPTION) { + switch (result_type) { + case T_INT: + *(jint *) result = *(jint *) stack->sp(); + break; + case T_LONG: + *(jlong *) result = *(jlong *) stack->sp(); + break; + case T_FLOAT: + *(jfloat *) result = *(jfloat *) stack->sp(); + break; + case T_DOUBLE: + *(jdouble *) result = *(jdouble *) stack->sp(); + break; + case T_OBJECT: + *(oop *) result = *(oop *) stack->sp(); + break; + default: + ShouldNotReachHere(); + } } + + // Unwind the frame + thread->pop_zero_frame(); } - // Unwind our frame - thread->pop_zero_frame(); - // Tear down the stack if necessary if (stack_needs_teardown) stack->teardown(); @@ -226,13 +231,13 @@ StubGenerator g(code, all); } -EntryFrame *EntryFrame::build(ZeroStack* stack, - const intptr_t* parameters, +EntryFrame *EntryFrame::build(const intptr_t* parameters, int parameter_words, - JavaCallWrapper* call_wrapper) { - if (header_words + parameter_words > stack->available_words()) { - Unimplemented(); - } + JavaCallWrapper* call_wrapper, + TRAPS) { + + ZeroStack *stack = ((JavaThread *) THREAD)->zero_stack(); + stack->overflow_check(header_words + parameter_words, CHECK_NULL); stack->push(0); // next_frame, filled in later intptr_t *fp = stack->sp(); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/os/linux/vm/os_linux.cpp --- a/src/os/linux/vm/os_linux.cpp 
Wed May 05 09:28:13 2010 -0400 +++ b/src/os/linux/vm/os_linux.cpp Wed May 05 16:39:47 2010 -0700 @@ -3495,7 +3495,8 @@ // libjsig also interposes the sigaction() call below and saves the // old sigaction on it own. } else { - fatal2("Encountered unexpected pre-existing sigaction handler %#lx for signal %d.", (long)oldhand, sig); + fatal(err_msg("Encountered unexpected pre-existing sigaction handler " + "%#lx for signal %d.", (long)oldhand, sig)); } } @@ -3817,7 +3818,8 @@ Linux::set_page_size(sysconf(_SC_PAGESIZE)); if (Linux::page_size() == -1) { - fatal1("os_linux.cpp: os::init: sysconf failed (%s)", strerror(errno)); + fatal(err_msg("os_linux.cpp: os::init: sysconf failed (%s)", + strerror(errno))); } init_page_sizes((size_t) Linux::page_size()); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/os/solaris/vm/os_solaris.cpp --- a/src/os/solaris/vm/os_solaris.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/os/solaris/vm/os_solaris.cpp Wed May 05 16:39:47 2010 -0700 @@ -1567,7 +1567,8 @@ // treat %g2 as a caller-save register, preserving it in a %lN. thread_key_t tk; if (thr_keycreate( &tk, NULL ) ) - fatal1("os::allocate_thread_local_storage: thr_keycreate failed (%s)", strerror(errno)); + fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed " + "(%s)", strerror(errno))); return int(tk); } @@ -1585,7 +1586,8 @@ if (errno == ENOMEM) { vm_exit_out_of_memory(SMALLINT, "thr_setspecific: out of swap space"); } else { - fatal1("os::thread_local_storage_at_put: thr_setspecific failed (%s)", strerror(errno)); + fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed " + "(%s)", strerror(errno))); } } else { ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ; @@ -1738,7 +1740,7 @@ jlong os::javaTimeMillis() { timeval t; if (gettimeofday( &t, NULL) == -1) - fatal1("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)); + fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno))); return jlong(t.tv_sec) * 1000 + jlong(t.tv_usec) / 1000; } @@ -4233,7 +4235,8 @@ // libjsig also interposes the sigaction() call below and saves the // old sigaction on it own. 
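The os/ changes here and in the files that follow retire the numbered fatal1/fatal2 variants in favor of fatal(err_msg(...)): the message is formatted first, then reported as a single string. A minimal sketch of that pattern with hypothetical stand-ins (err_msg_model and fatal_model are not the VM utilities; the real err_msg uses a resource-allocated buffer, not a static one):

    #include <cstdarg>
    #include <cstdio>
    #include <cstdlib>

    // Format into a buffer first (as err_msg does), then report once.
    // Static buffer for brevity only; not thread-safe.
    static const char* err_msg_model(const char* fmt, ...) {
      static char buf[256];
      va_list ap;
      va_start(ap, fmt);
      vsnprintf(buf, sizeof(buf), fmt, ap);
      va_end(ap);
      return buf;
    }

    static void fatal_model(const char* msg) {
      std::fprintf(stderr, "fatal error: %s\n", msg);
      std::abort();
    }

    // Usage mirroring the converted call sites:
    //   fatal_model(err_msg_model("sysconf failed (%s)", strerror(errno)));
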
} else { - fatal2("Encountered unexpected pre-existing sigaction handler %#lx for signal %d.", (long)oldhand, sig); + fatal(err_msg("Encountered unexpected pre-existing sigaction handler " + "%#lx for signal %d.", (long)oldhand, sig)); } } @@ -4764,7 +4767,8 @@ page_size = sysconf(_SC_PAGESIZE); if (page_size == -1) - fatal1("os_solaris.cpp: os::init: sysconf failed (%s)", strerror(errno)); + fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)", + strerror(errno))); init_page_sizes((size_t) page_size); Solaris::initialize_system_info(); @@ -4775,7 +4779,7 @@ int fd = open("/dev/zero", O_RDWR); if (fd < 0) { - fatal1("os::init: cannot open /dev/zero (%s)", strerror(errno)); + fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno))); } else { Solaris::set_dev_zero_fd(fd); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/os/solaris/vm/threadCritical_solaris.cpp --- a/src/os/solaris/vm/threadCritical_solaris.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/os/solaris/vm/threadCritical_solaris.cpp Wed May 05 16:39:47 2010 -0700 @@ -47,7 +47,8 @@ thread_t owner = thr_self(); if (global_mut_owner != owner) { if (os::Solaris::mutex_lock(&global_mut)) - fatal1("ThreadCritical::ThreadCritical: mutex_lock failed (%s)", strerror(errno)); + fatal(err_msg("ThreadCritical::ThreadCritical: mutex_lock failed (%s)", + strerror(errno))); assert(global_mut_count == 0, "must have clean count"); assert(global_mut_owner == -1, "must have clean owner"); } @@ -66,7 +67,8 @@ if (global_mut_count == 0) { global_mut_owner = -1; if (os::Solaris::mutex_unlock(&global_mut)) - fatal1("ThreadCritical::~ThreadCritical: mutex_unlock failed (%s)", strerror(errno)); + fatal(err_msg("ThreadCritical::~ThreadCritical: mutex_unlock failed " + "(%s)", strerror(errno))); } } else { assert (Threads::number_of_threads() == 0, "valid only during initialization"); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/os/windows/vm/os_windows.cpp --- a/src/os/windows/vm/os_windows.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/os/windows/vm/os_windows.cpp Wed May 05 16:39:47 2010 -0700 @@ -724,7 +724,7 @@ java_origin.wMilliseconds = 0; FILETIME jot; if (!SystemTimeToFileTime(&java_origin, &jot)) { - fatal1("Error = %d\nWindows error", GetLastError()); + fatal(err_msg("Error = %d\nWindows error", GetLastError())); } _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime); _has_calculated_offset = 1; @@ -4095,7 +4095,7 @@ } int err = GetLastError(); if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) { - fatal1("heap walk aborted with error %d", err); + fatal(err_msg("heap walk aborted with error %d", err)); } HeapUnlock(heap); } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp --- a/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Wed May 05 16:39:47 2010 -0700 @@ -153,7 +153,7 @@ if (rslt == ENOMEM) { vm_exit_out_of_memory(0, "pthread_getattr_np"); } else { - fatal1("pthread_getattr_np failed with errno = %d", rslt); + fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt)); } } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/os_cpu/linux_x86/vm/os_linux_x86.cpp --- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Wed May 05 16:39:47 2010 -0700 @@ -680,7 +680,7 @@ if (rslt == ENOMEM) { vm_exit_out_of_memory(0, "pthread_getattr_np"); } else { - fatal1("pthread_getattr_np failed with errno = %d", rslt); + 
fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt)); } } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/os_cpu/linux_zero/vm/thread_linux_zero.hpp --- a/src/os_cpu/linux_zero/vm/thread_linux_zero.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/os_cpu/linux_zero/vm/thread_linux_zero.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,6 +1,6 @@ /* * Copyright 2000-2007 Sun Microsystems, Inc. All Rights Reserved. - * Copyright 2007, 2008, 2009 Red Hat, Inc. + * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -68,12 +68,13 @@ public: void set_last_Java_frame() { - JavaFrameAnchor *jfa = frame_anchor(); - jfa->set_last_Java_sp((intptr_t *) top_zero_frame()); + set_last_Java_frame(top_zero_frame()); } void reset_last_Java_frame() { - JavaFrameAnchor *jfa = frame_anchor(); - jfa->set_last_Java_sp(NULL); + set_last_Java_frame(NULL); + } + void set_last_Java_frame(ZeroFrame* frame) { + frame_anchor()->set_last_Java_sp((intptr_t *) frame); } private: diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/adlc/formssel.cpp --- a/src/share/vm/adlc/formssel.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/adlc/formssel.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -3861,6 +3861,8 @@ strcmp(opType,"RoundFloat")==0 || strcmp(opType,"ReverseBytesI")==0 || strcmp(opType,"ReverseBytesL")==0 || + strcmp(opType,"ReverseBytesUS")==0 || + strcmp(opType,"ReverseBytesS")==0 || strcmp(opType,"Replicate16B")==0 || strcmp(opType,"Replicate8B")==0 || strcmp(opType,"Replicate4B")==0 || diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/adlc/output_c.cpp --- a/src/share/vm/adlc/output_c.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/adlc/output_c.cpp Wed May 05 16:39:47 2010 -0700 @@ -721,8 +721,8 @@ fprintf(fp_cpp, " }\n"); fprintf(fp_cpp, "#endif\n\n"); #endif - fprintf(fp_cpp, " assert(this, \"NULL pipeline info\")\n"); - fprintf(fp_cpp, " assert(pred, \"NULL predecessor pipline info\")\n\n"); + fprintf(fp_cpp, " assert(this, \"NULL pipeline info\");\n"); + fprintf(fp_cpp, " assert(pred, \"NULL predecessor pipline info\");\n\n"); fprintf(fp_cpp, " if (pred->hasFixedLatency())\n return (pred->fixedLatency());\n\n"); fprintf(fp_cpp, " // If this is not an operand, then assume a dependence with 0 latency\n"); fprintf(fp_cpp, " if (opnd > _read_stage_count)\n return (0);\n\n"); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/asm/assembler.cpp --- a/src/share/vm/asm/assembler.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/asm/assembler.cpp Wed May 05 16:39:47 2010 -0700 @@ -43,7 +43,8 @@ _code_pos = cs->end(); _oop_recorder= code->oop_recorder(); if (_code_begin == NULL) { - vm_exit_out_of_memory1(0, "CodeCache: no room for %s", code->name()); + vm_exit_out_of_memory(0, err_msg("CodeCache: no room for %s", + code->name())); } } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/asm/codeBuffer.hpp --- a/src/share/vm/asm/codeBuffer.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/asm/codeBuffer.hpp Wed May 05 16:39:47 2010 -0700 @@ -40,6 +40,7 @@ Exceptions, // Offset where exception handler lives Deopt, // Offset where deopt handler lives DeoptMH, // Offset where MethodHandle deopt handler 
lives + UnwindHandler, // Offset to default unwind handler max_Entries }; // special value to note codeBlobs where profile (forte) stack walking is @@ -59,6 +60,7 @@ _values[Exceptions ] = -1; _values[Deopt ] = -1; _values[DeoptMH ] = -1; + _values[UnwindHandler ] = -1; } int value(Entries e) { return _values[e]; } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/c1/c1_Compilation.cpp --- a/src/share/vm/c1/c1_Compilation.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/c1/c1_Compilation.cpp Wed May 05 16:39:47 2010 -0700 @@ -229,6 +229,10 @@ code_offsets->set_value(CodeOffsets::DeoptMH, assembler->emit_deopt_handler()); CHECK_BAILOUT(); + // Emit the handler to remove the activation from the stack and + // dispatch to the caller. + offsets()->set_value(CodeOffsets::UnwindHandler, assembler->emit_unwind_handler()); + // done masm()->flush(); } @@ -312,7 +316,7 @@ implicit_exception_table(), compiler(), _env->comp_level(), - needs_debug_information(), + true, has_unsafe_access() ); } @@ -445,8 +449,6 @@ assert(_arena == NULL, "shouldn't only one instance of Compilation in existence at a time"); _arena = Thread::current()->resource_area(); _compilation = this; - _needs_debug_information = _env->jvmti_can_examine_or_deopt_anywhere() || - JavaMonitorsInStackTrace || AlwaysEmitDebugInfo || DeoptimizeALot; _exception_info_list = new ExceptionInfoList(); _implicit_exception_table.set_size(0); compile_method(); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/c1/c1_Compilation.hpp --- a/src/share/vm/c1/c1_Compilation.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/c1/c1_Compilation.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -70,7 +70,6 @@ int _max_spills; FrameMap* _frame_map; C1_MacroAssembler* _masm; - bool _needs_debug_information; bool _has_exception_handlers; bool _has_fpu_code; bool _has_unsafe_access; @@ -117,7 +116,6 @@ // accessors ciEnv* env() const { return _env; } AbstractCompiler* compiler() const { return _compiler; } - bool needs_debug_information() const { return _needs_debug_information; } bool has_exception_handlers() const { return _has_exception_handlers; } bool has_fpu_code() const { return _has_fpu_code; } bool has_unsafe_access() const { return _has_unsafe_access; } @@ -132,7 +130,6 @@ CodeOffsets* offsets() { return &_offsets; } // setters - void set_needs_debug_information(bool f) { _needs_debug_information = f; } void set_has_exception_handlers(bool f) { _has_exception_handlers = f; } void set_has_fpu_code(bool f) { _has_fpu_code = f; } void set_has_unsafe_access(bool f) { _has_unsafe_access = f; } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/c1/c1_GraphBuilder.cpp --- a/src/share/vm/c1/c1_GraphBuilder.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp Wed May 05 16:39:47 2010 -0700 @@ -829,12 +829,8 @@ // should be left alone since there can be only one and all code // should dispatch to the same one. 
XHandler* h = handlers->handler_at(i); - if (h->handler_bci() != SynchronizationEntryBCI) { - h->set_entry_block(block_at(h->handler_bci())); - } else { - assert(h->entry_block()->is_set(BlockBegin::default_exception_handler_flag), - "should be the synthetic unlock block"); - } + assert(h->handler_bci() != SynchronizationEntryBCI, "must be real"); + h->set_entry_block(block_at(h->handler_bci())); } _jsr_xhandlers = handlers; } @@ -1497,7 +1493,6 @@ Dependencies* GraphBuilder::dependency_recorder() const { assert(DeoptC1, "need debug information"); - compilation()->set_needs_debug_information(true); return compilation()->dependency_recorder(); } @@ -2867,19 +2862,6 @@ _initial_state = state_at_entry(); start_block->merge(_initial_state); - // setup an exception handler to do the unlocking and/or - // notification and unwind the frame. - BlockBegin* sync_handler = new BlockBegin(-1); - sync_handler->set(BlockBegin::exception_entry_flag); - sync_handler->set(BlockBegin::is_on_work_list_flag); - sync_handler->set(BlockBegin::default_exception_handler_flag); - - ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0); - XHandler* h = new XHandler(desc); - h->set_entry_block(sync_handler); - scope_data()->xhandlers()->append(h); - scope_data()->set_has_handler(); - // complete graph _vmap = new ValueMap(); scope->compute_lock_stack_size(); @@ -2930,19 +2912,6 @@ } CHECK_BAILOUT(); - if (sync_handler && sync_handler->state() != NULL) { - Value lock = NULL; - if (method()->is_synchronized()) { - lock = method()->is_static() ? new Constant(new InstanceConstant(method()->holder()->java_mirror())) : - _initial_state->local_at(0); - - sync_handler->state()->unlock(); - sync_handler->state()->lock(scope, lock); - - } - fill_sync_handler(lock, sync_handler, true); - } - _start = setup_start_block(osr_bci, start_block, _osr_entry, _initial_state); eliminate_redundant_phis(_start); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/c1/c1_Instruction.hpp --- a/src/share/vm/c1/c1_Instruction.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/c1/c1_Instruction.hpp Wed May 05 16:39:47 2010 -0700 @@ -1628,11 +1628,10 @@ backward_branch_target_flag = 1 << 4, is_on_work_list_flag = 1 << 5, was_visited_flag = 1 << 6, - default_exception_handler_flag = 1 << 8, // identify block which represents the default exception handler - parser_loop_header_flag = 1 << 9, // set by parser to identify blocks where phi functions can not be created on demand - critical_edge_split_flag = 1 << 10, // set for all blocks that are introduced when critical edges are split - linear_scan_loop_header_flag = 1 << 11, // set during loop-detection for LinearScan - linear_scan_loop_end_flag = 1 << 12 // set during loop-detection for LinearScan + parser_loop_header_flag = 1 << 7, // set by parser to identify blocks where phi functions can not be created on demand + critical_edge_split_flag = 1 << 8, // set for all blocks that are introduced when critical edges are split + linear_scan_loop_header_flag = 1 << 9, // set during loop-detection for LinearScan + linear_scan_loop_end_flag = 1 << 10 // set during loop-detection for LinearScan }; void set(Flag f) { _flags |= f; } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/c1/c1_LIR.cpp --- a/src/share/vm/c1/c1_LIR.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/c1/c1_LIR.cpp Wed May 05 16:39:47 2010 -0700 @@ -626,8 +626,7 @@ break; } - case lir_throw: - case lir_unwind: { + case lir_throw: { assert(op->as_Op2() != NULL, "must be"); LIR_Op2* 
op2 = (LIR_Op2*)op; @@ -639,6 +638,17 @@ break; } + case lir_unwind: { + assert(op->as_Op1() != NULL, "must be"); + LIR_Op1* op1 = (LIR_Op1*)op; + + assert(op1->_info == NULL, "no info"); + assert(op1->_opr->is_valid(), "exception oop"); do_input(op1->_opr); + assert(op1->_result->is_illegal(), "no result"); + + break; + } + case lir_tan: case lir_sin: diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/c1/c1_LIR.hpp --- a/src/share/vm/c1/c1_LIR.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/c1/c1_LIR.hpp Wed May 05 16:39:47 2010 -0700 @@ -801,6 +801,7 @@ , lir_monaddr , lir_roundfp , lir_safepoint + , lir_unwind , end_op1 , begin_op2 , lir_cmp @@ -830,7 +831,6 @@ , lir_ushr , lir_alloc_array , lir_throw - , lir_unwind , lir_compare_to , end_op2 , begin_op3 @@ -1827,8 +1827,12 @@ void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor, left, right, dst)); } void null_check(LIR_Opr opr, CodeEmitInfo* info) { append(new LIR_Op1(lir_null_check, opr, info)); } - void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info)); } - void unwind_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { append(new LIR_Op2(lir_unwind, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info)); } + void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { + append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info)); + } + void unwind_exception(LIR_Opr exceptionOop) { + append(new LIR_Op1(lir_unwind, exceptionOop)); + } void compare_to (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_compare_to, left, right, dst)); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/c1/c1_LIRAssembler.cpp --- a/src/share/vm/c1/c1_LIRAssembler.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/c1/c1_LIRAssembler.cpp Wed May 05 16:39:47 2010 -0700 @@ -552,6 +552,10 @@ monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr()); break; + case lir_unwind: + unwind_op(op->in_opr()); + break; + default: Unimplemented(); break; @@ -707,8 +711,7 @@ break; case lir_throw: - case lir_unwind: - throw_op(op->in_opr1(), op->in_opr2(), op->info(), op->code() == lir_unwind); + throw_op(op->in_opr1(), op->in_opr2(), op->info()); break; default: diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/c1/c1_LIRAssembler.hpp --- a/src/share/vm/c1/c1_LIRAssembler.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/c1/c1_LIRAssembler.hpp Wed May 05 16:39:47 2010 -0700 @@ -39,6 +39,8 @@ Instruction* _pending_non_safepoint; int _pending_non_safepoint_offset; + Label _unwind_handler_entry; + #ifdef ASSERT BlockList _branch_target_blocks; void check_no_unbound_labels(); @@ -134,6 +136,7 @@ // code patterns int emit_exception_handler(); + int emit_unwind_handler(); void emit_exception_entries(ExceptionInfoList* info_list); int emit_deopt_handler(); @@ -217,7 +220,8 @@ void build_frame(); - void throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind); + void throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info); + void unwind_op(LIR_Opr exceptionOop); void monitor_address(int monitor_ix, LIR_Opr dst); void align_backward_branch_target(); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/c1/c1_LIRGenerator.cpp --- a/src/share/vm/c1/c1_LIRGenerator.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp Wed May 05 
16:39:47 2010 -0700 @@ -1765,35 +1765,17 @@ __ null_check(exception_opr, new CodeEmitInfo(info, true)); } - if (compilation()->env()->jvmti_can_post_on_exceptions() && - !block()->is_set(BlockBegin::default_exception_handler_flag)) { + if (compilation()->env()->jvmti_can_post_on_exceptions()) { // we need to go through the exception lookup path to get JVMTI // notification done unwind = false; } - assert(!block()->is_set(BlockBegin::default_exception_handler_flag) || unwind, - "should be no more handlers to dispatch to"); - - if (compilation()->env()->dtrace_method_probes() && - block()->is_set(BlockBegin::default_exception_handler_flag)) { - // notify that this frame is unwinding - BasicTypeList signature; - signature.append(T_INT); // thread - signature.append(T_OBJECT); // methodOop - LIR_OprList* args = new LIR_OprList(); - args->append(getThreadPointer()); - LIR_Opr meth = new_register(T_OBJECT); - __ oop2reg(method()->constant_encoding(), meth); - args->append(meth); - call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL); - } - // move exception oop into fixed register __ move(exception_opr, exceptionOopOpr()); if (unwind) { - __ unwind_exception(LIR_OprFact::illegalOpr, exceptionOopOpr(), info); + __ unwind_exception(exceptionOopOpr()); } else { __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info); } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/c1/c1_LinearScan.cpp --- a/src/share/vm/c1/c1_LinearScan.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/c1/c1_LinearScan.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2005-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -2608,12 +2608,17 @@ } else if (opr->is_double_xmm()) { assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation"); VMReg rname_first = opr->as_xmm_double_reg()->as_VMReg(); +# ifdef _LP64 + first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first)); + second = &_int_0_scope_value; +# else first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first)); // %%% This is probably a waste but we'll keep things as they were for now if (true) { VMReg rname_second = rname_first->next(); second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second)); } +# endif #endif } else if (opr->is_double_fpu()) { @@ -2639,13 +2644,17 @@ #endif VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrHi()); - +#ifdef _LP64 + first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first)); + second = &_int_0_scope_value; +#else first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first)); // %%% This is probably a waste but we'll keep things as they were for now if (true) { VMReg rname_second = rname_first->next(); second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second)); } +#endif } else { ShouldNotReachHere(); @@ -2805,9 +2814,6 @@ void LinearScan::compute_debug_info(CodeEmitInfo* info, int op_id) { - if (!compilation()->needs_debug_information()) { - return; - } TRACE_LINEAR_SCAN(3, tty->print_cr("creating debug information at op_id %d", op_id)); IRScope* innermost_scope = info->scope(); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/c1/c1_globals.hpp --- a/src/share/vm/c1/c1_globals.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/c1/c1_globals.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2000-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -252,9 +252,6 @@ develop(bool, BailoutOnExceptionHandlers, false, \ "bailout of compilation for methods with exception handlers") \ \ - develop(bool, AlwaysEmitDebugInfo, false, \ - "always emit debug info") \ - \ develop(bool, InstallMethods, true, \ "Install methods at the end of successful compilations") \ \ diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/ci/bcEscapeAnalyzer.cpp --- a/src/share/vm/ci/bcEscapeAnalyzer.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/ci/bcEscapeAnalyzer.cpp Wed May 05 16:39:47 2010 -0700 @@ -1408,8 +1408,11 @@ } void BCEscapeAnalyzer::copy_dependencies(Dependencies *deps) { - if(!has_dependencies()) - return; + if (ciEnv::current()->jvmti_can_hotswap_or_post_breakpoint()) { + // Also record evol dependencies so redefinition of the + // callee will trigger recompilation. + deps->assert_evol_method(method()); + } for (int i = 0; i < _dependencies.length(); i+=2) { ciKlass *k = _dependencies[i]->as_klass(); ciMethod *m = _dependencies[i+1]->as_method(); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/ci/ciEnv.cpp --- a/src/share/vm/ci/ciEnv.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/ci/ciEnv.cpp Wed May 05 16:39:47 2010 -0700 @@ -176,7 +176,6 @@ // Get Jvmti capabilities under lock to get consistant values. 
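With jvmti_can_examine_or_deopt_anywhere removed, ciEnv caches three JVMTI capabilities at compile start and, as the second ciEnv.cpp hunk shows, fails the compile if any of them has been enabled since the snapshot. The validation reduces to "was off at snapshot, is on now"; a sketch with assumed names (JvmtiSnapshotModel is illustrative, not the ciEnv fields):

    struct JvmtiSnapshotModel {
      bool hotswap_or_post_breakpoint;
      bool access_local_variables;
      bool post_on_exceptions;
    };

    // A capability that appeared after the snapshot means the compiled code
    // was built under assumptions that no longer hold.
    static bool capability_appeared(bool cached, bool current) {
      return !cached && current;
    }

    static bool must_reject_compile(const JvmtiSnapshotModel& cached,
                                    const JvmtiSnapshotModel& now) {
      return capability_appeared(cached.hotswap_or_post_breakpoint,
                                 now.hotswap_or_post_breakpoint) ||
             capability_appeared(cached.access_local_variables,
                                 now.access_local_variables) ||
             capability_appeared(cached.post_on_exceptions,
                                 now.post_on_exceptions);
    }
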
MutexLocker mu(JvmtiThreadState_lock); _jvmti_can_hotswap_or_post_breakpoint = JvmtiExport::can_hotswap_or_post_breakpoint(); - _jvmti_can_examine_or_deopt_anywhere = JvmtiExport::can_examine_or_deopt_anywhere(); _jvmti_can_access_local_variables = JvmtiExport::can_access_local_variables(); _jvmti_can_post_on_exceptions = JvmtiExport::can_post_on_exceptions(); } @@ -887,8 +886,6 @@ if (!failing() && ( (!jvmti_can_hotswap_or_post_breakpoint() && JvmtiExport::can_hotswap_or_post_breakpoint()) || - (!jvmti_can_examine_or_deopt_anywhere() && - JvmtiExport::can_examine_or_deopt_anywhere()) || (!jvmti_can_access_local_variables() && JvmtiExport::can_access_local_variables()) || (!jvmti_can_post_on_exceptions() && diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/ci/ciEnv.hpp --- a/src/share/vm/ci/ciEnv.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/ci/ciEnv.hpp Wed May 05 16:39:47 2010 -0700 @@ -55,7 +55,6 @@ // Cache Jvmti state bool _jvmti_can_hotswap_or_post_breakpoint; - bool _jvmti_can_examine_or_deopt_anywhere; bool _jvmti_can_access_local_variables; bool _jvmti_can_post_on_exceptions; @@ -257,7 +256,6 @@ // Cache Jvmti state void cache_jvmti_state(); bool jvmti_can_hotswap_or_post_breakpoint() const { return _jvmti_can_hotswap_or_post_breakpoint; } - bool jvmti_can_examine_or_deopt_anywhere() const { return _jvmti_can_examine_or_deopt_anywhere; } bool jvmti_can_access_local_variables() const { return _jvmti_can_access_local_variables; } bool jvmti_can_post_on_exceptions() const { return _jvmti_can_post_on_exceptions; } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/classfile/classFileParser.cpp --- a/src/share/vm/classfile/classFileParser.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/classfile/classFileParser.cpp Wed May 05 16:39:47 2010 -0700 @@ -334,7 +334,8 @@ } break; default: - fatal1("bad constant pool tag value %u", cp->tag_at(index).value()); + fatal(err_msg("bad constant pool tag value %u", + cp->tag_at(index).value())); ShouldNotReachHere(); break; } // end of switch @@ -2956,8 +2957,8 @@ #endif bool compact_fields = CompactFields; int allocation_style = FieldsAllocationStyle; - if( allocation_style < 0 || allocation_style > 1 ) { // Out of range? - assert(false, "0 <= FieldsAllocationStyle <= 1"); + if( allocation_style < 0 || allocation_style > 2 ) { // Out of range? + assert(false, "0 <= FieldsAllocationStyle <= 2"); allocation_style = 1; // Optimistic } @@ -2993,6 +2994,25 @@ } else if( allocation_style == 1 ) { // Fields order: longs/doubles, ints, shorts/chars, bytes, oops next_nonstatic_double_offset = next_nonstatic_field_offset; + } else if( allocation_style == 2 ) { + // Fields allocation: oops fields in super and sub classes are together. 
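FieldsAllocationStyle gains value 2 in the code below: place this class's oop fields adjacent to the superclass's trailing oop-map block when the two would abut, falling back to style 1 (oops last) otherwise. The abutment test is plain offset arithmetic; a sketch with simplified stand-in types (OopMapBlockModel is illustrative, not the VM's OopMapBlock):

    struct OopMapBlockModel {
      int offset;  // byte offset of the first oop in the block
      int count;   // number of consecutive oops
    };

    // True when the superclass's last oop block ends exactly where this
    // class's first nonstatic field would start, so new oops can extend it.
    static bool oops_would_abut(const OopMapBlockModel& last_super_map,
                                int heap_oop_size,
                                int next_nonstatic_field_offset) {
      int end = last_super_map.offset + last_super_map.count * heap_oop_size;
      return end == next_nonstatic_field_offset;
    }
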
+ if( nonstatic_field_size > 0 && super_klass() != NULL && + super_klass->nonstatic_oop_map_size() > 0 ) { + int map_size = super_klass->nonstatic_oop_map_size(); + OopMapBlock* first_map = super_klass->start_of_nonstatic_oop_maps(); + OopMapBlock* last_map = first_map + map_size - 1; + int next_offset = last_map->offset() + (last_map->count() * heapOopSize); + if (next_offset == next_nonstatic_field_offset) { + allocation_style = 0; // allocate oops first + next_nonstatic_oop_offset = next_nonstatic_field_offset; + next_nonstatic_double_offset = next_nonstatic_oop_offset + + (nonstatic_oop_count * heapOopSize); + } + } + if( allocation_style == 2 ) { + allocation_style = 1; // allocate oops last + next_nonstatic_double_offset = next_nonstatic_field_offset; + } } else { ShouldNotReachHere(); } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/classfile/dictionary.cpp --- a/src/share/vm/classfile/dictionary.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/classfile/dictionary.cpp Wed May 05 16:39:47 2010 -0700 @@ -127,7 +127,7 @@ bool Dictionary::do_unloading(BoolObjectClosure* is_alive) { - assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint") + assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); bool class_was_unloaded = false; int index = 0; // Defined here for portability! Do not move diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/classfile/loaderConstraints.cpp --- a/src/share/vm/classfile/loaderConstraints.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/classfile/loaderConstraints.cpp Wed May 05 16:39:47 2010 -0700 @@ -103,7 +103,7 @@ void LoaderConstraintTable::purge_loader_constraints(BoolObjectClosure* is_alive) { - assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint") + assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); // Remove unloaded entries from constraint table for (int index = 0; index < table_size(); index++) { LoaderConstraintEntry** p = bucket_addr(index); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/classfile/resolutionErrors.cpp --- a/src/share/vm/classfile/resolutionErrors.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/classfile/resolutionErrors.cpp Wed May 05 16:39:47 2010 -0700 @@ -102,7 +102,7 @@ // Remove unloaded entries from the table void ResolutionErrorTable::purge_resolution_errors(BoolObjectClosure* is_alive) { - assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint") + assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); for (int i = 0; i < table_size(); i++) { for (ResolutionErrorEntry** p = bucket_addr(i); *p != NULL; ) { ResolutionErrorEntry* entry = *p; diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/classfile/vmSymbols.hpp --- a/src/share/vm/classfile/vmSymbols.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/classfile/vmSymbols.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -357,6 +357,8 @@ template(void_double_signature, "()D") \ template(int_void_signature, "(I)V") \ template(int_int_signature, "(I)I") \ + template(char_char_signature, "(C)C") \ + template(short_short_signature, "(S)S") \ template(int_bool_signature, "(I)Z") \ template(float_int_signature, "(F)I") \ template(double_long_signature, "(D)J") \ @@ -585,6 +587,10 @@ do_name( reverseBytes_name, "reverseBytes") \ do_intrinsic(_reverseBytes_l, java_lang_Long, reverseBytes_name, long_long_signature, F_S) \ /* (symbol reverseBytes_name defined above) */ \ + do_intrinsic(_reverseBytes_c, java_lang_Character, reverseBytes_name, char_char_signature, F_S) \ + /* (symbol reverseBytes_name defined above) */ \ + do_intrinsic(_reverseBytes_s, java_lang_Short, reverseBytes_name, short_short_signature, F_S) \ + /* (symbol reverseBytes_name defined above) */ \ \ do_intrinsic(_identityHashCode, java_lang_System, identityHashCode_name, object_int_signature, F_S) \ do_name( identityHashCode_name, "identityHashCode") \ diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/code/codeCache.cpp --- a/src/share/vm/code/codeCache.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/code/codeCache.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -284,9 +284,11 @@ cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr(); } #endif //PRODUCT - if (is_live) + if (is_live) { // Perform cur->oops_do(f), maybe just once per nmethod. f->do_code_blob(cur); + cur->fix_oop_relocations(); + } } // Check for stray marks. 
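The new char_char and short_short signatures above let the compiler match Character.reverseBytes and Short.reverseBytes as intrinsics alongside the existing int and long forms. For reference, the 16-bit swap those methods compute reduces to a one-liner; a portable sketch of the scalar computation (not the generated machine code):

#include <cstdint>
#include <cstdio>

// The 16-bit byte swap computed by Character.reverseBytes(char) and
// Short.reverseBytes(short): exchange the high and low bytes.
static uint16_t reverse_bytes_16(uint16_t x) {
  return (uint16_t)((x << 8) | (x >> 8));
}

int main() {
  printf("0x%04x -> 0x%04x\n", 0x1234u,
         (unsigned)reverse_bytes_16(0x1234));  // prints 0x3412
  return 0;
}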
diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/code/exceptionHandlerTable.cpp --- a/src/share/vm/code/exceptionHandlerTable.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/code/exceptionHandlerTable.cpp Wed May 05 16:39:47 2010 -0700 @@ -221,6 +221,6 @@ for (uint i = 0; i < len(); i++) { if ((*adr(i) > (unsigned int)nm->code_size()) || (*(adr(i)+1) > (unsigned int)nm->code_size())) - fatal1("Invalid offset in ImplicitExceptionTable at %lx", _data); + fatal(err_msg("Invalid offset in ImplicitExceptionTable at " PTR_FORMAT, _data)); } } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/code/nmethod.cpp --- a/src/share/vm/code/nmethod.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/code/nmethod.cpp Wed May 05 16:39:47 2010 -0700 @@ -685,6 +685,7 @@ _exception_offset = 0; _deoptimize_offset = 0; _deoptimize_mh_offset = 0; + _unwind_handler_offset = -1; _trap_offset = offsets->value(CodeOffsets::Dtrace_trap); _orig_pc_offset = 0; _stub_offset = data_offset(); @@ -798,6 +799,11 @@ _exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions); _deoptimize_offset = _stub_offset + offsets->value(CodeOffsets::Deopt); _deoptimize_mh_offset = _stub_offset + offsets->value(CodeOffsets::DeoptMH); + if (offsets->value(CodeOffsets::UnwindHandler) != -1) { + _unwind_handler_offset = instructions_offset() + offsets->value(CodeOffsets::UnwindHandler); + } else { + _unwind_handler_offset = -1; + } _consts_offset = instructions_offset() + code_buffer->total_offset_of(code_buffer->consts()->start()); _scopes_data_offset = data_offset(); _scopes_pcs_offset = _scopes_data_offset + round_to(debug_info->data_size (), oopSize); @@ -1528,7 +1534,8 @@ } } ic->set_to_clean(); - assert(ic->cached_oop() == NULL, "cached oop in IC should be cleared") + assert(ic->cached_oop() == NULL, + "cached oop in IC should be cleared"); } } } @@ -2117,7 +2124,7 @@ ResourceMark rm; if (!CodeCache::contains(this)) { - fatal1("nmethod at " INTPTR_FORMAT " not in zone", this); + fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this)); } if(is_native_method() ) @@ -2125,7 +2132,8 @@ nmethod* nm = CodeCache::find_nmethod(verified_entry_point()); if (nm != this) { - fatal1("findNMethod did not find this nmethod (" INTPTR_FORMAT ")", this); + fatal(err_msg("findNMethod did not find this nmethod (" INTPTR_FORMAT ")", + this)); } for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) { diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/code/nmethod.hpp --- a/src/share/vm/code/nmethod.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/code/nmethod.hpp Wed May 05 16:39:47 2010 -0700 @@ -154,6 +154,9 @@ // All deoptee's at a MethodHandle call site will resume execution // at this location described by this offset. int _deoptimize_mh_offset; + // Offset of the unwind handler if it exists + int _unwind_handler_offset; + #ifdef HAVE_DTRACE_H int _trap_offset; #endif // def HAVE_DTRACE_H @@ -341,6 +344,7 @@ address exception_begin () const { return header_begin() + _exception_offset ; } address deopt_handler_begin () const { return header_begin() + _deoptimize_offset ; } address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset ; } + address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? 
(header_begin() + _unwind_handler_offset) : NULL; } address stub_begin () const { return header_begin() + _stub_offset ; } address stub_end () const { return header_begin() + _consts_offset ; } address consts_begin () const { return header_begin() + _consts_offset ; } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/code/stubs.cpp --- a/src/share/vm/code/stubs.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/code/stubs.cpp Wed May 05 16:39:47 2010 -0700 @@ -62,7 +62,9 @@ Mutex* lock, const char* name) : _mutex(lock) { intptr_t size = round_to(buffer_size, 2*BytesPerWord); BufferBlob* blob = BufferBlob::create(name, size); - if( blob == NULL ) vm_exit_out_of_memory1(size, "CodeCache: no room for %s", name); + if( blob == NULL) { + vm_exit_out_of_memory(size, err_msg("CodeCache: no room for %s", name)); + } _stub_interface = stub_interface; _buffer_size = blob->instructions_size(); _buffer_limit = blob->instructions_size(); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/code/vtableStubs.cpp --- a/src/share/vm/code/vtableStubs.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/code/vtableStubs.cpp Wed May 05 16:39:47 2010 -0700 @@ -45,7 +45,9 @@ if (_chunk == NULL || _chunk + real_size > _chunk_end) { const int bytes = chunk_factor * real_size + pd_code_alignment(); BufferBlob* blob = BufferBlob::create("vtable chunks", bytes); - if( blob == NULL ) vm_exit_out_of_memory1(bytes, "CodeCache: no room for %s", "vtable chunks"); + if (blob == NULL) { + vm_exit_out_of_memory(bytes, "CodeCache: no room for vtable chunks"); + } _chunk = blob->instructions_begin(); _chunk_end = _chunk + bytes; VTune::register_stub("vtable stub", _chunk, _chunk_end); @@ -189,7 +191,9 @@ instanceKlass* ik = instanceKlass::cast(klass); klassVtable* vt = ik->vtable(); klass->print(); - fatal3("bad compiled vtable dispatch: receiver " INTPTR_FORMAT ", index %d (vtable length %d)", (address)receiver, index, vt->length()); + fatal(err_msg("bad compiled vtable dispatch: receiver " INTPTR_FORMAT ", " + "index %d (vtable length %d)", + (address)receiver, index, vt->length())); } #endif // Product diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/compiler/compileBroker.hpp --- a/src/share/vm/compiler/compileBroker.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/compiler/compileBroker.hpp Wed May 05 16:39:47 2010 -0700 @@ -310,7 +310,7 @@ static AbstractCompiler* compiler(int level ) { if (level == CompLevel_fast_compile) return _compilers[0]; - assert(level == CompLevel_highest_tier, "what level?") + assert(level == CompLevel_highest_tier, "what level?"); return _compilers[1]; } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/compiler/compileLog.cpp --- a/src/share/vm/compiler/compileLog.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/compiler/compileLog.cpp Wed May 05 16:39:47 2010 -0700 @@ -68,7 +68,7 @@ return attrs; } else { // park it in the buffer, so we can put a null on the end - assert(!(kind >= buffer && kind < buffer+100), "not obviously in buffer") + assert(!(kind >= buffer && kind < buffer+100), "not obviously in buffer"); int klen = attrs - kind; strncpy(buffer, kind, klen); buffer[klen] = 0; diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp Wed May 05 16:39:47 2010 -0700 @@ -118,7 +118,7 @@ // TreeList from the first chunk to the next 
chunk and update all // the TreeList pointers in the chunks in the list. if (nextTC == NULL) { - assert(prevFC == NULL, "Not last chunk in the list") + assert(prevFC == NULL, "Not last chunk in the list"); set_tail(NULL); set_head(NULL); } else { diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2007-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,11 +32,10 @@ ConcurrentMarkSweepPolicy* as_concurrent_mark_sweep_policy() { return this; } void initialize_gc_policy_counters(); -#if 1 + virtual void initialize_size_policy(size_t init_eden_size, size_t init_promo_size, size_t init_survivor_size); -#endif // Returns true if the incremental mode is enabled. virtual bool has_soft_ended_eden(); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1815,8 +1815,19 @@ do_compaction_work(clear_all_soft_refs); // Has the GC time limit been exceeded? - check_gc_time_limit(); - + DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration(); + size_t max_eden_size = young_gen->max_capacity() - + young_gen->to()->capacity() - + young_gen->from()->capacity(); + GenCollectedHeap* gch = GenCollectedHeap::heap(); + GCCause::Cause gc_cause = gch->gc_cause(); + size_policy()->check_gc_overhead_limit(_young_gen->used(), + young_gen->eden()->used(), + _cmsGen->max_capacity(), + max_eden_size, + full, + gc_cause, + gch->collector_policy()); } else { do_mark_sweep_work(clear_all_soft_refs, first_state, should_start_over); @@ -1828,55 +1839,6 @@ return; } -void CMSCollector::check_gc_time_limit() { - - // Ignore explicit GC's. Exiting here does not set the flag and - // does not reset the count. Updating of the averages for system - // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC. - GCCause::Cause gc_cause = GenCollectedHeap::heap()->gc_cause(); - if (GCCause::is_user_requested_gc(gc_cause) || - GCCause::is_serviceability_requested_gc(gc_cause)) { - return; - } - - // Calculate the fraction of the CMS generation was freed during - // the last collection. - // Only consider the STW compacting cost for now. - // - // Note that the gc time limit test only works for the collections - // of the young gen + tenured gen and not for collections of the - // permanent gen. That is because the calculation of the space - // freed by the collection is the free space in the young gen + - // tenured gen. 
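The CMS-private check_gc_time_limit being deleted here (its body continues below) is folded into the shared check_gc_overhead_limit call above, which also accounts for eden and survivor capacities. The underlying policy stays the same: declare the overhead limit exceeded when GC cost stays above GCTimeLimit percent while free space stays below GCHeapFreeLimit percent for several consecutive collections. A standalone sketch of that predicate, with an assumed trip threshold standing in for AdaptiveSizePolicyGCTimeLimitThreshold:

#include <cstdio>

// The predicate behind both the removed CMS-local code and the shared
// overhead-limit check: trip when GC cost exceeds GCTimeLimit percent
// of total time while free space is under GCHeapFreeLimit percent,
// and only give up after several consecutive trips.
struct OverheadLimit {
  double gc_time_limit_pct;    // e.g. GCTimeLimit default: 98
  double heap_free_limit_pct;  // e.g. GCHeapFreeLimit default: 2
  int    trip_threshold;       // assumed stand-in for the real flag
  int    count;

  bool exceeded(double gc_cost_fraction, double free_fraction) {
    if (gc_cost_fraction * 100.0 > gc_time_limit_pct &&
        free_fraction * 100.0 < heap_free_limit_pct) {
      return ++count > trip_threshold;  // near the limit yet again
    }
    count = 0;  // a healthy collection resets the streak
    return false;
  }
};

int main() {
  OverheadLimit lim = {98.0, 2.0, 5, 0};
  for (int i = 0; i < 7; i++) {
    if (lim.exceeded(0.99, 0.01)) {
      printf("collection %d: overhead limit exceeded\n", i);
    }
  }
  return 0;
}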
- - double fraction_free = - ((double)_cmsGen->free())/((double)_cmsGen->max_capacity()); - if ((100.0 * size_policy()->compacting_gc_cost()) > - ((double) GCTimeLimit) && - ((fraction_free * 100) < GCHeapFreeLimit)) { - size_policy()->inc_gc_time_limit_count(); - if (UseGCOverheadLimit && - (size_policy()->gc_time_limit_count() > - AdaptiveSizePolicyGCTimeLimitThreshold)) { - size_policy()->set_gc_time_limit_exceeded(true); - // Avoid consecutive OOM due to the gc time limit by resetting - // the counter. - size_policy()->reset_gc_time_limit_count(); - if (PrintGCDetails) { - gclog_or_tty->print_cr(" GC is exceeding overhead limit " - "of %d%%", GCTimeLimit); - } - } else { - if (PrintGCDetails) { - gclog_or_tty->print_cr(" GC would exceed overhead limit " - "of %d%%", GCTimeLimit); - } - } - } else { - size_policy()->reset_gc_time_limit_count(); - } -} - // Resize the perm generation and the tenured generation // after obtaining the free list locks for the // two generations. @@ -6182,6 +6144,11 @@ } curAddr = chunk.end(); } + // A successful mostly concurrent collection has been done. + // Because only the full (i.e., concurrent mode failure) collections + // are being measured for gc overhead limits, clean the "near" flag + // and count. + sp->reset_gc_overhead_limit_count(); _collectorState = Idling; } else { // already have the lock diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -570,10 +570,6 @@ ConcurrentMarkSweepPolicy* _collector_policy; ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; } - // Check whether the gc time limit has been - // exceeded and set the size policy flag - // appropriately. - void check_gc_time_limit(); // XXX Move these to CMSStats ??? FIX ME !!! elapsedTimer _inter_sweep_timer; // time between sweeps elapsedTimer _intra_sweep_timer; // time _in_ sweeps diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp --- a/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -69,9 +69,9 @@ G1CollectorPolicy* g1p = g1h->g1_policy(); if (g1p->adaptive_young_list_length()) { int regions_visited = 0; - g1h->young_list_rs_length_sampling_init(); - while (g1h->young_list_rs_length_sampling_more()) { - g1h->young_list_rs_length_sampling_next(); + g1h->young_list()->rs_length_sampling_init(); + while (g1h->young_list()->rs_length_sampling_more()) { + g1h->young_list()->rs_length_sampling_next(); ++regions_visited; // we try to yield every time we visit 10 regions @@ -162,6 +162,7 @@ if (_worker_id >= cg1r()->worker_thread_num()) { run_young_rs_sampling(); terminate(); + return; } _vtime_start = os::elapsedVTime(); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/g1/concurrentMark.cpp --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp Wed May 05 16:39:47 2010 -0700 @@ -297,6 +297,11 @@ } } +// Currently we do not call this at all. Normally we would call it +// during the concurrent marking / remark phases but we now call +// the lock-based version instead. But we might want to resurrect this +// code in the future. So, we'll leave it here commented out. +#if 0 MemRegion CMRegionStack::pop() { while (true) { // Otherwise... @@ -321,6 +326,41 @@ // Otherwise, we need to try again. } } +#endif // 0 + +void CMRegionStack::push_with_lock(MemRegion mr) { + assert(mr.word_size() > 0, "Precondition"); + MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag); + + if (isFull()) { + _overflow = true; + return; + } + + _base[_index] = mr; + _index += 1; +} + +MemRegion CMRegionStack::pop_with_lock() { + MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag); + + while (true) { + if (_index == 0) { + return MemRegion(); + } + _index -= 1; + + MemRegion mr = _base[_index]; + if (mr.start() != NULL) { + assert(mr.end() != NULL, "invariant"); + assert(mr.word_size() > 0, "invariant"); + return mr; + } else { + // that entry was invalidated... let's skip it + assert(mr.end() == NULL, "invariant"); + } + } +} bool CMRegionStack::invalidate_entries_into_cset() { bool result = false; @@ -668,24 +708,46 @@ // void ConcurrentMark::clearNextBitmap() { - guarantee(!G1CollectedHeap::heap()->mark_in_progress(), "Precondition."); - - // clear the mark bitmap (no grey objects to start with). - // We need to do this in chunks and offer to yield in between - // each chunk. - HeapWord* start = _nextMarkBitMap->startWord(); - HeapWord* end = _nextMarkBitMap->endWord(); - HeapWord* cur = start; - size_t chunkSize = M; - while (cur < end) { - HeapWord* next = cur + chunkSize; - if (next > end) - next = end; - MemRegion mr(cur,next); - _nextMarkBitMap->clearRange(mr); - cur = next; - do_yield_check(); - } + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + G1CollectorPolicy* g1p = g1h->g1_policy(); + + // Make sure that the concurrent mark thread looks to still be in + // the current cycle. + guarantee(cmThread()->during_cycle(), "invariant"); + + // We are finishing up the current cycle by clearing the next + // marking bitmap and getting it ready for the next cycle. During + // this time no other cycle can start. So, let's make sure that this + // is the case. + guarantee(!g1h->mark_in_progress(), "invariant"); + + // clear the mark bitmap (no grey objects to start with). + // We need to do this in chunks and offer to yield in between + // each chunk. 
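Before the clearNextBitmap rewrite continues below, a note on the locking scheme just added: push_with_lock / pop_with_lock trade the lock-free CAS loops for a single CMRegionStack_lock, which makes skipping invalidated (NULL-start) entries straightforward. A minimal standalone analogue using std::mutex (MemRegion reduced to a start/end pair; the class name and fixed capacity are invented for this sketch):

#include <cstddef>
#include <mutex>
#include <vector>

// Minimal analogue of the lock-based CMRegionStack variant: a bounded
// stack whose push raises an overflow flag when full and whose pop
// skips entries that were invalidated in place (start == nullptr).
struct Region { const char* start; const char* end; };

class LockedRegionStack {
  std::mutex _lock;
  std::vector<Region> _base;
  size_t _index = 0;
  bool _overflow = false;
public:
  explicit LockedRegionStack(size_t capacity) : _base(capacity) {}

  void push_with_lock(Region mr) {
    std::lock_guard<std::mutex> x(_lock);
    if (_index == _base.size()) { _overflow = true; return; }
    _base[_index++] = mr;
  }

  Region pop_with_lock() {
    std::lock_guard<std::mutex> x(_lock);
    while (_index > 0) {
      Region mr = _base[--_index];
      if (mr.start != nullptr) return mr;   // skip invalidated entries
    }
    return Region{nullptr, nullptr};        // empty stack
  }

  bool overflow() {
    std::lock_guard<std::mutex> x(_lock);
    return _overflow;
  }
};

int main() {
  LockedRegionStack stack(2);
  stack.push_with_lock(Region{"a", "b"});
  stack.push_with_lock(Region{"c", "d"});
  stack.push_with_lock(Region{"e", "f"});   // full: sets overflow flag
  Region top = stack.pop_with_lock();
  return (top.start != nullptr && stack.overflow()) ? 0 : 1;
}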
+ HeapWord* start = _nextMarkBitMap->startWord(); + HeapWord* end = _nextMarkBitMap->endWord(); + HeapWord* cur = start; + size_t chunkSize = M; + while (cur < end) { + HeapWord* next = cur + chunkSize; + if (next > end) + next = end; + MemRegion mr(cur,next); + _nextMarkBitMap->clearRange(mr); + cur = next; + do_yield_check(); + + // Repeat the asserts from above. We'll do them as asserts here to + // minimize their overhead on the product. However, we'll have + // them as guarantees at the beginning / end of the bitmap + // clearing to get some checking in the product. + assert(cmThread()->during_cycle(), "invariant"); + assert(!g1h->mark_in_progress(), "invariant"); + } + + // Repeat the asserts from above. + guarantee(cmThread()->during_cycle(), "invariant"); + guarantee(!g1h->mark_in_progress(), "invariant"); } class NoteStartOfMarkHRClosure: public HeapRegionClosure { @@ -705,7 +767,8 @@ _has_aborted = false; if (G1PrintReachableAtInitialMark) { - print_reachable(true, "before"); + print_reachable("at-cycle-start", + true /* use_prev_marking */, true /* all */); } // Initialise marking structures. This has to be done in a STW phase. @@ -1917,19 +1980,21 @@ #ifndef PRODUCT -class ReachablePrinterOopClosure: public OopClosure { +class PrintReachableOopClosure: public OopClosure { private: G1CollectedHeap* _g1h; CMBitMapRO* _bitmap; outputStream* _out; bool _use_prev_marking; + bool _all; public: - ReachablePrinterOopClosure(CMBitMapRO* bitmap, - outputStream* out, - bool use_prev_marking) : + PrintReachableOopClosure(CMBitMapRO* bitmap, + outputStream* out, + bool use_prev_marking, + bool all) : _g1h(G1CollectedHeap::heap()), - _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { } + _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking), _all(all) { } void do_oop(narrowOop* p) { do_oop_work(p); } void do_oop( oop* p) { do_oop_work(p); } @@ -1939,9 +2004,11 @@ const char* str = NULL; const char* str2 = ""; - if (!_g1h->is_in_g1_reserved(obj)) - str = "outside G1 reserved"; - else { + if (obj == NULL) { + str = ""; + } else if (!_g1h->is_in_g1_reserved(obj)) { + str = " O"; + } else { HeapRegion* hr = _g1h->heap_region_containing(obj); guarantee(hr != NULL, "invariant"); bool over_tams = false; @@ -1950,74 +2017,67 @@ } else { over_tams = hr->obj_allocated_since_next_marking(obj); } + bool marked = _bitmap->isMarked((HeapWord*) obj); if (over_tams) { - str = "over TAMS"; - if (_bitmap->isMarked((HeapWord*) obj)) { + str = " >"; + if (marked) { str2 = " AND MARKED"; } - } else if (_bitmap->isMarked((HeapWord*) obj)) { - str = "marked"; + } else if (marked) { + str = " M"; } else { - str = "#### NOT MARKED ####"; + str = " NOT"; } } - _out->print_cr(" "PTR_FORMAT" contains "PTR_FORMAT" %s%s", + _out->print_cr(" "PTR_FORMAT": "PTR_FORMAT"%s%s", p, (void*) obj, str, str2); } }; -class ReachablePrinterClosure: public BitMapClosure { +class PrintReachableObjectClosure : public ObjectClosure { private: CMBitMapRO* _bitmap; outputStream* _out; bool _use_prev_marking; + bool _all; + HeapRegion* _hr; public: - ReachablePrinterClosure(CMBitMapRO* bitmap, - outputStream* out, - bool use_prev_marking) : - _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { } - - bool do_bit(size_t offset) { - HeapWord* addr = _bitmap->offsetToHeapWord(offset); - ReachablePrinterOopClosure oopCl(_bitmap, _out, _use_prev_marking); - - _out->print_cr(" obj "PTR_FORMAT", offset %10d (marked)", addr, offset); - oop(addr)->oop_iterate(&oopCl); - _out->print_cr(""); - - return true; + 
PrintReachableObjectClosure(CMBitMapRO* bitmap, + outputStream* out, + bool use_prev_marking, + bool all, + HeapRegion* hr) : + _bitmap(bitmap), _out(out), + _use_prev_marking(use_prev_marking), _all(all), _hr(hr) { } + + void do_object(oop o) { + bool over_tams; + if (_use_prev_marking) { + over_tams = _hr->obj_allocated_since_prev_marking(o); + } else { + over_tams = _hr->obj_allocated_since_next_marking(o); + } + bool marked = _bitmap->isMarked((HeapWord*) o); + bool print_it = _all || over_tams || marked; + + if (print_it) { + _out->print_cr(" "PTR_FORMAT"%s", + o, (over_tams) ? " >" : (marked) ? " M" : ""); + PrintReachableOopClosure oopCl(_bitmap, _out, _use_prev_marking, _all); + o->oop_iterate(&oopCl); + } } }; -class ObjInRegionReachablePrinterClosure : public ObjectClosure { +class PrintReachableRegionClosure : public HeapRegionClosure { private: CMBitMapRO* _bitmap; outputStream* _out; bool _use_prev_marking; - -public: - ObjInRegionReachablePrinterClosure(CMBitMapRO* bitmap, - outputStream* out, - bool use_prev_marking) : - _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { } - - void do_object(oop o) { - ReachablePrinterOopClosure oopCl(_bitmap, _out, _use_prev_marking); - - _out->print_cr(" obj "PTR_FORMAT" (over TAMS)", (void*) o); - o->oop_iterate(&oopCl); - _out->print_cr(""); - } -}; - -class RegionReachablePrinterClosure : public HeapRegionClosure { -private: - CMBitMapRO* _bitmap; - outputStream* _out; - bool _use_prev_marking; + bool _all; public: bool doHeapRegion(HeapRegion* hr) { @@ -2032,22 +2092,35 @@ } _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" " "TAMS: "PTR_FORMAT, b, e, t, p); - _out->print_cr(""); - - ObjInRegionReachablePrinterClosure ocl(_bitmap, _out, _use_prev_marking); - hr->object_iterate_mem_careful(MemRegion(p, t), &ocl); + _out->cr(); + + HeapWord* from = b; + HeapWord* to = t; + + if (to > from) { + _out->print_cr("Objects in ["PTR_FORMAT", "PTR_FORMAT"]", from, to); + _out->cr(); + PrintReachableObjectClosure ocl(_bitmap, _out, + _use_prev_marking, _all, hr); + hr->object_iterate_mem_careful(MemRegion(from, to), &ocl); + _out->cr(); + } return false; } - RegionReachablePrinterClosure(CMBitMapRO* bitmap, - outputStream* out, - bool use_prev_marking) : - _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking) { } + PrintReachableRegionClosure(CMBitMapRO* bitmap, + outputStream* out, + bool use_prev_marking, + bool all) : + _bitmap(bitmap), _out(out), _use_prev_marking(use_prev_marking), _all(all) { } }; -void ConcurrentMark::print_reachable(bool use_prev_marking, const char* str) { - gclog_or_tty->print_cr("== Doing reachable object dump... "); +void ConcurrentMark::print_reachable(const char* str, + bool use_prev_marking, + bool all) { + gclog_or_tty->cr(); + gclog_or_tty->print_cr("== Doing heap dump... "); if (G1PrintReachableBaseFile == NULL) { gclog_or_tty->print_cr(" #### error: no base file defined"); @@ -2082,19 +2155,14 @@ out->print_cr("-- USING %s", (use_prev_marking) ? 
"PTAMS" : "NTAMS"); out->cr(); - RegionReachablePrinterClosure rcl(bitmap, out, use_prev_marking); - out->print_cr("--- ITERATING OVER REGIONS WITH TAMS < TOP"); - out->cr(); - _g1h->heap_region_iterate(&rcl); + out->print_cr("--- ITERATING OVER REGIONS"); out->cr(); - - ReachablePrinterClosure cl(bitmap, out, use_prev_marking); - out->print_cr("--- ITERATING OVER MARKED OBJECTS ON THE BITMAP"); - out->cr(); - bitmap->iterate(&cl); + PrintReachableRegionClosure rcl(bitmap, out, use_prev_marking, all); + _g1h->heap_region_iterate(&rcl); out->cr(); gclog_or_tty->print_cr(" done"); + gclog_or_tty->flush(); } #endif // PRODUCT @@ -3363,7 +3431,7 @@ gclog_or_tty->print_cr("[%d] draining region stack, size = %d", _task_id, _cm->region_stack_size()); - MemRegion mr = _cm->region_stack_pop(); + MemRegion mr = _cm->region_stack_pop_with_lock(); // it returns MemRegion() if the pop fails statsOnly(if (mr.start() != NULL) ++_region_stack_pops ); @@ -3384,7 +3452,7 @@ if (has_aborted()) mr = MemRegion(); else { - mr = _cm->region_stack_pop(); + mr = _cm->region_stack_pop_with_lock(); // it returns MemRegion() if the pop fails statsOnly(if (mr.start() != NULL) ++_region_stack_pops ); } @@ -3417,7 +3485,7 @@ } // Now push the part of the region we didn't scan on the // region stack to make sure a task scans it later. - _cm->region_stack_push(newRegion); + _cm->region_stack_push_with_lock(newRegion); } // break from while mr = MemRegion(); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/g1/concurrentMark.hpp --- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -252,9 +252,19 @@ // with other "push" operations (no pops). void push(MemRegion mr); +#if 0 + // This is currently not used. See the comment in the .cpp file. + // Lock-free; assumes that it will only be called in parallel // with other "pop" operations (no pushes). MemRegion pop(); +#endif // 0 + + // These two are the implementations that use a lock. They can be + // called concurrently with each other but they should not be called + // concurrently with the lock-free versions (push() / pop()). + void push_with_lock(MemRegion mr); + MemRegion pop_with_lock(); bool isEmpty() { return _index == 0; } bool isFull() { return _index == _capacity; } @@ -540,6 +550,10 @@ // Manipulation of the region stack bool region_stack_push(MemRegion mr) { + // Currently we only call the lock-free version during evacuation + // pauses. + assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped"); + _regionStack.push(mr); if (_regionStack.overflow()) { set_has_overflown(); @@ -547,7 +561,33 @@ } return true; } - MemRegion region_stack_pop() { return _regionStack.pop(); } +#if 0 + // Currently this is not used. See the comment in the .cpp file. + MemRegion region_stack_pop() { return _regionStack.pop(); } +#endif // 0 + + bool region_stack_push_with_lock(MemRegion mr) { + // Currently we only call the lock-based version during either + // concurrent marking or remark. 
+ assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(), + "if we are at a safepoint it should be the remark safepoint"); + + _regionStack.push_with_lock(mr); + if (_regionStack.overflow()) { + set_has_overflown(); + return false; + } + return true; + } + MemRegion region_stack_pop_with_lock() { + // Currently we only call the lock-based version during either + // concurrent marking or remark. + assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(), + "if we are at a safepoint it should be the remark safepoint"); + + return _regionStack.pop_with_lock(); + } + int region_stack_size() { return _regionStack.size(); } bool region_stack_overflow() { return _regionStack.overflow(); } bool region_stack_empty() { return _regionStack.isEmpty(); } @@ -612,11 +652,24 @@ // we do nothing. void markAndGrayObjectIfNecessary(oop p); - // This iterates over the marking bitmap (either prev or next) and - // prints out all objects that are marked on the bitmap and indicates - // whether what they point to is also marked or not. It also iterates - // the objects over TAMS (either prev or next). - void print_reachable(bool use_prev_marking, const char* str); + // It iterates over the heap and for each object it comes across it + // will dump the contents of its reference fields, as well as + // liveness information for the object and its referents. The dump + // will be written to a file with the following name: + // G1PrintReachableBaseFile + "." + str. use_prev_marking decides + // whether the prev (use_prev_marking == true) or next + // (use_prev_marking == false) marking information will be used to + // determine the liveness of each object / referent. If all is true, + // all objects in the heap will be dumped, otherwise only the live + // ones. In the dump the following symbols / abbreviations are used: + // M : an explicitly live object (its bitmap bit is set) + // > : an implicitly live object (over tams) + // O : an object outside the G1 heap (typically: in the perm gen) + // NOT : a reference field whose referent is not live + // AND MARKED : indicates that an object is both explicitly and + // implicitly live (it should be one or the other, not both) + void print_reachable(const char* str, + bool use_prev_marking, bool all) PRODUCT_RETURN; // Clear the next marking bitmap (will be called concurrently). void clearNextBitmap(); @@ -680,6 +733,19 @@ // to determine whether any heap regions are located above the finger. void registerCSetRegion(HeapRegion* hr); + // Registers the maximum region-end associated with a set of + // regions with CM. Again this is used to determine whether any + // heap regions are located above the finger. + void register_collection_set_finger(HeapWord* max_finger) { + // max_finger is the highest heap region end of the regions currently + // contained in the collection set. If this value is larger than + // _min_finger then we need to gray objects. + // This routine is like registerCSetRegion but for an entire + // collection of regions. + if (max_finger > _min_finger) + _should_gray_objects = true; + } + // Returns "true" if at least one mark has been completed. 
bool at_least_one_mark_complete() { return _at_least_one_mark_complete; } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp --- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp Wed May 05 16:39:47 2010 -0700 @@ -42,8 +42,8 @@ private: ConcurrentMark* _cm; - bool _started; - bool _in_progress; + volatile bool _started; + volatile bool _in_progress; void sleepBeforeNextCycle(); @@ -67,15 +67,25 @@ // Counting virtual time so far. double vtime_count_accum() { return _vtime_count_accum; } - ConcurrentMark* cm() { return _cm; } + ConcurrentMark* cm() { return _cm; } + + void set_started() { _started = true; } + void clear_started() { _started = false; } + bool started() { return _started; } + + void set_in_progress() { _in_progress = true; } + void clear_in_progress() { _in_progress = false; } + bool in_progress() { return _in_progress; } - void set_started() { _started = true; } - void clear_started() { _started = false; } - bool started() { return _started; } - - void set_in_progress() { _in_progress = true; } - void clear_in_progress() { _in_progress = false; } - bool in_progress() { return _in_progress; } + // This flag returns true from the moment a marking cycle is + // initiated (during the initial-mark pause when started() is set) + // to the moment when the cycle completes (just after the next + // marking bitmap has been cleared and in_progress() is + // cleared). While this flag is true we will not start another cycle + // so that cycles do not overlap. We cannot use just in_progress() + // as the CM thread might take some time to wake up before noticing + // that started() is set and set in_progress(). + bool during_cycle() { return started() || in_progress(); } // Yield for GC void yield(); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp --- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Wed May 05 16:39:47 2010 -0700 @@ -517,7 +517,7 @@ assert(blk_start != NULL && blk_end > blk_start, "phantom block"); assert(blk_end > threshold, "should be past threshold"); - assert(blk_start <= threshold, "blk_start should be at or before threshold") + assert(blk_start <= threshold, "blk_start should be at or before threshold"); assert(pointer_delta(threshold, blk_start) <= N_words, "offset should be <= BlockOffsetSharedArray::N"); assert(Universe::heap()->is_in_reserved(blk_start), diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ // turn it on so that the contents of the young list (scan-only / // to-be-collected) are printed at "strategic" points before / during // / after the collection --- this is useful for debugging -#define SCAN_ONLY_VERBOSE 0 +#define YOUNG_LIST_VERBOSE 0 // CURRENT STATUS // This file is under construction. Search for "FIXME". 
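Stepping back to the _started and _in_progress flags above: they become volatile because they are written by the VM thread (at the initial-mark pause) and by the CM thread (once it wakes up), and during_cycle() must observe either one set to keep marking cycles from overlapping. A standalone sketch of the same window, with std::atomic standing in for HotSpot's volatile:

#include <atomic>
#include <cstdio>

// Sketch of the ConcurrentMarkThread cycle window: a cycle counts as
// live from set_started() at the initial-mark pause until
// clear_in_progress() after the next bitmap is cleared. during_cycle()
// also covers the gap where started is set but the marking thread has
// not yet woken up to set in_progress.
class CycleFlags {
  std::atomic<bool> _started{false};
  std::atomic<bool> _in_progress{false};
public:
  void set_started()       { _started = true; }
  void clear_started()     { _started = false; }
  void set_in_progress()   { _in_progress = true; }
  void clear_in_progress() { _in_progress = false; }
  bool during_cycle()      { return _started || _in_progress; }
};

int main() {
  CycleFlags cm;
  cm.set_started();                                 // initial-mark pause
  printf("during_cycle: %d\n", cm.during_cycle());  // 1: no new cycle
  cm.set_in_progress();                             // CM thread woke up
  cm.clear_started();
  cm.clear_in_progress();                           // bitmap cleared
  printf("during_cycle: %d\n", cm.during_cycle());  // 0: may start again
  return 0;
}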
@@ -133,8 +133,7 @@ YoungList::YoungList(G1CollectedHeap* g1h) : _g1h(g1h), _head(NULL), - _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL), - _length(0), _scan_only_length(0), + _length(0), _last_sampled_rs_lengths(0), _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) { @@ -166,48 +165,6 @@ ++_survivor_length; } -HeapRegion* YoungList::pop_region() { - while (_head != NULL) { - assert( length() > 0, "list should not be empty" ); - HeapRegion* ret = _head; - _head = ret->get_next_young_region(); - ret->set_next_young_region(NULL); - --_length; - assert(ret->is_young(), "region should be very young"); - - // Replace 'Survivor' region type with 'Young'. So the region will - // be treated as a young region and will not be 'confused' with - // newly created survivor regions. - if (ret->is_survivor()) { - ret->set_young(); - } - - if (!ret->is_scan_only()) { - return ret; - } - - // scan-only, we'll add it to the scan-only list - if (_scan_only_tail == NULL) { - guarantee( _scan_only_head == NULL, "invariant" ); - - _scan_only_head = ret; - _curr_scan_only = ret; - } else { - guarantee( _scan_only_head != NULL, "invariant" ); - _scan_only_tail->set_next_young_region(ret); - } - guarantee( ret->get_next_young_region() == NULL, "invariant" ); - _scan_only_tail = ret; - - // no need to be tagged as scan-only any more - ret->set_young(); - - ++_scan_only_length; - } - assert( length() == 0, "list should be empty" ); - return NULL; -} - void YoungList::empty_list(HeapRegion* list) { while (list != NULL) { HeapRegion* next = list->get_next_young_region(); @@ -225,12 +182,6 @@ _head = NULL; _length = 0; - empty_list(_scan_only_head); - _scan_only_head = NULL; - _scan_only_tail = NULL; - _scan_only_length = 0; - _curr_scan_only = NULL; - empty_list(_survivor_head); _survivor_head = NULL; _survivor_tail = NULL; @@ -248,11 +199,11 @@ HeapRegion* curr = _head; HeapRegion* last = NULL; while (curr != NULL) { - if (!curr->is_young() || curr->is_scan_only()) { + if (!curr->is_young()) { gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" " - "incorrectly tagged (%d, %d)", + "incorrectly tagged (y: %d, surv: %d)", curr->bottom(), curr->end(), - curr->is_young(), curr->is_scan_only()); + curr->is_young(), curr->is_survivor()); ret = false; } ++length; @@ -267,47 +218,10 @@ length, _length); } - bool scan_only_ret = true; - length = 0; - curr = _scan_only_head; - last = NULL; - while (curr != NULL) { - if (!curr->is_young() || curr->is_scan_only()) { - gclog_or_tty->print_cr("### SCAN-ONLY REGION "PTR_FORMAT"-"PTR_FORMAT" " - "incorrectly tagged (%d, %d)", - curr->bottom(), curr->end(), - curr->is_young(), curr->is_scan_only()); - scan_only_ret = false; - } - ++length; - last = curr; - curr = curr->get_next_young_region(); - } - scan_only_ret = scan_only_ret && (length == _scan_only_length); - - if ( (last != _scan_only_tail) || - (_scan_only_head == NULL && _scan_only_tail != NULL) || - (_scan_only_head != NULL && _scan_only_tail == NULL) ) { - gclog_or_tty->print_cr("## _scan_only_tail is set incorrectly"); - scan_only_ret = false; - } - - if (_curr_scan_only != NULL && _curr_scan_only != _scan_only_head) { - gclog_or_tty->print_cr("### _curr_scan_only is set incorrectly"); - scan_only_ret = false; - } - - if (!scan_only_ret) { - gclog_or_tty->print_cr("### SCAN-ONLY LIST seems not well formed!"); - gclog_or_tty->print_cr("### list has %d entries, _scan_only_length is %d", - length, _scan_only_length); - } - - return ret && scan_only_ret; + return ret; } -bool 
YoungList::check_list_empty(bool ignore_scan_only_list, - bool check_sample) { +bool YoungList::check_list_empty(bool check_sample) { bool ret = true; if (_length != 0) { @@ -327,28 +241,7 @@ gclog_or_tty->print_cr("### YOUNG LIST does not seem empty"); } - if (ignore_scan_only_list) - return ret; - - bool scan_only_ret = true; - if (_scan_only_length != 0) { - gclog_or_tty->print_cr("### SCAN-ONLY LIST should have 0 length, not %d", - _scan_only_length); - scan_only_ret = false; - } - if (_scan_only_head != NULL) { - gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL head"); - scan_only_ret = false; - } - if (_scan_only_tail != NULL) { - gclog_or_tty->print_cr("### SCAN-ONLY LIST does not have a NULL tail"); - scan_only_ret = false; - } - if (!scan_only_ret) { - gclog_or_tty->print_cr("### SCAN-ONLY LIST does not seem empty"); - } - - return ret && scan_only_ret; + return ret; } void @@ -365,7 +258,18 @@ void YoungList::rs_length_sampling_next() { assert( _curr != NULL, "invariant" ); - _sampled_rs_lengths += _curr->rem_set()->occupied(); + size_t rs_length = _curr->rem_set()->occupied(); + + _sampled_rs_lengths += rs_length; + + // The current region may not yet have been added to the + // incremental collection set (it gets added when it is + // retired as the current allocation region). + if (_curr->in_collection_set()) { + // Update the collection set policy information for this region + _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length); + } + _curr = _curr->get_next_young_region(); if (_curr == NULL) { _last_sampled_rs_lengths = _sampled_rs_lengths; @@ -375,54 +279,46 @@ void YoungList::reset_auxilary_lists() { - // We could have just "moved" the scan-only list to the young list. - // However, the scan-only list is ordered according to the region - // age in descending order, so, by moving one entry at a time, we - // ensure that it is recreated in ascending order. - guarantee( is_empty(), "young list should be empty" ); assert(check_list_well_formed(), "young list should be well formed"); // Add survivor regions to SurvRateGroup. _g1h->g1_policy()->note_start_adding_survivor_regions(); _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */); + for (HeapRegion* curr = _survivor_head; curr != NULL; curr = curr->get_next_young_region()) { _g1h->g1_policy()->set_region_survivors(curr); + + // The region is a non-empty survivor so let's add it to + // the incremental collection set for the next evacuation + // pause. 
+ _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr); } _g1h->g1_policy()->note_stop_adding_survivor_regions(); + _head = _survivor_head; + _length = _survivor_length; if (_survivor_head != NULL) { - _head = _survivor_head; - _length = _survivor_length + _scan_only_length; - _survivor_tail->set_next_young_region(_scan_only_head); - } else { - _head = _scan_only_head; - _length = _scan_only_length; - } - - for (HeapRegion* curr = _scan_only_head; - curr != NULL; - curr = curr->get_next_young_region()) { - curr->recalculate_age_in_surv_rate_group(); - } - _scan_only_head = NULL; - _scan_only_tail = NULL; - _scan_only_length = 0; - _curr_scan_only = NULL; - - _survivor_head = NULL; - _survivor_tail = NULL; - _survivor_length = 0; + assert(_survivor_tail != NULL, "cause it shouldn't be"); + assert(_survivor_length > 0, "invariant"); + _survivor_tail->set_next_young_region(NULL); + } + + // Don't clear the survivor list handles until the start of + // the next evacuation pause - we need it in order to re-tag + // the survivor regions from this evacuation pause as 'young' + // at the start of the next. + _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */); assert(check_list_well_formed(), "young list should be well formed"); } void YoungList::print() { - HeapRegion* lists[] = {_head, _scan_only_head, _survivor_head}; - const char* names[] = {"YOUNG", "SCAN-ONLY", "SURVIVOR"}; + HeapRegion* lists[] = {_head, _survivor_head}; + const char* names[] = {"YOUNG", "SURVIVOR"}; for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) { gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]); @@ -431,7 +327,7 @@ gclog_or_tty->print_cr(" empty"); while (curr != NULL) { gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, " - "age: %4d, y: %d, s-o: %d, surv: %d", + "age: %4d, y: %d, surv: %d", curr->bottom(), curr->end(), curr->top(), curr->prev_top_at_mark_start(), @@ -439,7 +335,6 @@ curr->top_at_conc_mark_count(), curr->age_in_surv_rate_group_cond(), curr->is_young(), - curr->is_scan_only(), curr->is_survivor()); curr = curr->get_next_young_region(); } @@ -707,6 +602,12 @@ // region below. if (_cur_alloc_region != NULL) { // We're finished with the _cur_alloc_region. + // As we're building (at least the young portion) of the collection + // set incrementally we'll add the current allocation region to + // the collection set here. + if (_cur_alloc_region->is_young()) { + g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region); + } _summary_bytes_used += _cur_alloc_region->used(); _cur_alloc_region = NULL; } @@ -820,6 +721,12 @@ _free_regions++; free_region(_cur_alloc_region); } else { + // As we're building (at least the young portion) of the collection + // set incrementally we'll add the current allocation region to + // the collection set here. + if (_cur_alloc_region->is_young()) { + g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region); + } _summary_bytes_used += _cur_alloc_region->used(); } _cur_alloc_region = NULL; @@ -902,6 +809,10 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs, size_t word_size) { + if (GC_locker::check_active_before_gc()) { + return; // GC is disabled (e.g.
JNI GetXXXCritical operation) + } + ResourceMark rm; if (PrintHeapAtGC) { @@ -909,16 +820,16 @@ } if (full && DisableExplicitGC) { - gclog_or_tty->print("\n\n\nDisabling Explicit GC\n\n\n"); return; } assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); - if (GC_locker::is_active()) { - return; // GC is disabled (e.g. JNI GetXXXCritical operation) - } + const bool do_clear_all_soft_refs = clear_all_soft_refs || + collector_policy()->should_clear_all_soft_refs(); + + ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy()); { IsGCActiveMark x; @@ -926,7 +837,8 @@ // Timing gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty); + TraceTime t(full ? "Full GC (System.gc())" : "Full GC", + PrintGC, true, gclog_or_tty); TraceMemoryManagerStats tms(true /* fullGC */); @@ -970,6 +882,15 @@ g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS(); tear_down_region_lists(); set_used_regions_to_need_zero_fill(); + + // We may have added regions to the current incremental collection + // set between the last GC or pause and now. We need to clear the + // incremental collection set and then start rebuilding it afresh + // after this full GC. + abandon_collection_set(g1_policy()->inc_cset_head()); + g1_policy()->clear_incremental_cset(); + g1_policy()->stop_incremental_cset_building(); + if (g1_policy()->in_young_gc_mode()) { empty_young_list(); g1_policy()->set_full_young_gcs(true); @@ -985,12 +906,12 @@ ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL); ref_processor()->enable_discovery(); - ref_processor()->setup_policy(clear_all_soft_refs); + ref_processor()->setup_policy(do_clear_all_soft_refs); // Do collection work { HandleMark hm; // Discard invalid handles created during gc - G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs); + G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs); } // Because freeing humongous regions may have added some unclean // regions, it is necessary to tear down again before rebuilding. @@ -1053,6 +974,15 @@ perm()->compute_new_size(); } + // Start a new incremental collection set for the next pause + assert(g1_policy()->collection_set() == NULL, "must be"); + g1_policy()->start_incremental_cset_building(); + + // Clear the _cset_fast_test bitmap in anticipation of adding + // regions to the incremental collection set for the next + // evacuation pause. + clear_cset_fast_test(); + double end = os::elapsedTime(); g1_policy()->record_full_collection_end(); @@ -1071,7 +1001,9 @@ if (g1_policy()->in_young_gc_mode()) { _young_list->reset_sampled_info(); - assert( check_young_list_empty(false, false), + // At this point there should be no regions in the + // entire heap tagged as young. + assert( check_young_list_empty(true /* check_heap */), "young list should be empty at this point"); } @@ -1208,6 +1140,9 @@ return result; } + assert(!collector_policy()->should_clear_all_soft_refs(), + "Flag should have been handled and cleared prior to this point"); + // What else? We might try synchronous finalization later. 
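The do_clear_all_soft_refs / ClearedAllSoftRefs pattern above decides once, up front, whether this collection must clear all soft references (either because the caller asked or because the policy demands it after a near-OOM), then relies on a scoped object so the policy is updated no matter how the collection exits. A minimal sketch of that RAII shape (class and member names here are illustrative, not HotSpot's):

#include <cstdio>

// Decide the soft-ref-clearing mode once, then use a scope guard so
// the policy is informed even on early returns from the collection.
// All names below are invented for this sketch.
struct CollectorPolicy {
  bool _should_clear_all_soft_refs = false;
  bool should_clear_all_soft_refs() const { return _should_clear_all_soft_refs; }
  void set_all_soft_refs_clear(bool v) { printf("policy notified: %d\n", v); }
};

class ClearedAllSoftRefsGuard {
  bool _clear_all;
  CollectorPolicy* _policy;
public:
  ClearedAllSoftRefsGuard(bool clear_all, CollectorPolicy* p)
    : _clear_all(clear_all), _policy(p) {}
  ~ClearedAllSoftRefsGuard() {
    if (_clear_all) _policy->set_all_soft_refs_clear(true);
  }
};

int main() {
  CollectorPolicy policy;
  bool requested = true;  // caller passed clear_all_soft_refs
  bool do_clear = requested || policy.should_clear_all_soft_refs();
  ClearedAllSoftRefsGuard casr(do_clear, &policy);
  // ... collection work; the guard fires when this scope exits ...
  return 0;
}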
If the total // space available is large enough for the allocation, then a more // complete compaction phase than we've tried so far might be @@ -1565,6 +1500,20 @@ _g1h = this; + _in_cset_fast_test_length = max_regions(); + _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); + + // We're biasing _in_cset_fast_test to avoid subtracting the + // beginning of the heap every time we want to index; basically + // it's the same with what we do with the card table. + _in_cset_fast_test = _in_cset_fast_test_base - + ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); + + // Clear the _cset_fast_test bitmap in anticipation of adding + // regions to the incremental collection set for the first + // evacuation pause. + clear_cset_fast_test(); + // Create the ConcurrentMark data structure and thread. // (Must do this late, so that "max_regions" is defined.) _cm = new ConcurrentMark(heap_rs, (int) max_regions()); @@ -2185,8 +2134,10 @@ assert(o != NULL, "Huh?"); if (!_g1h->is_obj_dead_cond(o, _use_prev_marking)) { o->oop_iterate(&isLive); - if (!_hr->obj_allocated_since_prev_marking(o)) - _live_bytes += (o->size() * HeapWordSize); + if (!_hr->obj_allocated_since_prev_marking(o)) { + size_t obj_size = o->size(); // Make sure we don't overflow + _live_bytes += (obj_size * HeapWordSize); + } } } size_t live_bytes() { return _live_bytes; } @@ -2388,8 +2339,8 @@ print_on(gclog_or_tty, true /* extended */); gclog_or_tty->print_cr(""); if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) { - concurrent_mark()->print_reachable(use_prev_marking, - "failed-verification"); + concurrent_mark()->print_reachable("at-verification-failure", + use_prev_marking, false /* all */); } gclog_or_tty->flush(); } @@ -2658,6 +2609,10 @@ void G1CollectedHeap::do_collection_pause_at_safepoint() { + if (GC_locker::check_active_before_gc()) { + return; // GC is disabled (e.g. JNI GetXXXCritical operation) + } + if (PrintHeapAtGC) { Universe::print_heap_before_gc(); } @@ -2665,6 +2620,11 @@ { ResourceMark rm; + // This call will decide whether this pause is an initial-mark + // pause. If it is, during_initial_mark_pause() will return true + // for the duration of this pause. + g1_policy()->decide_on_conc_mark_initiation(); + char verbose_str[128]; sprintf(verbose_str, "GC pause "); if (g1_policy()->in_young_gc_mode()) { @@ -2673,7 +2633,7 @@ else strcat(verbose_str, "(partial)"); } - if (g1_policy()->should_initiate_conc_mark()) + if (g1_policy()->during_initial_mark_pause()) strcat(verbose_str, " (initial-mark)"); // if PrintGCDetails is on, we'll print long statistics information @@ -2697,10 +2657,6 @@ "young list should be well formed"); } - if (GC_locker::is_active()) { - return; // GC is disabled (e.g. 
JNI GetXXXCritical operation) - } - bool abandoned = false; { // Call to jvmpi::post_class_unload_events must occur outside of active GC IsGCActiveMark x; @@ -2736,27 +2692,21 @@ double start_time_sec = os::elapsedTime(); size_t start_used_bytes = used(); +#if YOUNG_LIST_VERBOSE + gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); + _young_list->print(); + g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); +#endif // YOUNG_LIST_VERBOSE + g1_policy()->record_collection_pause_start(start_time_sec, start_used_bytes); - guarantee(_in_cset_fast_test == NULL, "invariant"); - guarantee(_in_cset_fast_test_base == NULL, "invariant"); - _in_cset_fast_test_length = max_regions(); - _in_cset_fast_test_base = - NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length); - memset(_in_cset_fast_test_base, false, - _in_cset_fast_test_length * sizeof(bool)); - // We're biasing _in_cset_fast_test to avoid subtracting the - // beginning of the heap every time we want to index; basically - // it's the same with what we do with the card table. - _in_cset_fast_test = _in_cset_fast_test_base - - ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes); - -#if SCAN_ONLY_VERBOSE +#if YOUNG_LIST_VERBOSE + gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:"); _young_list->print(); -#endif // SCAN_ONLY_VERBOSE - - if (g1_policy()->should_initiate_conc_mark()) { +#endif // YOUNG_LIST_VERBOSE + + if (g1_policy()->during_initial_mark_pause()) { concurrent_mark()->checkpointRootsInitialPre(); } save_marks(); @@ -2781,12 +2731,15 @@ if (mark_in_progress()) concurrent_mark()->newCSet(); - // Now choose the CS. - g1_policy()->choose_collection_set(); - - // We may abandon a pause if we find no region that will fit in the MMU - // pause. - bool abandoned = (g1_policy()->collection_set() == NULL); +#if YOUNG_LIST_VERBOSE + gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:"); + _young_list->print(); + g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); +#endif // YOUNG_LIST_VERBOSE + + // Now choose the CS. We may abandon a pause if we find no + // region that will fit in the MMU pause. + bool abandoned = g1_policy()->choose_collection_set(); // Nothing to do if we were unable to choose a collection set. if (!abandoned) { @@ -2804,40 +2757,64 @@ // Actually do the work... evacuate_collection_set(); + free_collection_set(g1_policy()->collection_set()); g1_policy()->clear_collection_set(); - FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base); - // this is more for peace of mind; we're nulling them here and - // we're expecting them to be null at the beginning of the next GC - _in_cset_fast_test = NULL; - _in_cset_fast_test_base = NULL; - cleanup_surviving_young_words(); + // Start a new incremental collection set for the next pause. + g1_policy()->start_incremental_cset_building(); + + // Clear the _cset_fast_test bitmap in anticipation of adding + // regions to the incremental collection set for the next + // evacuation pause. + clear_cset_fast_test(); + if (g1_policy()->in_young_gc_mode()) { _young_list->reset_sampled_info(); - assert(check_young_list_empty(true), - "young list should be empty"); - -#if SCAN_ONLY_VERBOSE + + // Don't check the whole heap at this point as the + // GC alloc regions from this pause have been tagged + // as survivors and moved on to the survivor list. + // Survivor regions will fail the !is_young() check. 
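The hunk above removes the per-pause allocation of _in_cset_fast_test; the array is now set up once at heap initialization (earlier in this change) and merely cleared between pauses. Its biasing works like a card table: store base minus (heap start >> region shift) so a membership test is a single load indexed by (addr >> region shift). A standalone sketch of the arithmetic with toy sizes:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Card-table-style biasing: keep a pointer displaced by
// (heap_start >> shift) so lookups index with (addr >> shift) directly,
// with no subtraction on the hot path. Region size and heap base are
// toy values; G1 shifts by HeapRegion::LogOfHRGrainBytes.
int main() {
  const unsigned log_region_bytes = 20;        // assume 1 MB regions
  const uintptr_t heap_start = 0x40000000u;    // assumed reserved base
  bool storage[64];                            // one flag per region
  memset(storage, 0, sizeof(storage));

  // Formally out-of-bounds pointer arithmetic, but this is exactly the
  // biasing trick described in the patch; the pointer is only ever
  // re-offset back into 'storage' before use.
  bool* in_cset_fast_test = storage - (heap_start >> log_region_bytes);

  uintptr_t addr = heap_start + 5 * (1u << log_region_bytes) + 123;
  in_cset_fast_test[addr >> log_region_bytes] = true;  // mark region 5

  printf("region 5 in cset: %d\n", (int)storage[5]);   // prints 1
  return 0;
}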
+ assert(check_young_list_empty(false /* check_heap */), + "young list should be empty"); + +#if YOUNG_LIST_VERBOSE + gclog_or_tty->print_cr("Before recording survivors.\nYoung List:"); _young_list->print(); -#endif // SCAN_ONLY_VERBOSE +#endif // YOUNG_LIST_VERBOSE g1_policy()->record_survivor_regions(_young_list->survivor_length(), _young_list->first_survivor_region(), _young_list->last_survivor_region()); + _young_list->reset_auxilary_lists(); } } else { - if (_in_cset_fast_test != NULL) { - assert(_in_cset_fast_test_base != NULL, "Since _in_cset_fast_test isn't"); - FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base); - // this is more for peace of mind; we're nulling them here and - // we're expecting them to be null at the beginning of the next GC - _in_cset_fast_test = NULL; - _in_cset_fast_test_base = NULL; - } + // We have abandoned the current collection. This can only happen + // if we're not doing young or partially young collections, and + // we didn't find an old region that we're able to collect within + // the allowed time. + + assert(g1_policy()->collection_set() == NULL, "should be"); + assert(_young_list->length() == 0, "because it should be"); + + // This should be a no-op. + abandon_collection_set(g1_policy()->inc_cset_head()); + + g1_policy()->clear_incremental_cset(); + g1_policy()->stop_incremental_cset_building(); + + // Start a new incremental collection set for the next pause. + g1_policy()->start_incremental_cset_building(); + + // Clear the _cset_fast_test bitmap in anticipation of adding + // regions to the incremental collection set for the next + // evacuation pause. + clear_cset_fast_test(); + // This looks confusing, because the DPT should really be empty // at this point -- since we have not done any collection work, // there should not be any derived pointers in the table to update; @@ -2858,7 +2835,7 @@ } if (g1_policy()->in_young_gc_mode() && - g1_policy()->should_initiate_conc_mark()) { + g1_policy()->during_initial_mark_pause()) { concurrent_mark()->checkpointRootsInitialPost(); set_marking_started(); // CAUTION: after the doConcurrentMark() call below, @@ -2871,9 +2848,11 @@ doConcurrentMark(); } -#if SCAN_ONLY_VERBOSE +#if YOUNG_LIST_VERBOSE + gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:"); _young_list->print(); -#endif // SCAN_ONLY_VERBOSE + g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); +#endif // YOUNG_LIST_VERBOSE double end_time_sec = os::elapsedTime(); double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; @@ -2931,12 +2910,34 @@ } } +size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose) +{ + size_t gclab_word_size; + switch (purpose) { + case GCAllocForSurvived: + gclab_word_size = YoungPLABSize; + break; + case GCAllocForTenured: + gclab_word_size = OldPLABSize; + break; + default: + assert(false, "unknown GCAllocPurpose"); + gclab_word_size = OldPLABSize; + break; + } + return gclab_word_size; +} + + void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose"); // make sure we don't call set_gc_alloc_region() multiple times on // the same region assert(r == NULL || !r->is_gc_alloc_region(), "shouldn't already be a GC alloc region"); + assert(r == NULL || !r->isHumongous(), + "humongous regions shouldn't be used as GC alloc regions"); + HeapWord* original_top = NULL; if (r != NULL) original_top = r->top(); @@ -3079,12 +3080,17 @@ if (alloc_region->in_collection_set() || alloc_region->top() == 
alloc_region->end() || - alloc_region->top() == alloc_region->bottom()) { - // we will discard the current GC alloc region if it's in the - // collection set (it can happen!), if it's already full (no - // point in using it), or if it's empty (this means that it - // was emptied during a cleanup and it should be on the free - // list now). + alloc_region->top() == alloc_region->bottom() || + alloc_region->isHumongous()) { + // we will discard the current GC alloc region if + // * it's in the collection set (it can happen!), + // * it's already full (no point in using it), + // * it's empty (this means that it was emptied during + // a cleanup and it should be on the free list now), or + // * it's humongous (this means that it was emptied + // during a cleanup and was added to the free list, but + // has been subsequently used to allocate a humongous + // object that may be less than the region size). + alloc_region = NULL; } @@ -3096,6 +3102,11 @@ } else { // the region was retained from the last collection ++_gc_alloc_region_counts[ap]; + if (G1PrintHeapRegions) { + gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], " + "top "PTR_FORMAT, + alloc_region->hrs_index(), alloc_region->bottom(), alloc_region->end(), alloc_region->top()); + } } if (alloc_region != NULL) { @@ -3652,6 +3663,8 @@ _g1_rem(g1h->g1_rem_set()), _hash_seed(17), _queue_num(queue_num), _term_attempts(0), + _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)), + _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)), _age_table(false), #if G1_DETAILED_STATS _pushes(0), _pops(0), _steals(0), @@ -3678,6 +3691,9 @@ _overflowed_refs = new OverflowQueue(10); + _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer; + _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer; + _start = os::elapsedTime(); } @@ -3975,16 +3991,13 @@ OopsInHeapRegionClosure *scan_root_cl; OopsInHeapRegionClosure *scan_perm_cl; - OopsInHeapRegionClosure *scan_so_cl; - - if (_g1h->g1_policy()->should_initiate_conc_mark()) { + + if (_g1h->g1_policy()->during_initial_mark_pause()) { scan_root_cl = &scan_mark_root_cl; scan_perm_cl = &scan_mark_perm_cl; - scan_so_cl = &scan_mark_heap_rs_cl; } else { scan_root_cl = &only_scan_root_cl; scan_perm_cl = &only_scan_perm_cl; - scan_so_cl = &only_scan_heap_rs_cl; } pss.start_strong_roots(); @@ -3992,7 +4005,6 @@ SharedHeap::SO_AllClasses, scan_root_cl, &push_heap_rs_cl, - scan_so_cl, scan_perm_cl, i); pss.end_strong_roots(); @@ -4054,7 +4066,6 @@ SharedHeap::ScanningOption so, OopClosure* scan_non_heap_roots, OopsInHeapRegionClosure* scan_rs, - OopsInHeapRegionClosure* scan_so, OopsInGenClosure* scan_perm, int worker_i) { // First scan the strong roots, including the perm gen. @@ -4074,6 +4085,7 @@ &buf_scan_non_heap_roots, &eager_scan_code_roots, &buf_scan_perm); + // Finish up any enqueued closure applications. buf_scan_non_heap_roots.done(); buf_scan_perm.done(); @@ -4096,9 +4108,6 @@ // XXX What should this be doing in the parallel case? g1_policy()->record_collection_pause_end_CH_strong_roots(); - if (scan_so != NULL) { - scan_scan_only_set(scan_so, worker_i); - } // Now scan the complement of the collection set.
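// Illustrative sketch, not part of this changeset: the four discard
// conditions in the alloc-region hunk further up can be read as a single
// predicate over a retained GC alloc region.
static bool is_unusable_gc_alloc_region(HeapRegion* r) {
  return r->in_collection_set()      // can happen!
      || r->top() == r->end()        // already full
      || r->top() == r->bottom()     // emptied during a cleanup
      || r->isHumongous();           // since reused for a humongous object
}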
if (scan_rs != NULL) { g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i); @@ -4112,54 +4121,6 @@ } void -G1CollectedHeap::scan_scan_only_region(HeapRegion* r, - OopsInHeapRegionClosure* oc, - int worker_i) { - HeapWord* startAddr = r->bottom(); - HeapWord* endAddr = r->used_region().end(); - - oc->set_region(r); - - HeapWord* p = r->bottom(); - HeapWord* t = r->top(); - guarantee( p == r->next_top_at_mark_start(), "invariant" ); - while (p < t) { - oop obj = oop(p); - p += obj->oop_iterate(oc); - } -} - -void -G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc, - int worker_i) { - double start = os::elapsedTime(); - - BufferingOopsInHeapRegionClosure boc(oc); - - FilterInHeapRegionAndIntoCSClosure scan_only(this, &boc); - FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark()); - - OopsInHeapRegionClosure *foc; - if (g1_policy()->should_initiate_conc_mark()) - foc = &scan_and_mark; - else - foc = &scan_only; - - HeapRegion* hr; - int n = 0; - while ((hr = _young_list->par_get_next_scan_only_region()) != NULL) { - scan_scan_only_region(hr, foc, worker_i); - ++n; - } - boc.done(); - - double closure_app_s = boc.closure_app_seconds(); - g1_policy()->record_obj_copy_time(worker_i, closure_app_s * 1000.0); - double ms = (os::elapsedTime() - start - closure_app_s)*1000.0; - g1_policy()->record_scan_only_time(worker_i, ms, n); -} - -void G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, OopClosure* non_root_closure) { CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); @@ -4357,17 +4318,14 @@ class G1ParCleanupCTTask : public AbstractGangTask { CardTableModRefBS* _ct_bs; G1CollectedHeap* _g1h; - HeapRegion* volatile _so_head; HeapRegion* volatile _su_head; public: G1ParCleanupCTTask(CardTableModRefBS* ct_bs, G1CollectedHeap* g1h, - HeapRegion* scan_only_list, HeapRegion* survivor_list) : AbstractGangTask("G1 Par Cleanup CT Task"), _ct_bs(ct_bs), _g1h(g1h), - _so_head(scan_only_list), _su_head(survivor_list) { } @@ -4376,14 +4334,13 @@ while (r = _g1h->pop_dirty_cards_region()) { clear_cards(r); } - // Redirty the cards of the scan-only and survivor regions. - dirty_list(&this->_so_head); + // Redirty the cards of the survivor regions. dirty_list(&this->_su_head); } void clear_cards(HeapRegion* r) { - // Cards for Survivor and Scan-Only regions will be dirtied later. - if (!r->is_scan_only() && !r->is_survivor()) { + // Cards for Survivor regions will be dirtied later. + if (!r->is_survivor()) { _ct_bs->clear(MemRegion(r->bottom(), r->end())); } } @@ -4416,7 +4373,7 @@ virtual bool doHeapRegion(HeapRegion* r) { MemRegion mr(r->bottom(), r->end()); - if (r->is_scan_only() || r->is_survivor()) { + if (r->is_survivor()) { _ct_bs->verify_dirty_region(mr); } else { _ct_bs->verify_clean_region(mr); @@ -4432,8 +4389,8 @@ // Iterate over the dirty cards region list. 
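// Illustrative sketch, not part of this changeset: the redirtying used
// just below presumably walks a young list and dirties each region's
// card range along these lines (dirtyCardsForYoungRegions itself is
// defined elsewhere in this file).
static void dirty_cards_for_list(CardTableModRefBS* ct_bs, HeapRegion* list) {
  for (HeapRegion* r = list; r != NULL; r = r->get_next_young_region()) {
    // survivor cards must stay dirty so the next pause rescans
    // references from survivor regions into the rest of the heap
    ct_bs->dirty(MemRegion(r->bottom(), r->end()));
  }
}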
G1ParCleanupCTTask cleanup_task(ct_bs, this, - _young_list->first_scan_only_region(), _young_list->first_survivor_region()); + if (ParallelGCThreads > 0) { set_par_threads(workers()->total_workers()); workers()->run_task(&cleanup_task); @@ -4449,12 +4406,12 @@ } r->set_next_dirty_cards_region(NULL); } - // now, redirty the cards of the scan-only and survivor regions + // now, redirty the cards of the survivor regions // (it seemed faster to do it this way, instead of iterating over // all regions and then clearing / dirtying as appropriate) - dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region()); dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region()); } + double elapsed = os::elapsedTime() - start; g1_policy()->record_clear_ct_time( elapsed * 1000.0); #ifndef PRODUCT @@ -4475,6 +4432,11 @@ double young_time_ms = 0.0; double non_young_time_ms = 0.0; + // Since the collection set is a superset of the young list, + // all we need to do to clear the young list is clear its + // head and length, and unlink any young regions in the code below + _young_list->clear(); + G1CollectorPolicy* policy = g1_policy(); double start_sec = os::elapsedTime(); @@ -4518,6 +4480,12 @@ guarantee( (size_t)index < policy->young_cset_length(), "invariant" ); size_t words_survived = _surviving_young_words[index]; cur->record_surv_words_in_group(words_survived); + + // At this point we have 'popped' cur from the collection set + // (linked via next_in_collection_set()) but it is still in the + // young list (linked via next_young_region()). Clear the + // _next_young_region field. + cur->set_next_young_region(NULL); } else { int index = cur->young_index_in_cset(); guarantee( index == -1, "invariant" ); @@ -4533,7 +4501,6 @@ "Should not have empty regions in a CS."); free_region(cur); } else { - guarantee( !cur->is_scan_only(), "should not be scan only" ); cur->uninstall_surv_rate_group(); if (cur->is_young()) cur->set_young_index_in_cset(-1); @@ -4557,6 +4524,27 @@ policy->record_non_young_free_cset_time_ms(non_young_time_ms); } +// This routine is similar to the above but does not record +// any policy statistics or update free lists; we are abandoning +// the current incremental collection set in preparation for a +// full collection. After the full GC we will start to build up +// the incremental collection set again. +// This is only called when we're doing a full collection +// and is immediately followed by the tearing down of the young list.
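// Standalone sketch, not part of this changeset, of the dual-linkage
// point made in free_collection_set above and relevant to
// abandon_collection_set below: a region sits on two intrusive lists at
// once, so popping it from the collection set must also clear its young
// list link once that list has been reset wholesale.
struct TwoListNode {
  TwoListNode* next_in_cset;  // collection set linkage
  TwoListNode* next_young;    // young list linkage
};
static TwoListNode* pop_cset(TwoListNode*& cset_head) {
  TwoListNode* cur = cset_head;
  cset_head = cur->next_in_cset;
  cur->next_in_cset = NULL;
  cur->next_young = NULL;  // the young list was cleared wholesale up front
  return cur;
}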
+ +void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) { + HeapRegion* cur = cs_head; + + while (cur != NULL) { + HeapRegion* next = cur->next_in_collection_set(); + assert(cur->in_collection_set(), "bad CS"); + cur->set_next_in_collection_set(NULL); + cur->set_in_collection_set(false); + cur->set_young_index_in_cset(-1); + cur = next; + } +} + HeapRegion* G1CollectedHeap::alloc_region_from_unclean_list_locked(bool zero_filled) { assert(ZF_mon->owned_by_self(), "Precondition"); @@ -4923,12 +4911,10 @@ bool success() { return _success; } }; -bool G1CollectedHeap::check_young_list_empty(bool ignore_scan_only_list, - bool check_sample) { - bool ret = true; - - ret = _young_list->check_list_empty(ignore_scan_only_list, check_sample); - if (!ignore_scan_only_list) { +bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) { + bool ret = _young_list->check_list_empty(check_sample); + + if (check_heap) { NoYoungRegionsClosure closure; heap_region_iterate(&closure); ret = ret && closure.success(); @@ -4979,7 +4965,7 @@ MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag); while (pop_unclean_region_list_locked() != NULL) ; assert(_unclean_region_list.hd() == NULL && _unclean_region_list.sz() == 0, - "Postconditions of loop.") + "Postconditions of loop."); while (pop_free_region_list_locked() != NULL) ; assert(_free_region_list == NULL, "Postcondition of loop."); if (_free_region_list_size != 0) { diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -81,33 +81,29 @@ HeapRegion* _head; - HeapRegion* _scan_only_head; - HeapRegion* _scan_only_tail; + HeapRegion* _survivor_head; + HeapRegion* _survivor_tail; + + HeapRegion* _curr; + size_t _length; - size_t _scan_only_length; + size_t _survivor_length; size_t _last_sampled_rs_lengths; size_t _sampled_rs_lengths; - HeapRegion* _curr; - HeapRegion* _curr_scan_only; - HeapRegion* _survivor_head; - HeapRegion* _survivor_tail; - size_t _survivor_length; - - void empty_list(HeapRegion* list); + void empty_list(HeapRegion* list); public: YoungList(G1CollectedHeap* g1h); - void push_region(HeapRegion* hr); - void add_survivor_region(HeapRegion* hr); - HeapRegion* pop_region(); - void empty_list(); - bool is_empty() { return _length == 0; } - size_t length() { return _length; } - size_t scan_only_length() { return _scan_only_length; } - size_t survivor_length() { return _survivor_length; } + void push_region(HeapRegion* hr); + void add_survivor_region(HeapRegion* hr); + + void empty_list(); + bool is_empty() { return _length == 0; } + size_t length() { return _length; } + size_t survivor_length() { return _survivor_length; } void rs_length_sampling_init(); bool rs_length_sampling_more(); @@ -120,22 +116,21 @@ // for development purposes void reset_auxilary_lists(); + void clear() { _head = NULL; _length = 0; } + + void clear_survivors() { + _survivor_head = NULL; + _survivor_tail = NULL; + _survivor_length = 0; + } + HeapRegion* first_region() { return _head; } - HeapRegion* first_scan_only_region() { return _scan_only_head; } HeapRegion* first_survivor_region() { return _survivor_head; } HeapRegion* last_survivor_region() { return _survivor_tail; } - HeapRegion* par_get_next_scan_only_region() { - MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); - HeapRegion* ret = _curr_scan_only; - if (ret != NULL) - _curr_scan_only = ret->get_next_young_region(); - return ret; - } // debugging bool check_list_well_formed(); - bool check_list_empty(bool ignore_scan_only_list, - bool check_sample = true); + bool check_list_empty(bool check_sample = true); void print(); }; @@ -232,6 +227,9 @@ // current collection. HeapRegion* _gc_alloc_region_list; + // Determines PLAB size for a particular allocation purpose. + static size_t desired_plab_sz(GCAllocPurpose purpose); + // When called by par thread, require par_alloc_during_gc_lock() to be held. void push_gc_alloc_region(HeapRegion* hr); @@ -402,8 +400,7 @@ assert(_in_cset_fast_test_base != NULL, "sanity"); assert(r->in_collection_set(), "invariant"); int index = r->hrs_index(); - assert(0 <= (size_t) index && (size_t) index < _in_cset_fast_test_length, - "invariant"); + assert(0 <= index && (size_t) index < _in_cset_fast_test_length, "invariant"); assert(!_in_cset_fast_test_base[index], "invariant"); _in_cset_fast_test_base[index] = true; } @@ -428,6 +425,12 @@ } } + void clear_cset_fast_test() { + assert(_in_cset_fast_test_base != NULL, "sanity"); + memset(_in_cset_fast_test_base, false, + _in_cset_fast_test_length * sizeof(bool)); + } + protected: // Shrink the garbage-first heap by at most the given size (in bytes!). @@ -473,6 +476,10 @@ // regions. void free_collection_set(HeapRegion* cs_head); + // Abandon the current collection set without recording policy + // statistics or updating free lists. 
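// Standalone sketch, not part of this changeset, of the "biased base"
// trick behind _in_cset_fast_test and clear_cset_fast_test above, the
// same trick the card table uses: bias the base pointer once so a query
// can index directly with (addr >> region_shift) and needs no per-lookup
// subtraction of the heap start.
static bool* bias_fast_test_base(bool* base, size_t heap_start,
                                 int log_region_bytes) {
  return base - (heap_start >> log_region_bytes);
}
// a query then reads: biased_base[(size_t) addr >> log_region_bytes]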
+ void abandon_collection_set(HeapRegion* cs_head); + // Applies "scan_non_heap_roots" to roots outside the heap, // "scan_rs" to roots inside the heap (having done "set_region" to // indicate the region in which the root resides), and does "scan_perm" @@ -485,16 +492,9 @@ SharedHeap::ScanningOption so, OopClosure* scan_non_heap_roots, OopsInHeapRegionClosure* scan_rs, - OopsInHeapRegionClosure* scan_so, OopsInGenClosure* scan_perm, int worker_i); - void scan_scan_only_set(OopsInHeapRegionClosure* oc, - int worker_i); - void scan_scan_only_region(HeapRegion* hr, - OopsInHeapRegionClosure* oc, - int worker_i); - // Apply "blk" to all the weak roots of the system. These include // JNI weak roots, the code cache, system dictionary, symbol table, // string table, and referents of reachable weak refs. @@ -1133,36 +1133,14 @@ void set_region_short_lived_locked(HeapRegion* hr); // add appropriate methods for any other surv rate groups - void young_list_rs_length_sampling_init() { - _young_list->rs_length_sampling_init(); - } - bool young_list_rs_length_sampling_more() { - return _young_list->rs_length_sampling_more(); - } - void young_list_rs_length_sampling_next() { - _young_list->rs_length_sampling_next(); - } - size_t young_list_sampled_rs_lengths() { - return _young_list->sampled_rs_lengths(); - } - - size_t young_list_length() { return _young_list->length(); } - size_t young_list_scan_only_length() { - return _young_list->scan_only_length(); } - - HeapRegion* pop_region_from_young_list() { - return _young_list->pop_region(); - } - - HeapRegion* young_list_first_region() { - return _young_list->first_region(); - } + YoungList* young_list() { return _young_list; } // debugging bool check_young_list_well_formed() { return _young_list->check_list_well_formed(); } - bool check_young_list_empty(bool ignore_scan_only_list, + + bool check_young_list_empty(bool check_heap, bool check_sample = true); // *** Stuff related to concurrent marking. It's not clear to me that so @@ -1367,12 +1345,18 @@ return BitsPerWord << shifter(); } - static size_t gclab_word_size() { - return G1ParallelGCAllocBufferSize / HeapWordSize; + size_t gclab_word_size() const { + return _gclab_word_size; } - static size_t bitmap_size_in_bits() { - size_t bits_in_bitmap = gclab_word_size() >> shifter(); + // Calculates actual GCLab size in words + size_t gclab_real_word_size() const { + return bitmap_size_in_bits(pointer_delta(_real_end_word, _start_word)) + / BitsPerWord; + } + + static size_t bitmap_size_in_bits(size_t gclab_word_size) { + size_t bits_in_bitmap = gclab_word_size >> shifter(); // We are going to ensure that the beginning of a word in this // bitmap also corresponds to the beginning of a word in the // global marking bitmap. 
To handle the case where a GCLab @@ -1382,13 +1366,13 @@ return bits_in_bitmap + BitsPerWord - 1; } public: - GCLabBitMap(HeapWord* heap_start) - : BitMap(bitmap_size_in_bits()), + GCLabBitMap(HeapWord* heap_start, size_t gclab_word_size) + : BitMap(bitmap_size_in_bits(gclab_word_size)), _cm(G1CollectedHeap::heap()->concurrent_mark()), _shifter(shifter()), _bitmap_word_covers_words(bitmap_word_covers_words()), _heap_start(heap_start), - _gclab_word_size(gclab_word_size()), + _gclab_word_size(gclab_word_size), _real_start_word(NULL), _real_end_word(NULL), _start_word(NULL) @@ -1483,7 +1467,7 @@ mark_bitmap->mostly_disjoint_range_union(this, 0, // always start from the start of the bitmap _start_word, - size_in_words()); + gclab_real_word_size()); _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word)); #ifndef PRODUCT @@ -1495,9 +1479,10 @@ } } - static size_t bitmap_size_in_words() { - return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord; + size_t bitmap_size_in_words() const { + return (bitmap_size_in_bits(gclab_word_size()) + BitsPerWord - 1) / BitsPerWord; } + }; class G1ParGCAllocBuffer: public ParGCAllocBuffer { @@ -1507,10 +1492,10 @@ GCLabBitMap _bitmap; public: - G1ParGCAllocBuffer() : - ParGCAllocBuffer(G1ParallelGCAllocBufferSize / HeapWordSize), + G1ParGCAllocBuffer(size_t gclab_word_size) : + ParGCAllocBuffer(gclab_word_size), _during_marking(G1CollectedHeap::heap()->mark_in_progress()), - _bitmap(G1CollectedHeap::heap()->reserved_region().start()), + _bitmap(G1CollectedHeap::heap()->reserved_region().start(), gclab_word_size), _retired(false) { } @@ -1549,8 +1534,10 @@ typedef GrowableArray<StarTask> OverflowQueue; OverflowQueue* _overflowed_refs; - G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount]; - ageTable _age_table; + G1ParGCAllocBuffer _surviving_alloc_buffer; + G1ParGCAllocBuffer _tenured_alloc_buffer; + G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount]; + ageTable _age_table; size_t _alloc_buffer_waste; size_t _undo_waste; @@ -1619,7 +1606,7 @@ ageTable* age_table() { return &_age_table; } G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) { - return &_alloc_buffers[purpose]; + return _alloc_buffers[purpose]; } size_t alloc_buffer_waste() { return _alloc_buffer_waste; } @@ -1684,15 +1671,15 @@ HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) { HeapWord* obj = NULL; - if (word_sz * 100 < - (size_t)(G1ParallelGCAllocBufferSize / HeapWordSize) * - ParallelGCBufferWastePct) { + size_t gclab_word_size = _g1h->desired_plab_sz(purpose); + if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) { G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose); + assert(gclab_word_size == alloc_buf->word_sz(), + "dynamic resizing is not supported"); add_to_alloc_buffer_waste(alloc_buf->words_remaining()); alloc_buf->retire(false, false); - HeapWord* buf = - _g1h->par_allocate_during_gc(purpose, G1ParallelGCAllocBufferSize / HeapWordSize); + HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size); if (buf == NULL) return NULL; // Let caller handle allocation failure. // Otherwise.
alloc_buf->set_buf(buf); @@ -1786,9 +1773,9 @@ void retire_alloc_buffers() { for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { - size_t waste = _alloc_buffers[ap].words_remaining(); + size_t waste = _alloc_buffers[ap]->words_remaining(); add_to_alloc_buffer_waste(waste); - _alloc_buffers[ap].retire(true, false); + _alloc_buffers[ap]->retire(true, false); } } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,10 +42,6 @@ 0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015 }; -static double cost_per_scan_only_region_ms_defaults[] = { - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 -}; - // all the same static double fully_young_cards_per_entry_ratio_defaults[] = { 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 @@ -125,7 +121,6 @@ _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)), _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)), _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)), - _cost_per_scan_only_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)), _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)), _partially_young_cards_per_entry_ratio_seq( new TruncatedSeq(TruncatedSeqLength)), @@ -133,7 +128,6 @@ _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)), _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)), _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)), - _cost_per_scan_only_region_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)), _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)), _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)), _non_young_other_cost_per_region_ms_seq( @@ -178,14 +172,30 @@ // so the hack is to do the cast QQQ FIXME _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark), _n_marks_since_last_pause(0), - _conc_mark_initiated(false), - _should_initiate_conc_mark(false), + _initiate_conc_mark_if_possible(false), + _during_initial_mark_pause(false), _should_revert_to_full_young_gcs(false), _last_full_young_gc(false), _prev_collection_pause_used_at_end_bytes(0), _collection_set(NULL), + _collection_set_size(0), + _collection_set_bytes_used_before(0), + + // Incremental CSet attributes + _inc_cset_build_state(Inactive), + _inc_cset_head(NULL), + _inc_cset_tail(NULL), + _inc_cset_size(0), + _inc_cset_young_index(0), + _inc_cset_bytes_used_before(0), + _inc_cset_max_finger(NULL), + _inc_cset_recorded_young_bytes(0), + _inc_cset_recorded_rs_lengths(0), + _inc_cset_predicted_elapsed_time_ms(0.0), + _inc_cset_predicted_bytes_to_copy(0), + #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away #pragma warning( disable:4355 ) // 'this' : used in base member initializer list #endif // _MSC_VER @@ -198,7 +208,9 @@ _recorded_survivor_regions(0), _recorded_survivor_head(NULL), _recorded_survivor_tail(NULL), - _survivors_age_table(true) + _survivors_age_table(true), + + _gc_overhead_perc(0.0) { // Set up the region size and associated fields. 
Given that the @@ -207,13 +219,20 @@ HeapRegion::setup_heap_region_size(Arguments::min_heap_size()); HeapRegionRemSet::setup_remset_size(); + // Verify PLAB sizes + const uint region_size = HeapRegion::GrainWords; + if (YoungPLABSize > region_size || OldPLABSize > region_size) { + char buffer[128]; + jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most %u", + OldPLABSize > region_size ? "Old" : "Young", region_size); + vm_exit_during_initialization(buffer); + } + _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime()); _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0; _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads]; _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads]; - _par_last_scan_only_times_ms = new double[_parallel_gc_threads]; - _par_last_scan_only_regions_scanned = new double[_parallel_gc_threads]; _par_last_update_rs_start_times_ms = new double[_parallel_gc_threads]; _par_last_update_rs_times_ms = new double[_parallel_gc_threads]; @@ -243,8 +262,6 @@ _pending_card_diff_seq->add(0.0); _rs_length_diff_seq->add(rs_length_diff_defaults[index]); _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]); - _cost_per_scan_only_region_ms_seq->add( - cost_per_scan_only_region_ms_defaults[index]); _fully_young_cards_per_entry_ratio_seq->add( fully_young_cards_per_entry_ratio_defaults[index]); _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]); @@ -272,9 +289,14 @@ // if G1FixedSurvivorSpaceSize is 0 which means the size is not // fixed, then _max_survivor_regions will be calculated at - // calculate_young_list_target_config during initialization + // calculate_young_list_target_length during initialization _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes; + assert(GCTimeRatio > 0, + "we should have set it to a default value in set_g1_gc_flags() " + "if a user set it to 0"); + _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio)); + initialize_all(); } @@ -341,15 +363,18 @@ set_adaptive_young_list_length(false); _young_list_fixed_length = initial_region_num; } - _free_regions_at_end_of_collection = _g1->free_regions(); - _scan_only_regions_at_end_of_collection = 0; - calculate_young_list_min_length(); - guarantee( _young_list_min_length == 0, "invariant, not enough info" ); - calculate_young_list_target_config(); - } else { + _free_regions_at_end_of_collection = _g1->free_regions(); + calculate_young_list_min_length(); + guarantee( _young_list_min_length == 0, "invariant, not enough info" ); + calculate_young_list_target_length(); + } else { _young_list_fixed_length = 0; _in_young_gc_mode = false; } + + // We may immediately start allocating regions and placing them on the + // collection set list. Initialize the per-collection set info. + start_incremental_cset_building(); } // Create the jstat counters for the policy.
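// Worked example, not part of this changeset: the _gc_overhead_perc
// computation above turns GCTimeRatio (the desired application-time to
// GC-time ratio) into a target percentage of total time spent in GC:
//   GCTimeRatio = 9  -> 100 * (1 / (1 + 9))  = 10.0%
//   GCTimeRatio = 19 -> 100 * (1 / (1 + 19)) =  5.0%
static double gc_overhead_perc(double gc_time_ratio) {
  return 100.0 * (1.0 / (1.0 + gc_time_ratio));
}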
@@ -369,112 +394,29 @@ double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0; double alloc_rate_ms = predict_alloc_rate_ms(); int min_regions = (int) ceil(alloc_rate_ms * when_ms); - int current_region_num = (int) _g1->young_list_length(); + int current_region_num = (int) _g1->young_list()->length(); _young_list_min_length = min_regions + current_region_num; } } -void G1CollectorPolicy::calculate_young_list_target_config() { +void G1CollectorPolicy::calculate_young_list_target_length() { if (adaptive_young_list_length()) { size_t rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq); - calculate_young_list_target_config(rs_lengths); + calculate_young_list_target_length(rs_lengths); } else { if (full_young_gcs()) _young_list_target_length = _young_list_fixed_length; else _young_list_target_length = _young_list_fixed_length / 2; + _young_list_target_length = MAX2(_young_list_target_length, (size_t)1); - size_t so_length = calculate_optimal_so_length(_young_list_target_length); - guarantee( so_length < _young_list_target_length, "invariant" ); - _young_list_so_prefix_length = so_length; } calculate_survivors_policy(); } -// This method calculate the optimal scan-only set for a fixed young -// gen size. I couldn't work out how to reuse the more elaborate one, -// i.e. calculate_young_list_target_config(rs_length), as the loops are -// fundamentally different (the other one finds a config for different -// S-O lengths, whereas here we need to do the opposite). -size_t G1CollectorPolicy::calculate_optimal_so_length( - size_t young_list_length) { - if (!G1UseScanOnlyPrefix) - return 0; - - if (_all_pause_times_ms->num() < 3) { - // we won't use a scan-only set at the beginning to allow the rest - // of the predictors to warm up - return 0; - } - - if (_cost_per_scan_only_region_ms_seq->num() < 3) { - // then, we'll only set the S-O set to 1 for a little bit of time, - // to get enough information on the scanning cost - return 1; - } - - size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq); - size_t rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq); - size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff(); - size_t scanned_cards; - if (full_young_gcs()) - scanned_cards = predict_young_card_num(adj_rs_lengths); - else - scanned_cards = predict_non_young_card_num(adj_rs_lengths); - double base_time_ms = predict_base_elapsed_time_ms(pending_cards, - scanned_cards); - - size_t so_length = 0; - double max_gc_eff = 0.0; - for (size_t i = 0; i < young_list_length; ++i) { - double gc_eff = 0.0; - double pause_time_ms = 0.0; - predict_gc_eff(young_list_length, i, base_time_ms, - &gc_eff, &pause_time_ms); - if (gc_eff > max_gc_eff) { - max_gc_eff = gc_eff; - so_length = i; - } - } - - // set it to 95% of the optimal to make sure we sample the "area" - // around the optimal length to get up-to-date survival rate data - return so_length * 950 / 1000; -} - -// This is a really cool piece of code! It finds the best -// target configuration (young length / scan-only prefix length) so -// that GC efficiency is maximized and that we also meet a pause -// time. It's a triple nested loop. These loops are explained below -// from the inside-out :-) -// -// (a) The innermost loop will try to find the optimal young length -// for a fixed S-O length. It uses a binary search to speed up the -// process. We assume that, for a fixed S-O length, as we add more -// young regions to the CSet, the GC efficiency will only go up (I'll -// skip the proof). 
So, using a binary search to optimize this process -// makes perfect sense. -// -// (b) The middle loop will fix the S-O length before calling the -// innermost one. It will vary it between two parameters, increasing -// it by a given increment. -// -// (c) The outermost loop will call the middle loop three times. -// (1) The first time it will explore all possible S-O length values -// from 0 to as large as it can get, using a coarse increment (to -// quickly "home in" to where the optimal seems to be). -// (2) The second time it will explore the values around the optimal -// that was found by the first iteration using a fine increment. -// (3) Once the optimal config has been determined by the second -// iteration, we'll redo the calculation, but setting the S-O length -// to 95% of the optimal to make sure we sample the "area" -// around the optimal length to get up-to-date survival rate data -// -// Termination conditions for the iterations are several: the pause -// time is over the limit, we do not have enough to-space, etc. - -void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) { +void G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths) { guarantee( adaptive_young_list_length(), "pre-condition" ); + guarantee( !_in_marking_window || !_last_full_young_gc, "invariant" ); double start_time_sec = os::elapsedTime(); size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1ReservePercent); @@ -488,285 +430,80 @@ double survivor_regions_evac_time = predict_survivor_regions_evac_time(); - size_t min_so_length = 0; - size_t max_so_length = 0; - - if (G1UseScanOnlyPrefix) { - if (_all_pause_times_ms->num() < 3) { - // we won't use a scan-only set at the beginning to allow the rest - // of the predictors to warm up - min_so_length = 0; - max_so_length = 0; - } else if (_cost_per_scan_only_region_ms_seq->num() < 3) { - // then, we'll only set the S-O set to 1 for a little bit of time, - // to get enough information on the scanning cost - min_so_length = 1; - max_so_length = 1; - } else if (_in_marking_window || _last_full_young_gc) { - // no S-O prefix during a marking phase either, as at the end - // of the marking phase we'll have to use a very small young - // length target to fill up the rest of the CSet with - // non-young regions and, if we have lots of scan-only regions - // left-over, we will not be able to add any more non-young - // regions. - min_so_length = 0; - max_so_length = 0; - } else { - // this is the common case; we'll never reach the maximum, we - // one of the end conditions will fire well before that - // (hopefully!) 
- min_so_length = 0; - max_so_length = _free_regions_at_end_of_collection - 1; - } - } else { - // no S-O prefix, as the switch is not set, but we still need to - // do one iteration to calculate the best young target that - // meets the pause time; this way we reuse the same code instead - // of replicating it - min_so_length = 0; - max_so_length = 0; - } - double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0; size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq); size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff(); - size_t scanned_cards; - if (full_young_gcs()) - scanned_cards = predict_young_card_num(adj_rs_lengths); - else - scanned_cards = predict_non_young_card_num(adj_rs_lengths); - // calculate this once, so that we don't have to recalculate it in - // the innermost loop + size_t scanned_cards = predict_young_card_num(adj_rs_lengths); double base_time_ms = predict_base_elapsed_time_ms(pending_cards, scanned_cards) + survivor_regions_evac_time; + // the result size_t final_young_length = 0; - size_t final_so_length = 0; - double final_gc_eff = 0.0; - // we'll also keep track of how many times we go into the inner loop - // this is for profiling reasons - size_t calculations = 0; - - // this determines which of the three iterations the outer loop is in - typedef enum { - pass_type_coarse, - pass_type_fine, - pass_type_final - } pass_type_t; - - // range of the outer loop's iteration - size_t from_so_length = min_so_length; - size_t to_so_length = max_so_length; - guarantee( from_so_length <= to_so_length, "invariant" ); - - // this will keep the S-O length that's found by the second - // iteration of the outer loop; we'll keep it just in case the third - // iteration fails to find something - size_t fine_so_length = 0; - - // the increment step for the coarse (first) iteration - size_t so_coarse_increments = 5; - - // the common case, we'll start with the coarse iteration - pass_type_t pass = pass_type_coarse; - size_t so_length_incr = so_coarse_increments; - - if (from_so_length == to_so_length) { - // not point in doing the coarse iteration, we'll go directly into - // the fine one (we essentially trying to find the optimal young - // length for a fixed S-O length). - so_length_incr = 1; - pass = pass_type_final; - } else if (to_so_length - from_so_length < 3 * so_coarse_increments) { - // again, the range is too short so no point in foind the coarse - // iteration either - so_length_incr = 1; - pass = pass_type_fine; - } - - bool done = false; - // this is the outermost loop - while (!done) { -#ifdef TRACE_CALC_YOUNG_CONFIG - // leave this in for debugging, just in case - gclog_or_tty->print_cr("searching between " SIZE_FORMAT " and " SIZE_FORMAT - ", incr " SIZE_FORMAT ", pass %s", - from_so_length, to_so_length, so_length_incr, - (pass == pass_type_coarse) ? "coarse" : - (pass == pass_type_fine) ? 
"fine" : "final"); -#endif // TRACE_CALC_YOUNG_CONFIG - - size_t so_length = from_so_length; - size_t init_free_regions = - MAX2((size_t)0, - _free_regions_at_end_of_collection + - _scan_only_regions_at_end_of_collection - reserve_regions); - - // this determines whether a configuration was found - bool gc_eff_set = false; - // this is the middle loop - while (so_length <= to_so_length) { - // base time, which excludes region-related time; again we - // calculate it once to avoid recalculating it in the - // innermost loop - double base_time_with_so_ms = - base_time_ms + predict_scan_only_time_ms(so_length); - // it's already over the pause target, go around - if (base_time_with_so_ms > target_pause_time_ms) - break; - - size_t starting_young_length = so_length+1; - - // we make sure that the short young length that makes sense - // (one more than the S-O length) is feasible - size_t min_young_length = starting_young_length; - double min_gc_eff; - bool min_ok; - ++calculations; - min_ok = predict_gc_eff(min_young_length, so_length, - base_time_with_so_ms, - init_free_regions, target_pause_time_ms, - &min_gc_eff); - - if (min_ok) { - // the shortest young length is indeed feasible; we'll know - // set up the max young length and we'll do a binary search - // between min_young_length and max_young_length - size_t max_young_length = _free_regions_at_end_of_collection - 1; - double max_gc_eff = 0.0; - bool max_ok = false; - - // the innermost loop! (finally!) - while (max_young_length > min_young_length) { - // we'll make sure that min_young_length is always at a - // feasible config - guarantee( min_ok, "invariant" ); - - ++calculations; - max_ok = predict_gc_eff(max_young_length, so_length, - base_time_with_so_ms, - init_free_regions, target_pause_time_ms, - &max_gc_eff); + + size_t init_free_regions = + MAX2((size_t)0, _free_regions_at_end_of_collection - reserve_regions); + + // if we're still under the pause target... + if (base_time_ms <= target_pause_time_ms) { + // We make sure that the shortest young length that makes sense + // fits within the target pause time. + size_t min_young_length = 1; + + if (predict_will_fit(min_young_length, base_time_ms, + init_free_regions, target_pause_time_ms)) { + // The shortest young length will fit within the target pause time; + // we'll now check whether the absolute maximum number of young + // regions will fit in the target pause time. If not, we'll do + // a binary search between min_young_length and max_young_length + size_t abs_max_young_length = _free_regions_at_end_of_collection - 1; + size_t max_young_length = abs_max_young_length; + + if (max_young_length > min_young_length) { + // Let's check if the initial max young length will fit within the + // target pause. If so then there is no need to search for a maximal + // young length - we'll return the initial maximum + + if (predict_will_fit(max_young_length, base_time_ms, + init_free_regions, target_pause_time_ms)) { + // The maximum young length will satisfy the target pause time. + // We are done so set min young length to this maximum length. + // The code after the loop will then set final_young_length using + // the value cached in the minimum length. + min_young_length = max_young_length; + } else { + // The maximum possible number of young regions will not fit within + // the target pause time so let's search.... 
size_t diff = (max_young_length - min_young_length) / 2; - if (max_ok) { - min_young_length = max_young_length; - min_gc_eff = max_gc_eff; - min_ok = true; + max_young_length = min_young_length + diff; + + while (max_young_length > min_young_length) { + if (predict_will_fit(max_young_length, base_time_ms, + init_free_regions, target_pause_time_ms)) { + + // The current max young length will fit within the target + // pause time. Note we do not exit the loop here. By setting + // min = max, and then increasing the max below means that + // we will continue searching for an upper bound in the + // range [max..max+diff] + min_young_length = max_young_length; + } + diff = (max_young_length - min_young_length) / 2; + max_young_length = min_young_length + diff; } - max_young_length = min_young_length + diff; + // the above loop found a maximal young length that will fit + // within the target pause time. } - - // the innermost loop found a config - guarantee( min_ok, "invariant" ); - if (min_gc_eff > final_gc_eff) { - // it's the best config so far, so we'll keep it - final_gc_eff = min_gc_eff; - final_young_length = min_young_length; - final_so_length = so_length; - gc_eff_set = true; - } + assert(min_young_length <= abs_max_young_length, "just checking"); } - - // incremental the fixed S-O length and go around - so_length += so_length_incr; + final_young_length = min_young_length; } - - // this is the end of the outermost loop and we need to decide - // what to do during the next iteration - if (pass == pass_type_coarse) { - // we just did the coarse pass (first iteration) - - if (!gc_eff_set) - // we didn't find a feasible config so we'll just bail out; of - // course, it might be the case that we missed it; but I'd say - // it's a bit unlikely - done = true; - else { - // We did find a feasible config with optimal GC eff during - // the first pass. So the second pass we'll only consider the - // S-O lengths around that config with a fine increment. - - guarantee( so_length_incr == so_coarse_increments, "invariant" ); - guarantee( final_so_length >= min_so_length, "invariant" ); - -#ifdef TRACE_CALC_YOUNG_CONFIG - // leave this in for debugging, just in case - gclog_or_tty->print_cr(" coarse pass: SO length " SIZE_FORMAT, - final_so_length); -#endif // TRACE_CALC_YOUNG_CONFIG - - from_so_length = - (final_so_length - min_so_length > so_coarse_increments) ? - final_so_length - so_coarse_increments + 1 : min_so_length; - to_so_length = - (max_so_length - final_so_length > so_coarse_increments) ? 
- final_so_length + so_coarse_increments - 1 : max_so_length; - - pass = pass_type_fine; - so_length_incr = 1; - } - } else if (pass == pass_type_fine) { - // we just finished the second pass - - if (!gc_eff_set) { - // we didn't find a feasible config (yes, it's possible; - // notice that, sometimes, we go directly into the fine - // iteration and skip the coarse one) so we bail out - done = true; - } else { - // We did find a feasible config with optimal GC eff - guarantee( so_length_incr == 1, "invariant" ); - - if (final_so_length == 0) { - // The config is of an empty S-O set, so we'll just bail out - done = true; - } else { - // we'll go around once more, setting the S-O length to 95% - // of the optimal - size_t new_so_length = 950 * final_so_length / 1000; - -#ifdef TRACE_CALC_YOUNG_CONFIG - // leave this in for debugging, just in case - gclog_or_tty->print_cr(" fine pass: SO length " SIZE_FORMAT - ", setting it to " SIZE_FORMAT, - final_so_length, new_so_length); -#endif // TRACE_CALC_YOUNG_CONFIG - - from_so_length = new_so_length; - to_so_length = new_so_length; - fine_so_length = final_so_length; - - pass = pass_type_final; - } - } - } else if (pass == pass_type_final) { - // we just finished the final (third) pass - - if (!gc_eff_set) - // we didn't find a feasible config, so we'll just use the one - // we found during the second pass, which we saved - final_so_length = fine_so_length; - - // and we're done! - done = true; - } else { - guarantee( false, "should never reach here" ); - } - - // we now go around the outermost loop } + // and we're done! // we should have at least one region in the target young length _young_list_target_length = MAX2((size_t) 1, final_young_length + _recorded_survivor_regions); - if (final_so_length >= final_young_length) - // and we need to ensure that the S-O length is not greater than - // the target young length (this is being a bit careful) - final_so_length = 0; - _young_list_so_prefix_length = final_so_length; - guarantee( !_in_marking_window || !_last_full_young_gc || - _young_list_so_prefix_length == 0, "invariant" ); // let's keep an eye of how long we spend on this calculation // right now, I assume that we'll print it when we need it; we @@ -774,142 +511,91 @@ double end_time_sec = os::elapsedTime(); double elapsed_time_ms = (end_time_sec - start_time_sec) * 1000.0; -#ifdef TRACE_CALC_YOUNG_CONFIG +#ifdef TRACE_CALC_YOUNG_LENGTH // leave this in for debugging, just in case - gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT - ", SO = " SIZE_FORMAT ", " - "elapsed %1.2lf ms, calcs: " SIZE_FORMAT " (%s%s) " - SIZE_FORMAT SIZE_FORMAT, + gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT ", " + "elapsed %1.2lf ms, (%s%s) " SIZE_FORMAT SIZE_FORMAT, target_pause_time_ms, - _young_list_target_length - _young_list_so_prefix_length, - _young_list_so_prefix_length, + _young_list_target_length, elapsed_time_ms, - calculations, full_young_gcs() ? "full" : "partial", - should_initiate_conc_mark() ? " i-m" : "", + during_initial_mark_pause() ?
" i-m" : "", _in_marking_window, _in_marking_window_im); -#endif // TRACE_CALC_YOUNG_CONFIG +#endif // TRACE_CALC_YOUNG_LENGTH if (_young_list_target_length < _young_list_min_length) { - // bummer; this means that, if we do a pause when the maximal - // length dictates, we'll violate the pause spacing target (the + // bummer; this means that, if we do a pause when the maximal + // length dictates, we'll violate the pause spacing target (the // min length was calculate based on the application's current // alloc rate); // so, we have to bite the bullet, and allocate the minimum // number. We'll violate our target, but we just can't meet it. - size_t so_length = 0; - // a note further up explains why we do not want an S-O length - // during marking - if (!_in_marking_window && !_last_full_young_gc) - // but we can still try to see whether we can find an optimal - // S-O length - so_length = calculate_optimal_so_length(_young_list_min_length); - -#ifdef TRACE_CALC_YOUNG_CONFIG +#ifdef TRACE_CALC_YOUNG_LENGTH // leave this in for debugging, just in case gclog_or_tty->print_cr("adjusted target length from " - SIZE_FORMAT " to " SIZE_FORMAT - ", SO " SIZE_FORMAT, - _young_list_target_length, _young_list_min_length, - so_length); -#endif // TRACE_CALC_YOUNG_CONFIG - - _young_list_target_length = - MAX2(_young_list_min_length, (size_t)1); - _young_list_so_prefix_length = so_length; + SIZE_FORMAT " to " SIZE_FORMAT, + _young_list_target_length, _young_list_min_length); +#endif // TRACE_CALC_YOUNG_LENGTH + + _young_list_target_length = _young_list_min_length; } } else { // we are in a partially-young mode or we've run out of regions (due // to evacuation failure) -#ifdef TRACE_CALC_YOUNG_CONFIG +#ifdef TRACE_CALC_YOUNG_LENGTH // leave this in for debugging, just in case gclog_or_tty->print_cr("(partial) setting target to " SIZE_FORMAT, - ", SO " SIZE_FORMAT, - _young_list_min_length, 0); -#endif // TRACE_CALC_YOUNG_CONFIG - - // we'll do the pause as soon as possible and with no S-O prefix - // (see above for the reasons behind the latter) + _young_list_min_length); +#endif // TRACE_CALC_YOUNG_LENGTH + // we'll do the pause as soon as possible by choosing the minimum _young_list_target_length = MAX2(_young_list_min_length, (size_t) 1); - _young_list_so_prefix_length = 0; } _rs_lengths_prediction = rs_lengths; } -// This is used by: calculate_optimal_so_length(length). It returns -// the GC eff and predicted pause time for a particular config -void -G1CollectorPolicy::predict_gc_eff(size_t young_length, - size_t so_length, - double base_time_ms, - double* ret_gc_eff, - double* ret_pause_time_ms) { - double so_time_ms = predict_scan_only_time_ms(so_length); - double accum_surv_rate_adj = 0.0; - if (so_length > 0) - accum_surv_rate_adj = accum_yg_surv_rate_pred((int)(so_length - 1)); - double accum_surv_rate = - accum_yg_surv_rate_pred((int)(young_length - 1)) - accum_surv_rate_adj; - size_t bytes_to_copy = - (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes); - double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy); - double young_other_time_ms = - predict_young_other_time_ms(young_length - so_length); - double pause_time_ms = - base_time_ms + so_time_ms + copy_time_ms + young_other_time_ms; - size_t reclaimed_bytes = - (young_length - so_length) * HeapRegion::GrainBytes - bytes_to_copy; - double gc_eff = (double) reclaimed_bytes / pause_time_ms; - - *ret_gc_eff = gc_eff; - *ret_pause_time_ms = pause_time_ms; -} - -// This is used by: calculate_young_list_target_config(rs_length).
It -// returns the GC eff of a particular config. It returns false if that -// config violates any of the end conditions of the search in the -// calling method, or true upon success. The end conditions were put -// here since it's called twice and it was best not to replicate them -// in the caller. Also, passing the parameteres avoids having to -// recalculate them in the innermost loop. +// This is used by: calculate_young_list_target_length(rs_length). It +// returns true iff: +// the predicted pause time for the given young list will not overflow +// the target pause time +// and: +// the predicted amount of surviving data will not overflow +// the amount of free space available for survivor regions. +// bool -G1CollectorPolicy::predict_gc_eff(size_t young_length, - size_t so_length, - double base_time_with_so_ms, - size_t init_free_regions, - double target_pause_time_ms, - double* ret_gc_eff) { - *ret_gc_eff = 0.0; +G1CollectorPolicy::predict_will_fit(size_t young_length, + double base_time_ms, + size_t init_free_regions, + double target_pause_time_ms) { if (young_length >= init_free_regions) // end condition 1: not enough space for the young regions return false; double accum_surv_rate_adj = 0.0; - if (so_length > 0) - accum_surv_rate_adj = accum_yg_surv_rate_pred((int)(so_length - 1)); double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1)) - accum_surv_rate_adj; + size_t bytes_to_copy = (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes); + double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy); + double young_other_time_ms = - predict_young_other_time_ms(young_length - so_length); + predict_young_other_time_ms(young_length); + double pause_time_ms = - base_time_with_so_ms + copy_time_ms + young_other_time_ms; + base_time_ms + copy_time_ms + young_other_time_ms; if (pause_time_ms > target_pause_time_ms) // end condition 2: over the target pause time return false; - size_t reclaimed_bytes = - (young_length - so_length) * HeapRegion::GrainBytes - bytes_to_copy; size_t free_bytes = (init_free_regions - young_length) * HeapRegion::GrainBytes; @@ -918,9 +604,6 @@ return false; // success!
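// Standalone sketch, not part of this changeset: predict_will_fit above
// is monotone in young_length, which is what lets
// calculate_young_list_target_length search for the largest feasible
// young length with a binary search along these lines.
static size_t max_feasible_length(size_t lo, size_t hi,
                                  bool (*fits)(size_t)) {
  // precondition: fits(lo) holds
  while (hi > lo) {
    size_t mid = hi - (hi - lo) / 2;  // biased up so the loop terminates
    if (fits(mid)) {
      lo = mid;       // mid is feasible; keep searching above it
    } else {
      hi = mid - 1;   // mid is infeasible; search below it
    }
  }
  return lo;
}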
- double gc_eff = (double) reclaimed_bytes / pause_time_ms; - *ret_gc_eff = gc_eff; - return true; } @@ -937,11 +620,11 @@ void G1CollectorPolicy::check_prediction_validity() { guarantee( adaptive_young_list_length(), "should not call this otherwise" ); - size_t rs_lengths = _g1->young_list_sampled_rs_lengths(); + size_t rs_lengths = _g1->young_list()->sampled_rs_lengths(); if (rs_lengths > _rs_lengths_prediction) { // add 10% to avoid having to recalculate often size_t rs_lengths_prediction = rs_lengths * 1100 / 1000; - calculate_young_list_target_config(rs_lengths_prediction); + calculate_young_list_target_length(rs_lengths_prediction); } } @@ -963,7 +646,7 @@ #ifndef PRODUCT bool G1CollectorPolicy::verify_young_ages() { - HeapRegion* head = _g1->young_list_first_region(); + HeapRegion* head = _g1->young_list()->first_region(); return verify_young_ages(head, _short_lived_surv_rate_group); // also call verify_young_ages on any additional surv rate groups @@ -1033,13 +716,13 @@ set_full_young_gcs(true); _last_full_young_gc = false; _should_revert_to_full_young_gcs = false; - _should_initiate_conc_mark = false; + clear_initiate_conc_mark_if_possible(); + clear_during_initial_mark_pause(); _known_garbage_bytes = 0; _known_garbage_ratio = 0.0; _in_marking_window = false; _in_marking_window_im = false; - _short_lived_surv_rate_group->record_scan_only_prefix(0); _short_lived_surv_rate_group->start_adding_regions(); // also call this on any additional surv rate groups @@ -1049,11 +732,10 @@ _prev_region_num_tenured = _region_num_tenured; _free_regions_at_end_of_collection = _g1->free_regions(); - _scan_only_regions_at_end_of_collection = 0; // Reset survivors SurvRateGroup. _survivor_surv_rate_group->reset(); calculate_young_list_min_length(); - calculate_young_list_target_config(); + calculate_young_list_target_length(); } void G1CollectorPolicy::record_before_bytes(size_t bytes) { @@ -1102,8 +784,6 @@ for (int i = 0; i < _parallel_gc_threads; ++i) { _par_last_ext_root_scan_times_ms[i] = -666.0; _par_last_mark_stack_scan_times_ms[i] = -666.0; - _par_last_scan_only_times_ms[i] = -666.0; - _par_last_scan_only_regions_scanned[i] = -666.0; _par_last_update_rs_start_times_ms[i] = -666.0; _par_last_update_rs_times_ms[i] = -666.0; _par_last_update_rs_processed_buffers[i] = -666.0; @@ -1126,47 +806,13 @@ if (in_young_gc_mode()) _last_young_gc_full = false; - // do that for any other surv rate groups _short_lived_surv_rate_group->stop_adding_regions(); - size_t short_lived_so_length = _young_list_so_prefix_length; - _short_lived_surv_rate_group->record_scan_only_prefix(short_lived_so_length); - tag_scan_only(short_lived_so_length); _survivors_age_table.clear(); assert( verify_young_ages(), "region age verification" ); } -void G1CollectorPolicy::tag_scan_only(size_t short_lived_scan_only_length) { - // done in a way that it can be extended for other surv rate groups too... 
- - HeapRegion* head = _g1->young_list_first_region(); - bool finished_short_lived = (short_lived_scan_only_length == 0); - - if (finished_short_lived) - return; - - for (HeapRegion* curr = head; - curr != NULL; - curr = curr->get_next_young_region()) { - SurvRateGroup* surv_rate_group = curr->surv_rate_group(); - int age = curr->age_in_surv_rate_group(); - - if (surv_rate_group == _short_lived_surv_rate_group) { - if ((size_t)age < short_lived_scan_only_length) - curr->set_scan_only(); - else - finished_short_lived = true; - } - - - if (finished_short_lived) - return; - } - - guarantee( false, "we should never reach here" ); -} - void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) { _mark_closure_time_ms = mark_closure_time_ms; } @@ -1179,7 +825,8 @@ void G1CollectorPolicy::record_concurrent_mark_init_end_pre(double mark_init_elapsed_time_ms) { _during_marking = true; - _should_initiate_conc_mark = false; + assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now"); + clear_during_initial_mark_pause(); _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms; } @@ -1250,7 +897,6 @@ } _n_pauses_at_mark_end = _n_pauses; _n_marks_since_last_pause++; - _conc_mark_initiated = false; } void @@ -1260,7 +906,7 @@ _last_full_young_gc = true; _in_marking_window = false; if (adaptive_young_list_length()) - calculate_young_list_target_config(); + calculate_young_list_target_length(); } } @@ -1446,17 +1092,24 @@ #endif // PRODUCT if (in_young_gc_mode()) { - last_pause_included_initial_mark = _should_initiate_conc_mark; + last_pause_included_initial_mark = during_initial_mark_pause(); if (last_pause_included_initial_mark) record_concurrent_mark_init_end_pre(0.0); size_t min_used_targ = (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent; - if (cur_used_bytes > min_used_targ) { - if (cur_used_bytes <= _prev_collection_pause_used_at_end_bytes) { - } else if (!_g1->mark_in_progress() && !_last_full_young_gc) { - _should_initiate_conc_mark = true; + + if (!_g1->mark_in_progress() && !_last_full_young_gc) { + assert(!last_pause_included_initial_mark, "invariant"); + if (cur_used_bytes > min_used_targ && + cur_used_bytes > _prev_collection_pause_used_at_end_bytes) { + assert(!during_initial_mark_pause(), "we should not see this here"); + + // Note: this might have already been set, if during the last + // pause we decided to start a cycle but at the beginning of + // this pause we decided to postpone it. That's OK. 
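// Illustrative sketch, not part of this changeset, of the trigger
// evaluated above: request an initial-mark pause only when occupancy is
// over the InitiatingHeapOccupancyPercent threshold and still higher
// than it was at the end of the previous pause.
static bool should_request_initial_mark(size_t cur_used_bytes,
                                        size_t min_used_targ,
                                        size_t prev_used_at_end_bytes) {
  return cur_used_bytes > min_used_targ &&
         cur_used_bytes > prev_used_at_end_bytes;
}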
+ set_initiate_conc_mark_if_possible(); } } @@ -1488,6 +1141,7 @@ size_t freed_bytes = _cur_collection_pause_used_at_start_bytes - cur_used_bytes; size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes; + double survival_fraction = (double)surviving_bytes/ (double)_collection_set_bytes_used_before; @@ -1575,9 +1229,6 @@ double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms); double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms); - double scan_only_time = avg_value(_par_last_scan_only_times_ms); - double scan_only_regions_scanned = - sum_of_values(_par_last_scan_only_regions_scanned); double update_rs_time = avg_value(_par_last_update_rs_times_ms); double update_rs_processed_buffers = sum_of_values(_par_last_update_rs_processed_buffers); @@ -1587,7 +1238,7 @@ double parallel_other_time = _cur_collection_par_time_ms - (update_rs_time + ext_root_scan_time + mark_stack_scan_time + - scan_only_time + scan_rs_time + obj_copy_time + termination_time); + scan_rs_time + obj_copy_time + termination_time); if (update_stats) { MainBodySummary* body_summary = summary->main_body_summary(); guarantee(body_summary != NULL, "should not be null!"); @@ -1598,7 +1249,6 @@ body_summary->record_satb_drain_time_ms(0.0); body_summary->record_ext_root_scan_time_ms(ext_root_scan_time); body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time); - body_summary->record_scan_only_time_ms(scan_only_time); body_summary->record_update_rs_time_ms(update_rs_time); body_summary->record_scan_rs_time_ms(scan_rs_time); body_summary->record_obj_copy_time_ms(obj_copy_time); @@ -1652,7 +1302,7 @@ else other_time_ms -= update_rs_time + - ext_root_scan_time + mark_stack_scan_time + scan_only_time + + ext_root_scan_time + mark_stack_scan_time + scan_rs_time + obj_copy_time; } @@ -1677,9 +1327,6 @@ _par_last_update_rs_processed_buffers, true); print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms); print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms); - print_par_stats(2, "Scan-Only Scanning", _par_last_scan_only_times_ms); - print_par_buffers(3, "Scan-Only Regions", - _par_last_scan_only_regions_scanned, true); print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms); print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms); print_par_stats(2, "Termination", _par_last_termination_times_ms); @@ -1691,7 +1338,6 @@ (int)update_rs_processed_buffers); print_stats(1, "Ext Root Scanning", ext_root_scan_time); print_stats(1, "Mark Stack Scanning", mark_stack_scan_time); - print_stats(1, "Scan-Only Scanning", scan_only_time); print_stats(1, "Scan RS", scan_rs_time); print_stats(1, "Object Copying", obj_copy_time); } @@ -1706,6 +1352,8 @@ } #endif print_stats(1, "Other", other_time_ms); + print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms); + for (int i = 0; i < _aux_num; ++i) { if (_cur_aux_times_set[i]) { char buffer[96]; @@ -1747,7 +1395,7 @@ bool new_in_marking_window = _in_marking_window; bool new_in_marking_window_im = false; - if (_should_initiate_conc_mark) { + if (during_initial_mark_pause()) { new_in_marking_window = true; new_in_marking_window_im = true; } @@ -1791,16 +1439,6 @@ _cost_per_card_ms_seq->add(cost_per_card_ms); } - double cost_per_scan_only_region_ms = 0.0; - if (scan_only_regions_scanned > 0.0) { - cost_per_scan_only_region_ms = - scan_only_time / scan_only_regions_scanned; - if (_in_marking_window_im) - _cost_per_scan_only_region_ms_during_cm_seq->add(cost_per_scan_only_region_ms); - else 
- _cost_per_scan_only_region_ms_seq->add(cost_per_scan_only_region_ms); - } - size_t cards_scanned = _g1->cards_scanned(); double cost_per_entry_ms = 0.0; @@ -1836,7 +1474,7 @@ } double all_other_time_ms = pause_time_ms - - (update_rs_time + scan_only_time + scan_rs_time + obj_copy_time + + (update_rs_time + scan_rs_time + obj_copy_time + _mark_closure_time_ms + termination_time); double young_other_time_ms = 0.0; @@ -1883,11 +1521,10 @@ if (PREDICTIONS_VERBOSE) { gclog_or_tty->print_cr(""); gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d " - "REGIONS %d %d %d %d " + "REGIONS %d %d %d " "PENDING_CARDS %d %d " "CARDS_SCANNED %d %d " "RS_LENGTHS %d %d " - "SCAN_ONLY_SCAN %1.6lf %1.6lf " "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf " "SURVIVAL_RATIO %1.6lf %1.6lf " "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf " @@ -1900,12 +1537,10 @@ (last_pause_included_initial_mark) ? 1 : 0, _recorded_region_num, _recorded_young_regions, - _recorded_scan_only_regions, _recorded_non_young_regions, _predicted_pending_cards, _pending_cards, _predicted_cards_scanned, cards_scanned, _predicted_rs_lengths, _max_rs_lengths, - _predicted_scan_only_scan_time_ms, scan_only_time, _predicted_rs_update_time_ms, update_rs_time, _predicted_rs_scan_time_ms, scan_rs_time, _predicted_survival_ratio, survival_ratio, @@ -1930,14 +1565,12 @@ _in_marking_window = new_in_marking_window; _in_marking_window_im = new_in_marking_window_im; _free_regions_at_end_of_collection = _g1->free_regions(); - _scan_only_regions_at_end_of_collection = _g1->young_list_length(); calculate_young_list_min_length(); - calculate_young_list_target_config(); + calculate_young_list_target_length(); // Note that _mmu_tracker->max_gc_time() returns the time in seconds. double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0; adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms); - // _target_pause_time_ms = -1.0; @@ -1992,13 +1625,13 @@ guarantee( adjustment == 0 || adjustment == 1, "invariant" ); G1CollectedHeap* g1h = G1CollectedHeap::heap(); - size_t young_num = g1h->young_list_length(); + size_t young_num = g1h->young_list()->length(); if (young_num == 0) return 0.0; young_num += adjustment; size_t pending_cards = predict_pending_cards(); - size_t rs_lengths = g1h->young_list_sampled_rs_lengths() + + size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() + predict_rs_length_diff(); size_t card_num; if (full_young_gcs()) @@ -2082,31 +1715,22 @@ void G1CollectorPolicy::start_recording_regions() { _recorded_rs_lengths = 0; - _recorded_scan_only_regions = 0; _recorded_young_regions = 0; _recorded_non_young_regions = 0; #if PREDICTIONS_VERBOSE - _predicted_rs_lengths = 0; - _predicted_cards_scanned = 0; - _recorded_marked_bytes = 0; _recorded_young_bytes = 0; _predicted_bytes_to_copy = 0; + _predicted_rs_lengths = 0; + _predicted_cards_scanned = 0; #endif // PREDICTIONS_VERBOSE } void -G1CollectorPolicy::record_cset_region(HeapRegion* hr, bool young) { - if (young) { - ++_recorded_young_regions; - } else { - ++_recorded_non_young_regions; - } +G1CollectorPolicy::record_cset_region_info(HeapRegion* hr, bool young) { #if PREDICTIONS_VERBOSE - if (young) { - _recorded_young_bytes += hr->used(); - } else { + if (!young) { _recorded_marked_bytes += hr->max_live_bytes(); } _predicted_bytes_to_copy += predict_bytes_to_copy(hr); @@ -2117,12 +1741,37 @@ } void -G1CollectorPolicy::record_scan_only_regions(size_t scan_only_length) { - 
_recorded_scan_only_regions = scan_only_length; +G1CollectorPolicy::record_non_young_cset_region(HeapRegion* hr) { + assert(!hr->is_young(), "should not call this"); + ++_recorded_non_young_regions; + record_cset_region_info(hr, false); +} + +void +G1CollectorPolicy::set_recorded_young_regions(size_t n_regions) { + _recorded_young_regions = n_regions; +} + +void G1CollectorPolicy::set_recorded_young_bytes(size_t bytes) { +#if PREDICTIONS_VERBOSE + _recorded_young_bytes = bytes; +#endif // PREDICTIONS_VERBOSE +} + +void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) { + _recorded_rs_lengths = rs_lengths; +} + +void G1CollectorPolicy::set_predicted_bytes_to_copy(size_t bytes) { + _predicted_bytes_to_copy = bytes; } void G1CollectorPolicy::end_recording_regions() { + // The _predicted_pause_time_ms field is referenced in code + // not under PREDICTIONS_VERBOSE. Let's initialize it. + _predicted_pause_time_ms = -1.0; + #if PREDICTIONS_VERBOSE _predicted_pending_cards = predict_pending_cards(); _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff(); @@ -2133,8 +1782,6 @@ predict_non_young_card_num(_predicted_rs_lengths); _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions; - _predicted_scan_only_scan_time_ms = - predict_scan_only_time_ms(_recorded_scan_only_regions); _predicted_rs_update_time_ms = predict_rs_update_time_ms(_g1->pending_card_num()); _predicted_rs_scan_time_ms = @@ -2149,7 +1796,6 @@ predict_non_young_other_time_ms(_recorded_non_young_regions); _predicted_pause_time_ms = - _predicted_scan_only_scan_time_ms + _predicted_rs_update_time_ms + _predicted_rs_scan_time_ms + _predicted_object_copy_time_ms + @@ -2166,7 +1812,13 @@ if (predicted_time_ms > _expensive_region_limit_ms) { if (!in_young_gc_mode()) { set_full_young_gcs(true); - _should_initiate_conc_mark = true; + // We might want to do something different here. However, + // right now we don't support the non-generational G1 mode + // (and in fact we are planning to remove the associated code, + // see CR 6814390). 
So, let's leave it as is and this will be + // removed some time in the future + ShouldNotReachHere(); + set_during_initial_mark_pause(); } else // no point in doing another partial one _should_revert_to_full_young_gcs = true; @@ -2288,7 +1940,7 @@ } size_t G1CollectorPolicy::expansion_amount() { - if ((int)(recent_avg_pause_time_ratio() * 100.0) > G1GCPercent) { + if ((recent_avg_pause_time_ratio() * 100.0) > _gc_overhead_perc) { // We will double the existing space, or take // G1ExpandByPercentOfAvailable % of the available expansion // space, whichever is smaller, bounded below by a minimum @@ -2433,8 +2085,6 @@ body_summary->get_ext_root_scan_seq()); print_summary(2, "Mark Stack Scanning", body_summary->get_mark_stack_scan_seq()); - print_summary(2, "Scan-Only Scanning", - body_summary->get_scan_only_seq()); print_summary(2, "Scan RS", body_summary->get_scan_rs_seq()); print_summary(2, "Object Copy", body_summary->get_obj_copy_seq()); print_summary(2, "Termination", body_summary->get_termination_seq()); @@ -2444,7 +2094,6 @@ body_summary->get_update_rs_seq(), body_summary->get_ext_root_scan_seq(), body_summary->get_mark_stack_scan_seq(), - body_summary->get_scan_only_seq(), body_summary->get_scan_rs_seq(), body_summary->get_obj_copy_seq(), body_summary->get_termination_seq() @@ -2462,8 +2111,6 @@ body_summary->get_ext_root_scan_seq()); print_summary(1, "Mark Stack Scanning", body_summary->get_mark_stack_scan_seq()); - print_summary(1, "Scan-Only Scanning", - body_summary->get_scan_only_seq()); print_summary(1, "Scan RS", body_summary->get_scan_rs_seq()); print_summary(1, "Object Copy", body_summary->get_obj_copy_seq()); } @@ -2489,7 +2136,6 @@ body_summary->get_update_rs_seq(), body_summary->get_ext_root_scan_seq(), body_summary->get_mark_stack_scan_seq(), - body_summary->get_scan_only_seq(), body_summary->get_scan_rs_seq(), body_summary->get_obj_copy_seq() }; @@ -2583,7 +2229,7 @@ G1CollectorPolicy::should_add_next_region_to_young_list() { assert(in_young_gc_mode(), "should be in young GC mode"); bool ret; - size_t young_list_length = _g1->young_list_length(); + size_t young_list_length = _g1->young_list()->length(); size_t young_list_max_length = _young_list_target_length; if (G1FixedEdenSize) { young_list_max_length -= _max_survivor_regions; @@ -2646,7 +2292,7 @@ assert(_g1->regions_accounted_for(), "Region leakage!"); double max_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0; - size_t young_list_length = _g1->young_list_length(); + size_t young_list_length = _g1->young_list()->length(); size_t young_list_max_length = _young_list_target_length; if (G1FixedEdenSize) { young_list_max_length -= _max_survivor_regions; @@ -2655,7 +2301,7 @@ if (in_young_gc_mode()) { if (reached_target_length) { - assert( young_list_length > 0 && _g1->young_list_length() > 0, + assert( young_list_length > 0 && _g1->young_list()->length() > 0, "invariant" ); _target_pause_time_ms = max_pause_time_ms; return true; @@ -2690,6 +2336,50 @@ #endif void +G1CollectorPolicy::decide_on_conc_mark_initiation() { + // We are about to decide on whether this pause will be an + // initial-mark pause. + + // First, during_initial_mark_pause() should not be already set. We + // will set it here if we have to. However, it should be cleared by + // the end of the pause (it's only set for the duration of an + // initial-mark pause). 
+ assert(!during_initial_mark_pause(), "pre-condition"); + + if (initiate_conc_mark_if_possible()) { + // We had noticed on a previous pause that the heap occupancy has + // gone over the initiating threshold and we should start a + // concurrent marking cycle. So we might initiate one. + + bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle(); + if (!during_cycle) { + // The concurrent marking thread is not "during a cycle", i.e., + // it has completed the last one. So we can go ahead and + // initiate a new cycle. + + set_during_initial_mark_pause(); + + // And we can now clear initiate_conc_mark_if_possible() as + // we've already acted on it. + clear_initiate_conc_mark_if_possible(); + } else { + // The concurrent marking thread is still finishing up the + // previous cycle. If we start one right now the two cycles + // overlap. In particular, the concurrent marking thread might + // be in the process of clearing the next marking bitmap (which + // we will use for the next cycle if we start one). Starting a + // cycle now will be bad given that parts of the marking + // information might get cleared by the marking thread. And we + // cannot wait for the marking thread to finish the cycle as it + // periodically yields while clearing the next marking bitmap + // and, if it's in a yield point, it's waiting for us to + // finish. So, at this point we will not start a cycle and we'll + // let the concurrent marking thread complete the last one. + } + } +} + +void G1CollectorPolicy_BestRegionsFirst:: record_collection_pause_start(double start_time_sec, size_t start_used) { G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used); @@ -2872,22 +2562,24 @@ } } -// Add the heap region to the collection set and return the conservative -// estimate of the number of live bytes. +// Add the heap region at the head of the non-incremental collection set void G1CollectorPolicy:: add_to_collection_set(HeapRegion* hr) { + assert(_inc_cset_build_state == Active, "Precondition"); + assert(!hr->is_young(), "non-incremental add of young region"); + if (G1PrintHeapRegions) { - gclog_or_tty->print_cr("added region to cset %d:["PTR_FORMAT", "PTR_FORMAT"], " - "top "PTR_FORMAT", young %s", - hr->hrs_index(), hr->bottom(), hr->end(), - hr->top(), (hr->is_young()) ? "YES" : "NO"); + gclog_or_tty->print_cr("added region to cset " + "%d:["PTR_FORMAT", "PTR_FORMAT"], " + "top "PTR_FORMAT", %s", + hr->hrs_index(), hr->bottom(), hr->end(), + hr->top(), hr->is_young() ? 
"YOUNG" : "NOT_YOUNG"); } if (_g1->mark_in_progress()) _g1->concurrent_mark()->registerCSetRegion(hr); - assert(!hr->in_collection_set(), - "should not already be in the CSet"); + assert(!hr->in_collection_set(), "should not already be in the CSet"); hr->set_in_collection_set(true); hr->set_next_in_collection_set(_collection_set); _collection_set = hr; @@ -2896,10 +2588,230 @@ _g1->register_region_with_in_cset_fast_test(hr); } -void -G1CollectorPolicy_BestRegionsFirst:: -choose_collection_set() { - double non_young_start_time_sec; +// Initialize the per-collection-set information +void G1CollectorPolicy::start_incremental_cset_building() { + assert(_inc_cset_build_state == Inactive, "Precondition"); + + _inc_cset_head = NULL; + _inc_cset_tail = NULL; + _inc_cset_size = 0; + _inc_cset_bytes_used_before = 0; + + if (in_young_gc_mode()) { + _inc_cset_young_index = 0; + } + + _inc_cset_max_finger = 0; + _inc_cset_recorded_young_bytes = 0; + _inc_cset_recorded_rs_lengths = 0; + _inc_cset_predicted_elapsed_time_ms = 0; + _inc_cset_predicted_bytes_to_copy = 0; + _inc_cset_build_state = Active; +} + +void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) { + // This routine is used when: + // * adding survivor regions to the incremental cset at the end of an + // evacuation pause, + // * adding the current allocation region to the incremental cset + // when it is retired, and + // * updating existing policy information for a region in the + // incremental cset via young list RSet sampling. + // Therefore this routine may be called at a safepoint by the + // VM thread, or in-between safepoints by mutator threads (when + // retiring the current allocation region) or a concurrent + // refine thread (RSet sampling). + + double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true); + size_t used_bytes = hr->used(); + + _inc_cset_recorded_rs_lengths += rs_length; + _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms; + + _inc_cset_bytes_used_before += used_bytes; + + // Cache the values we have added to the aggregated informtion + // in the heap region in case we have to remove this region from + // the incremental collection set, or it is updated by the + // rset sampling code + hr->set_recorded_rs_length(rs_length); + hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms); + +#if PREDICTIONS_VERBOSE + size_t bytes_to_copy = predict_bytes_to_copy(hr); + _inc_cset_predicted_bytes_to_copy += bytes_to_copy; + + // Record the number of bytes used in this region + _inc_cset_recorded_young_bytes += used_bytes; + + // Cache the values we have added to the aggregated informtion + // in the heap region in case we have to remove this region from + // the incremental collection set, or it is updated by the + // rset sampling code + hr->set_predicted_bytes_to_copy(bytes_to_copy); +#endif // PREDICTIONS_VERBOSE +} + +void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) { + // This routine is currently only called as part of the updating of + // existing policy information for regions in the incremental cset that + // is performed by the concurrent refine thread(s) as part of young list + // RSet sampling. Therefore we should not be at a safepoint. 
+ + assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint"); + assert(hr->is_young(), "it should be"); + + size_t used_bytes = hr->used(); + size_t old_rs_length = hr->recorded_rs_length(); + double old_elapsed_time_ms = hr->predicted_elapsed_time_ms(); + + // Subtract the old recorded/predicted policy information for + // the given heap region from the collection set info. + _inc_cset_recorded_rs_lengths -= old_rs_length; + _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms; + + _inc_cset_bytes_used_before -= used_bytes; + + // Clear the values cached in the heap region + hr->set_recorded_rs_length(0); + hr->set_predicted_elapsed_time_ms(0); + +#if PREDICTIONS_VERBOSE + size_t old_predicted_bytes_to_copy = hr->predicted_bytes_to_copy(); + _inc_cset_predicted_bytes_to_copy -= old_predicted_bytes_to_copy; + + // Subtract the number of bytes used in this region + _inc_cset_recorded_young_bytes -= used_bytes; + + // Clear the values cached in the heap region + hr->set_predicted_bytes_to_copy(0); +#endif // PREDICTIONS_VERBOSE +} + +void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) { + // Update the collection set information that is dependent on the new RS length + assert(hr->is_young(), "Precondition"); + + remove_from_incremental_cset_info(hr); + add_to_incremental_cset_info(hr, new_rs_length); +} + +void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) { + assert( hr->is_young(), "invariant"); + assert( hr->young_index_in_cset() == -1, "invariant" ); + assert(_inc_cset_build_state == Active, "Precondition"); + + // We need to clear and set the cached recorded/cached collection set + // information in the heap region here (before the region gets added + // to the collection set). An individual heap region's cached values + // are calculated, aggregated with the policy collection set info, + // and cached in the heap region here (initially) and (subsequently) + // by the Young List sampling code. + + size_t rs_length = hr->rem_set()->occupied(); + add_to_incremental_cset_info(hr, rs_length); + + HeapWord* hr_end = hr->end(); + _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end); + + assert(!hr->in_collection_set(), "invariant"); + hr->set_in_collection_set(true); + assert( hr->next_in_collection_set() == NULL, "invariant"); + + _inc_cset_size++; + _g1->register_region_with_in_cset_fast_test(hr); + + hr->set_young_index_in_cset((int) _inc_cset_young_index); + ++_inc_cset_young_index; +} + +// Add the region at the RHS of the incremental cset +void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) { + // We should only ever be appending survivors at the end of a pause + assert( hr->is_survivor(), "Logic"); + + // Do the 'common' stuff + add_region_to_incremental_cset_common(hr); + + // Now add the region at the right hand side + if (_inc_cset_tail == NULL) { + assert(_inc_cset_head == NULL, "invariant"); + _inc_cset_head = hr; + } else { + _inc_cset_tail->set_next_in_collection_set(hr); + } + _inc_cset_tail = hr; + + if (G1PrintHeapRegions) { + gclog_or_tty->print_cr(" added region to incremental cset (RHS) " + "%d:["PTR_FORMAT", "PTR_FORMAT"], " + "top "PTR_FORMAT", young %s", + hr->hrs_index(), hr->bottom(), hr->end(), + hr->top(), (hr->is_young()) ? 
"YES" : "NO"); + } +} + +// Add the region to the LHS of the incremental cset +void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) { + // Survivors should be added to the RHS at the end of a pause + assert(!hr->is_survivor(), "Logic"); + + // Do the 'common' stuff + add_region_to_incremental_cset_common(hr); + + // Add the region at the left hand side + hr->set_next_in_collection_set(_inc_cset_head); + if (_inc_cset_head == NULL) { + assert(_inc_cset_tail == NULL, "Invariant"); + _inc_cset_tail = hr; + } + _inc_cset_head = hr; + + if (G1PrintHeapRegions) { + gclog_or_tty->print_cr(" added region to incremental cset (LHS) " + "%d:["PTR_FORMAT", "PTR_FORMAT"], " + "top "PTR_FORMAT", young %s", + hr->hrs_index(), hr->bottom(), hr->end(), + hr->top(), (hr->is_young()) ? "YES" : "NO"); + } +} + +#ifndef PRODUCT +void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) { + assert(list_head == inc_cset_head() || list_head == collection_set(), "must be"); + + st->print_cr("\nCollection_set:"); + HeapRegion* csr = list_head; + while (csr != NULL) { + HeapRegion* next = csr->next_in_collection_set(); + assert(csr->in_collection_set(), "bad CS"); + st->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, " + "age: %4d, y: %d, surv: %d", + csr->bottom(), csr->end(), + csr->top(), + csr->prev_top_at_mark_start(), + csr->next_top_at_mark_start(), + csr->top_at_conc_mark_count(), + csr->age_in_surv_rate_group_cond(), + csr->is_young(), + csr->is_survivor()); + csr = next; + } +} +#endif // !PRODUCT + +bool +G1CollectorPolicy_BestRegionsFirst::choose_collection_set() { + // Set this here - in case we're not doing young collections. + double non_young_start_time_sec = os::elapsedTime(); + + // The result that this routine will return. This will be set to + // false if: + // * we're doing a young or partially young collection and we + // have added the youg regions to collection set, or + // * we add old regions to the collection set. + bool abandon_collection = true; + start_recording_regions(); guarantee(_target_pause_time_ms > -1.0 @@ -2952,47 +2864,79 @@ if (G1PolicyVerbose > 0) { gclog_or_tty->print_cr("Adding %d young regions to the CSet", - _g1->young_list_length()); + _g1->young_list()->length()); } + _young_cset_length = 0; _last_young_gc_full = full_young_gcs() ? true : false; + if (_last_young_gc_full) ++_full_young_pause_num; else ++_partial_young_pause_num; - hr = _g1->pop_region_from_young_list(); + + // The young list is laid with the survivor regions from the previous + // pause are appended to the RHS of the young list, i.e. + // [Newly Young Regions ++ Survivors from last pause]. 
+ + hr = _g1->young_list()->first_survivor_region(); while (hr != NULL) { - - assert( hr->young_index_in_cset() == -1, "invariant" ); - assert( hr->age_in_surv_rate_group() != -1, "invariant" ); - hr->set_young_index_in_cset((int) _young_cset_length); - - ++_young_cset_length; - double predicted_time_ms = predict_region_elapsed_time_ms(hr, true); - time_remaining_ms -= predicted_time_ms; - predicted_pause_time_ms += predicted_time_ms; - assert(!hr->in_collection_set(), "invariant"); - add_to_collection_set(hr); - record_cset_region(hr, true); - max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes); - if (G1PolicyVerbose > 0) { - gclog_or_tty->print_cr(" Added [" PTR_FORMAT ", " PTR_FORMAT") to CS.", - hr->bottom(), hr->end()); - gclog_or_tty->print_cr(" (" SIZE_FORMAT " KB left in heap.)", - max_live_bytes/K); - } - hr = _g1->pop_region_from_young_list(); + assert(hr->is_survivor(), "badly formed young list"); + hr->set_young(); + hr = hr->get_next_young_region(); } - record_scan_only_regions(_g1->young_list_scan_only_length()); + // Clear the fields that point to the survivor list - they are + // all young now. + _g1->young_list()->clear_survivors(); + + if (_g1->mark_in_progress()) + _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger); + + _young_cset_length = _inc_cset_young_index; + _collection_set = _inc_cset_head; + _collection_set_size = _inc_cset_size; + _collection_set_bytes_used_before = _inc_cset_bytes_used_before; + + // For young regions in the collection set, we assume the worst + // case of complete survival + max_live_bytes -= _inc_cset_size * HeapRegion::GrainBytes; + + time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms; + predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms; + + // The number of recorded young regions is the incremental + // collection set's current size + set_recorded_young_regions(_inc_cset_size); + set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths); + set_recorded_young_bytes(_inc_cset_recorded_young_bytes); +#if PREDICTIONS_VERBOSE + set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy); +#endif // PREDICTIONS_VERBOSE + + if (G1PolicyVerbose > 0) { + gclog_or_tty->print_cr(" Added " PTR_FORMAT " Young Regions to CS.", + _inc_cset_size); + gclog_or_tty->print_cr(" (" SIZE_FORMAT " KB left in heap.)", + max_live_bytes/K); + } + + assert(_inc_cset_size == _g1->young_list()->length(), "Invariant"); + if (_inc_cset_size > 0) { + assert(_collection_set != NULL, "Invariant"); + abandon_collection = false; + } double young_end_time_sec = os::elapsedTime(); _recorded_young_cset_choice_time_ms = (young_end_time_sec - young_start_time_sec) * 1000.0; - non_young_start_time_sec = os::elapsedTime(); - - if (_young_cset_length > 0 && _last_young_gc_full) { + // We are doing young collections so reset this. + non_young_start_time_sec = young_end_time_sec; + + // Note we can use either _collection_set_size or + // _young_cset_length here + if (_collection_set_size > 0 && _last_young_gc_full) { // don't bother adding more regions... goto choose_collection_set_end; } @@ -3002,6 +2946,11 @@ bool should_continue = true; NumberSeq seq; double avg_prediction = 100000000000000000.0; // something very large + + // Save the current size of the collection set to detect + // if we actually added any old regions. 
+ size_t n_young_regions = _collection_set_size; + do { hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms, avg_prediction); @@ -3010,7 +2959,7 @@ time_remaining_ms -= predicted_time_ms; predicted_pause_time_ms += predicted_time_ms; add_to_collection_set(hr); - record_cset_region(hr, false); + record_non_young_cset_region(hr); max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes); if (G1PolicyVerbose > 0) { gclog_or_tty->print_cr(" (" SIZE_FORMAT " KB left in heap.)", @@ -3028,9 +2977,17 @@ if (!adaptive_young_list_length() && _collection_set_size < _young_list_fixed_length) _should_revert_to_full_young_gcs = true; + + if (_collection_set_size > n_young_regions) { + // We actually added old regions to the collection set + // so we are not abandoning this collection. + abandon_collection = false; + } } choose_collection_set_end: + stop_incremental_cset_building(); + count_CS_bytes_used(); end_recording_regions(); @@ -3038,6 +2995,8 @@ double non_young_end_time_sec = os::elapsedTime(); _recorded_non_young_cset_choice_time_ms = (non_young_end_time_sec - non_young_start_time_sec) * 1000.0; + + return abandon_collection; } void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() { diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,7 +61,6 @@ define_num_seq(parallel) // parallel only define_num_seq(ext_root_scan) define_num_seq(mark_stack_scan) - define_num_seq(scan_only) define_num_seq(update_rs) define_num_seq(scan_rs) define_num_seq(scan_new_refs) // Only for temp use; added to @@ -174,8 +173,6 @@ double* _par_last_ext_root_scan_times_ms; double* _par_last_mark_stack_scan_times_ms; - double* _par_last_scan_only_times_ms; - double* _par_last_scan_only_regions_scanned; double* _par_last_update_rs_start_times_ms; double* _par_last_update_rs_times_ms; double* _par_last_update_rs_processed_buffers; @@ -196,7 +193,6 @@ bool _adaptive_young_list_length; size_t _young_list_min_length; size_t _young_list_target_length; - size_t _young_list_so_prefix_length; size_t _young_list_fixed_length; size_t _young_cset_length; @@ -215,6 +211,8 @@ SurvRateGroup* _survivor_surv_rate_group; // add here any more surv rate groups + double _gc_overhead_perc; + bool during_marking() { return _during_marking; } @@ -232,7 +230,6 @@ TruncatedSeq* _pending_card_diff_seq; TruncatedSeq* _rs_length_diff_seq; TruncatedSeq* _cost_per_card_ms_seq; - TruncatedSeq* _cost_per_scan_only_region_ms_seq; TruncatedSeq* _fully_young_cards_per_entry_ratio_seq; TruncatedSeq* _partially_young_cards_per_entry_ratio_seq; TruncatedSeq* _cost_per_entry_ms_seq; @@ -247,19 +244,16 @@ TruncatedSeq* _rs_lengths_seq; TruncatedSeq* _cost_per_byte_ms_during_cm_seq; - TruncatedSeq* _cost_per_scan_only_region_ms_during_cm_seq; TruncatedSeq* _young_gc_eff_seq; TruncatedSeq* _max_conc_overhead_seq; size_t _recorded_young_regions; - size_t _recorded_scan_only_regions; size_t _recorded_non_young_regions; size_t _recorded_region_num; size_t _free_regions_at_end_of_collection; - size_t 
_scan_only_regions_at_end_of_collection; size_t _recorded_rs_lengths; size_t _max_rs_lengths; @@ -275,7 +269,6 @@ double _predicted_survival_ratio; double _predicted_rs_update_time_ms; double _predicted_rs_scan_time_ms; - double _predicted_scan_only_scan_time_ms; double _predicted_object_copy_time_ms; double _predicted_constant_other_time_ms; double _predicted_young_other_time_ms; @@ -342,8 +335,6 @@ bool verify_young_ages(); #endif // PRODUCT - void tag_scan_only(size_t short_lived_scan_only_length); - double get_new_prediction(TruncatedSeq* seq) { return MAX2(seq->davg() + sigma() * seq->dsd(), seq->davg() * confidence_factor(seq->num())); @@ -429,23 +420,6 @@ get_new_prediction(_partially_young_cost_per_entry_ms_seq); } - double predict_scan_only_time_ms_during_cm(size_t scan_only_region_num) { - if (_cost_per_scan_only_region_ms_during_cm_seq->num() < 3) - return 1.5 * (double) scan_only_region_num * - get_new_prediction(_cost_per_scan_only_region_ms_seq); - else - return (double) scan_only_region_num * - get_new_prediction(_cost_per_scan_only_region_ms_during_cm_seq); - } - - double predict_scan_only_time_ms(size_t scan_only_region_num) { - if (_in_marking_window_im) - return predict_scan_only_time_ms_during_cm(scan_only_region_num); - else - return (double) scan_only_region_num * - get_new_prediction(_cost_per_scan_only_region_ms_seq); - } - double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) { if (_cost_per_byte_ms_during_cm_seq->num() < 3) return 1.1 * (double) bytes_to_copy * @@ -488,24 +462,21 @@ size_t predict_bytes_to_copy(HeapRegion* hr); double predict_region_elapsed_time_ms(HeapRegion* hr, bool young); - // for use by: calculate_optimal_so_length(length) - void predict_gc_eff(size_t young_region_num, - size_t so_length, - double base_time_ms, - double *gc_eff, - double *pause_time_ms); - - // for use by: calculate_young_list_target_config(rs_length) - bool predict_gc_eff(size_t young_region_num, - size_t so_length, - double base_time_with_so_ms, - size_t init_free_regions, - double target_pause_time_ms, - double* gc_eff); + // for use by: calculate_young_list_target_length(rs_length) + bool predict_will_fit(size_t young_region_num, + double base_time_ms, + size_t init_free_regions, + double target_pause_time_ms); void start_recording_regions(); - void record_cset_region(HeapRegion* hr, bool young); - void record_scan_only_regions(size_t scan_only_length); + void record_cset_region_info(HeapRegion* hr, bool young); + void record_non_young_cset_region(HeapRegion* hr); + + void set_recorded_young_regions(size_t n_regions); + void set_recorded_young_bytes(size_t bytes); + void set_recorded_rs_lengths(size_t rs_lengths); + void set_predicted_bytes_to_copy(size_t bytes); + void end_recording_regions(); void record_vtime_diff_ms(double vtime_diff_ms) { @@ -636,11 +607,74 @@ void update_recent_gc_times(double end_time_sec, double elapsed_ms); // The head of the list (via "next_in_collection_set()") representing the - // current collection set. + // current collection set. Set from the incrementally built collection + // set at the start of the pause. HeapRegion* _collection_set; + + // The number of regions in the collection set. Set from the incrementally + // built collection set at the start of an evacuation pause. size_t _collection_set_size; + + // The number of bytes in the collection set before the pause. Set from + // the incrementally built collection set at the start of an evacuation + // pause. 
size_t _collection_set_bytes_used_before; + // The associated information that is maintained while the incremental + // collection set is being built with young regions. Used to populate + // the recorded info for the evacuation pause. + + enum CSetBuildType { + Active, // We are actively building the collection set + Inactive // We are not actively building the collection set + }; + + CSetBuildType _inc_cset_build_state; + + // The head of the incrementally built collection set. + HeapRegion* _inc_cset_head; + + // The tail of the incrementally built collection set. + HeapRegion* _inc_cset_tail; + + // The number of regions in the incrementally built collection set. + // Used to set _collection_set_size at the start of an evacuation + // pause. + size_t _inc_cset_size; + + // Used as the index in the surviving young words structure + // which tracks the amount of space, for each young region, + // that survives the pause. + size_t _inc_cset_young_index; + + // The number of bytes in the incrementally built collection set. + // Used to set _collection_set_bytes_used_before at the start of + // an evacuation pause. + size_t _inc_cset_bytes_used_before; + + // Used to record the highest end of any heap region in the collection set + HeapWord* _inc_cset_max_finger; + + // The number of recorded used bytes in the young regions + // of the collection set. This is the sum of the used() bytes + // of retired young regions in the collection set. + size_t _inc_cset_recorded_young_bytes; + + // The RSet lengths recorded for regions in the collection set + // (updated by the periodic sampling of the regions in the + // young list/collection set). + size_t _inc_cset_recorded_rs_lengths; + + // The predicted elapsed time it will take to collect the regions + // in the collection set (updated by the periodic sampling of the + // regions in the young list/collection set). + double _inc_cset_predicted_elapsed_time_ms; + + // The predicted bytes to copy for the regions in the collection + // set (updated by the periodic sampling of the regions in the + // young list/collection set). + size_t _inc_cset_predicted_bytes_to_copy; + // Info about marking. int _n_marks; // Sticky at 2, so we know when we've done at least 2. @@ -722,11 +756,31 @@ size_t _n_marks_since_last_pause; - // True iff CM has been initiated. - bool _conc_mark_initiated; + // At the end of a pause we check the heap occupancy and we decide + // whether we will start a marking cycle during the next pause. If + // we decide that we want to do that, we will set this parameter to + // true. So, this parameter will stay true between the end of a + // pause and the beginning of a subsequent pause (not necessarily + // the next one, see the comments on the next field) when we decide + // that we will indeed start a marking cycle and do the initial-mark + // work. + volatile bool _initiate_conc_mark_if_possible; - // True iff CM should be initiated - bool _should_initiate_conc_mark; + // If initiate_conc_mark_if_possible() is set at the beginning of a + // pause, it is a suggestion that the pause should start a marking + // cycle by doing the initial-mark work. However, it is possible + // that the concurrent marking thread is still finishing up the + // previous marking cycle (e.g., clearing the next marking + // bitmap). If that is the case we cannot start a new cycle and + // we'll have to wait for the concurrent marking thread to finish + // what it is doing. In this case we will postpone the marking cycle + // initiation decision until the next pause.
When we eventually decide + // to start a cycle, we will set _during_initial_mark_pause which + // will stay true until the end of the initial-mark pause and it's + // the condition that indicates that a pause is doing the + // initial-mark work. + volatile bool _during_initial_mark_pause; + bool _should_revert_to_full_young_gcs; bool _last_full_young_gc; @@ -739,9 +793,8 @@ double _mark_closure_time_ms; void calculate_young_list_min_length(); - void calculate_young_list_target_config(); - void calculate_young_list_target_config(size_t rs_lengths); - size_t calculate_optimal_so_length(size_t young_list_length); + void calculate_young_list_target_length(); + void calculate_young_list_target_length(size_t rs_lengths); public: @@ -846,11 +899,6 @@ _par_last_mark_stack_scan_times_ms[worker_i] = ms; } - void record_scan_only_time(int worker_i, double ms, int n) { - _par_last_scan_only_times_ms[worker_i] = ms; - _par_last_scan_only_regions_scanned[worker_i] = (double) n; - } - void record_satb_drain_time(double ms) { _cur_satb_drain_time_ms = ms; _satb_drain_time_set = true; @@ -965,23 +1013,82 @@ // Choose a new collection set. Marks the chosen regions as being // "in_collection_set", and links them together. The head and number of // the collection set are available via access methods. - virtual void choose_collection_set() = 0; - - void clear_collection_set() { _collection_set = NULL; } + virtual bool choose_collection_set() = 0; // The head of the list (via "next_in_collection_set()") representing the // current collection set. HeapRegion* collection_set() { return _collection_set; } + void clear_collection_set() { _collection_set = NULL; } + // The number of elements in the current collection set. size_t collection_set_size() { return _collection_set_size; } // Add "hr" to the CS. void add_to_collection_set(HeapRegion* hr); - bool should_initiate_conc_mark() { return _should_initiate_conc_mark; } - void set_should_initiate_conc_mark() { _should_initiate_conc_mark = true; } - void unset_should_initiate_conc_mark(){ _should_initiate_conc_mark = false; } + // Incremental CSet Support + + // The head of the incrementally built collection set. + HeapRegion* inc_cset_head() { return _inc_cset_head; } + + // The tail of the incrementally built collection set. + HeapRegion* inc_set_tail() { return _inc_cset_tail; } + + // The number of elements in the incrementally built collection set. + size_t inc_cset_size() { return _inc_cset_size; } + + // Initialize incremental collection set info. + void start_incremental_cset_building(); + + void clear_incremental_cset() { + _inc_cset_head = NULL; + _inc_cset_tail = NULL; + } + + // Stop adding regions to the incremental collection set + void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; } + + // Add/remove information about hr to the aggregated information + // for the incrementally built collection set. + void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length); + void remove_from_incremental_cset_info(HeapRegion* hr); + + // Update information about hr in the aggregated information for + // the incrementally built collection set. + void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length); + +private: + // Update the incremental cset information when adding a region + // (should not be called directly). + void add_region_to_incremental_cset_common(HeapRegion* hr); + +public: + // Add hr to the LHS of the incremental collection set. 
+ void add_region_to_incremental_cset_lhs(HeapRegion* hr); + + // Add hr to the RHS of the incremental collection set. + void add_region_to_incremental_cset_rhs(HeapRegion* hr); + +#ifndef PRODUCT + void print_collection_set(HeapRegion* list_head, outputStream* st); +#endif // !PRODUCT + + bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; } + void set_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = true; } + void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; } + + bool during_initial_mark_pause() { return _during_initial_mark_pause; } + void set_during_initial_mark_pause() { _during_initial_mark_pause = true; } + void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; } + + // This is called at the very beginning of an evacuation pause (it + // has to be the first thing that the pause does). If + // initiate_conc_mark_if_possible() is true, and the concurrent + // marking thread has completed its work during the previous cycle, + // it will set during_initial_mark_pause() so that the pause does + // the initial-mark work and starts a marking cycle. + void decide_on_conc_mark_initiation(); // If an expansion would be appropriate, because recent GC overhead had // exceeded the desired limit, return an amount to expand by. @@ -1157,7 +1264,7 @@ // If the estimated is less than desirable, resize if possible. void expand_if_possible(size_t numRegions); - virtual void choose_collection_set(); + virtual bool choose_collection_set(); virtual void record_collection_pause_start(double start_time_sec, size_t start_used); virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes, diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/g1/g1MarkSweep.cpp --- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,6 +31,12 @@ bool clear_all_softrefs) { assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); + SharedHeap* sh = SharedHeap::heap(); +#ifdef ASSERT + if (sh->collector_policy()->should_clear_all_soft_refs()) { + assert(clear_all_softrefs, "Policy should have been checked earlier"); + } +#endif // hook up weak ref data so it can be used during Mark-Sweep assert(GenMarkSweep::ref_processor() == NULL, "no stomping"); assert(rp != NULL, "should be non-NULL"); @@ -44,7 +50,6 @@ // Increment the invocation count for the permanent generation, since it is // implicitly collected whenever we do a full mark sweep collection. - SharedHeap* sh = SharedHeap::heap(); sh->perm_gen()->stat_record()->invocations++; bool marked_for_unloading = false; diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/g1/g1_globals.hpp --- a/src/share/vm/gc_implementation/g1/g1_globals.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -28,9 +28,6 @@ #define G1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw) \ \ - product(intx, G1ParallelGCAllocBufferSize, 8*K, \ - "Size of parallel G1 allocation buffers in to-space.") \ - \ product(intx, G1ConfidencePercent, 50, \ "Confidence level for MMU/pause predictions") \ \ @@ -40,9 +37,6 @@ develop(bool, G1Gen, true, \ "If true, it will enable the generational G1") \ \ - develop(intx, G1GCPercent, 10, \ - "The desired percent time spent on GC") \ - \ develop(intx, G1PolicyVerbose, 0, \ "The verbosity level on G1 policy decisions") \ \ @@ -232,10 +226,6 @@ "the number of regions for which we'll print a surv rate " \ "summary.") \ \ - develop(bool, G1UseScanOnlyPrefix, false, \ - "It determines whether the system will calculate an optimum " \ - "scan-only set.") \ - \ product(intx, G1ReservePercent, 10, \ "It determines the minimum reserve we should have in the heap " \ "to minimize the probability of promotion failure.") \ @@ -270,11 +260,11 @@ product(uintx, G1HeapRegionSize, 0, \ "Size of the G1 regions.") \ \ - experimental(bool, G1UseParallelRSetUpdating, false, \ + experimental(bool, G1UseParallelRSetUpdating, true, \ "Enables the parallelization of remembered set updating " \ "during evacuation pauses") \ \ - experimental(bool, G1UseParallelRSetScanning, false, \ + experimental(bool, G1UseParallelRSetScanning, true, \ "Enables the parallelization of remembered set scanning " \ "during evacuation pauses") \ \ diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/g1/heapRegion.cpp --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -75,6 +75,16 @@ virtual void do_oop(narrowOop* p) { do_oop_work(p); } virtual void do_oop( oop* p) { do_oop_work(p); } + void print_object(outputStream* out, oop obj) { +#ifdef PRODUCT + klassOop k = obj->klass(); + const char* class_name = instanceKlass::cast(k)->external_name(); + out->print_cr("class name %s", class_name); +#else // PRODUCT + obj->print_on(out); +#endif // PRODUCT + } + template void do_oop_work(T* p) { assert(_containing_obj != NULL, "Precondition"); assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking), @@ -90,21 +100,29 @@ gclog_or_tty->print_cr("----------"); } if (!_g1h->is_in_closed_subset(obj)) { - gclog_or_tty->print_cr("Field "PTR_FORMAT - " of live obj "PTR_FORMAT - " points to obj "PTR_FORMAT - " not in the heap.", - p, (void*) _containing_obj, (void*) obj); - } else { + HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p); gclog_or_tty->print_cr("Field "PTR_FORMAT - " of live obj "PTR_FORMAT - " points to dead obj "PTR_FORMAT".", - p, (void*) _containing_obj, (void*) obj); + " of live obj "PTR_FORMAT" in region " + "["PTR_FORMAT", "PTR_FORMAT")", + p, (void*) _containing_obj, + from->bottom(), from->end()); + print_object(gclog_or_tty, _containing_obj); + gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap", + (void*) obj); + } else { + HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p); + HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj); + gclog_or_tty->print_cr("Field "PTR_FORMAT + " of live obj "PTR_FORMAT" in region " + "["PTR_FORMAT", "PTR_FORMAT")", + p, (void*) _containing_obj, + from->bottom(), from->end()); + print_object(gclog_or_tty, _containing_obj); + gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region " + "["PTR_FORMAT", "PTR_FORMAT")", + (void*) obj, to->bottom(), to->end()); + print_object(gclog_or_tty, obj); } - gclog_or_tty->print_cr("Live obj:"); - _containing_obj->print_on(gclog_or_tty); - gclog_or_tty->print_cr("Bad referent:"); - obj->print_on(gclog_or_tty); gclog_or_tty->print_cr("----------"); _failures = true; failed = true; @@ -432,7 +450,9 @@ _young_type(NotYoung), _next_young_region(NULL), _next_dirty_cards_region(NULL), _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1), - _rem_set(NULL), _zfs(NotZeroFilled) + _rem_set(NULL), _zfs(NotZeroFilled), + _recorded_rs_length(0), _predicted_elapsed_time_ms(0), + _predicted_bytes_to_copy(0) { _orig_end = mr.end(); // Note that initialize() will set the start of the unmarked area of the @@ -715,7 +735,7 @@ else st->print(" "); if (is_young()) - st->print(is_scan_only() ? " SO" : (is_survivor() ? " SU" : " Y ")); + st->print(is_survivor() ? " SU" : " Y "); else st->print(" "); if (is_empty()) @@ -723,6 +743,8 @@ else st->print(" "); st->print(" %5d", _gc_time_stamp); + st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT, + prev_top_at_mark_start(), next_top_at_mark_start()); G1OffsetTableContigSpace::print_on(st); } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/g1/heapRegion.hpp --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -247,7 +246,6 @@ enum YoungType { NotYoung, // a region is not young - ScanOnly, // a region is young and scan-only Young, // a region is young Survivor // a region is young and it contains // survivor @@ -292,6 +291,20 @@ _young_type = new_type; } + // Cached attributes used in the collection set policy information + + // The RSet length that was added to the total value + // for the collection set. + size_t _recorded_rs_length; + + // The predicted elapsed time that was added to the total value + // for the collection set. + double _predicted_elapsed_time_ms; + + // The predicted number of bytes to copy that was added to + // the total value for the collection set. + size_t _predicted_bytes_to_copy; + public: // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros. HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray, @@ -614,7 +627,6 @@ // bool is_young() const { return _young_type != NotYoung; } - bool is_scan_only() const { return _young_type == ScanOnly; } bool is_survivor() const { return _young_type == Survivor; } int young_index_in_cset() const { return _young_index_in_cset; } @@ -629,12 +641,6 @@ return _surv_rate_group->age_in_group(_age_index); } - void recalculate_age_in_surv_rate_group() { - assert( _surv_rate_group != NULL, "pre-condition" ); - assert( _age_index > -1, "pre-condition" ); - _age_index = _surv_rate_group->recalculate_age_index(_age_index); - } - void record_surv_words_in_group(size_t words_survived) { assert( _surv_rate_group != NULL, "pre-condition" ); assert( _age_index > -1, "pre-condition" ); @@ -676,8 +682,6 @@ void set_young() { set_young_type(Young); } - void set_scan_only() { set_young_type(ScanOnly); } - void set_survivor() { set_young_type(Survivor); } void set_not_young() { set_young_type(NotYoung); } @@ -775,6 +779,22 @@ _zero_filler = NULL; } + size_t recorded_rs_length() const { return _recorded_rs_length; } + double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; } + size_t predicted_bytes_to_copy() const { return _predicted_bytes_to_copy; } + + void set_recorded_rs_length(size_t rs_length) { + _recorded_rs_length = rs_length; + } + + void set_predicted_elapsed_time_ms(double ms) { + _predicted_elapsed_time_ms = ms; + } + + void set_predicted_bytes_to_copy(size_t bytes) { + _predicted_bytes_to_copy = bytes; + } + #define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl); SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL) diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Wed May 05 16:39:47 2010 -0700 @@ -662,8 +662,6 @@ prt = PosParPRT::alloc(from_hr); } prt->init(from_hr); - // Record the outgoing pointer in the from_region's outgoing bitmap. - from_hr->rem_set()->add_outgoing_reference(hr()); PosParPRT* first_prt = _fine_grain_regions[ind]; prt->set_next(first_prt); // XXX Maybe move to init?
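Illustrative sketch (not part of this changeset): why HeapRegion caches _recorded_rs_length and _predicted_elapsed_time_ms in the heapRegion.hpp change above. When a region's policy information is later removed from the incremental cset totals, the policy must subtract exactly what it once added, even though the region's live RSet may have grown since; reading the cached values rather than re-sampling keeps the add/remove pair symmetric. Condensed from the add_to/remove_from routines earlier in this patch:

    // On add: aggregate into the policy totals, then cache on the region.
    _inc_cset_recorded_rs_lengths += rs_length;
    hr->set_recorded_rs_length(rs_length);
    // Later, on remove: subtract the cached value, not a fresh sample.
    _inc_cset_recorded_rs_lengths -= hr->recorded_rs_length();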
@@ -1073,11 +1071,7 @@ HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa, HeapRegion* hr) - : _bosa(bosa), _other_regions(hr), - _outgoing_region_map(G1CollectedHeap::heap()->max_regions(), - false /* in-resource-area */), - _iter_state(Unclaimed) -{} + : _bosa(bosa), _other_regions(hr), _iter_state(Unclaimed) { } void HeapRegionRemSet::setup_remset_size() { @@ -1148,30 +1142,11 @@ PosParPRT::par_contract_all(); } -void HeapRegionRemSet::add_outgoing_reference(HeapRegion* to_hr) { - _outgoing_region_map.par_at_put(to_hr->hrs_index(), 1); -} - void HeapRegionRemSet::clear() { - clear_outgoing_entries(); - _outgoing_region_map.clear(); _other_regions.clear(); assert(occupied() == 0, "Should be clear."); } -void HeapRegionRemSet::clear_outgoing_entries() { - G1CollectedHeap* g1h = G1CollectedHeap::heap(); - size_t i = _outgoing_region_map.get_next_one_offset(0); - while (i < _outgoing_region_map.size()) { - HeapRegion* to_region = g1h->region_at(i); - if (!to_region->in_collection_set()) { - to_region->rem_set()->clear_incoming_entry(hr()); - } - i = _outgoing_region_map.get_next_one_offset(i+1); - } -} - - void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm) { _other_regions.scrub(ctbs, region_bm, card_bm); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Wed May 05 16:39:47 2010 -0700 @@ -179,13 +179,6 @@ OtherRegionsTable _other_regions; - // One set bit for every region that has an entry for this one. - BitMap _outgoing_region_map; - - // Clear entries for the current region in any rem sets named in - // the _outgoing_region_map. - void clear_outgoing_entries(); - enum ParIterState { Unclaimed, Claimed, Complete }; volatile ParIterState _iter_state; volatile jlong _iter_claimed; @@ -243,10 +236,6 @@ _other_regions.add_reference(from, tid); } - // Records the fact that the current region contains an outgoing - // reference into "to_hr". - void add_outgoing_reference(HeapRegion* to_hr); - // Removes any entries shown by the given bitmaps to contain only dead // objects. void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/g1/survRateGroup.cpp --- a/src/share/vm/gc_implementation/g1/survRateGroup.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/g1/survRateGroup.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -55,7 +55,6 @@ void SurvRateGroup::reset() { _all_regions_allocated = 0; - _scan_only_prefix = 0; _setup_seq_num = 0; _stats_arrays_length = 0; _accum_surv_rate = 0.0; @@ -74,7 +73,7 @@ void SurvRateGroup::start_adding_regions() { _setup_seq_num = _stats_arrays_length; - _region_num = _scan_only_prefix; + _region_num = 0; _accum_surv_rate = 0.0; #if 0 @@ -164,12 +163,6 @@ } void -SurvRateGroup::record_scan_only_prefix(size_t scan_only_prefix) { - guarantee( scan_only_prefix <= _region_num, "pre-condition" ); - _scan_only_prefix = scan_only_prefix; -} - -void SurvRateGroup::record_surviving_words(int age_in_group, size_t surv_words) { guarantee( 0 <= age_in_group && (size_t) age_in_group < _region_num, "pre-condition" ); @@ -218,13 +211,12 @@ #ifndef PRODUCT void SurvRateGroup::print() { - gclog_or_tty->print_cr("Surv Rate Group: %s (%d entries, %d scan-only)", - _name, _region_num, _scan_only_prefix); + gclog_or_tty->print_cr("Surv Rate Group: %s (%d entries)", + _name, _region_num); for (size_t i = 0; i < _region_num; ++i) { - gclog_or_tty->print_cr(" age %4d surv rate %6.2lf %% pred %6.2lf %%%s", + gclog_or_tty->print_cr(" age %4d surv rate %6.2lf %% pred %6.2lf %%", i, _surv_rate[i] * 100.0, - _g1p->get_new_prediction(_surv_rate_pred[i]) * 100.0, - (i < _scan_only_prefix) ? " S-O" : " "); + _g1p->get_new_prediction(_surv_rate_pred[i]) * 100.0); } } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/g1/survRateGroup.hpp --- a/src/share/vm/gc_implementation/g1/survRateGroup.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/g1/survRateGroup.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -41,7 +41,6 @@ int _all_regions_allocated; size_t _region_num; - size_t _scan_only_prefix; size_t _setup_seq_num; public: @@ -51,13 +50,11 @@ void reset(); void start_adding_regions(); void stop_adding_regions(); - void record_scan_only_prefix(size_t scan_only_prefix); void record_surviving_words(int age_in_group, size_t surv_words); void all_surviving_words_recorded(bool propagate); const char* name() { return _name; } size_t region_num() { return _region_num; } - size_t scan_only_length() { return _scan_only_prefix; } double accum_surv_rate_pred(int age) { assert(age >= 0, "must be"); if ((size_t)age < _stats_arrays_length) @@ -82,17 +79,12 @@ int next_age_index(); int age_in_group(int age_index) { - int ret = (int) (_all_regions_allocated - age_index); + int ret = (int) (_all_regions_allocated - age_index); assert( ret >= 0, "invariant" ); return ret; } - int recalculate_age_index(int age_index) { - int new_age_index = (int) _scan_only_prefix - age_in_group(age_index); - guarantee( new_age_index >= 0, "invariant" ); - return new_age_index; - } void finished_recalculating_age_indexes() { - _all_regions_allocated = (int) _scan_only_prefix; + _all_regions_allocated = 0; } #ifndef PRODUCT diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/includeDB_gc_parallelScavenge --- a/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ // -// Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. +// Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
// // This code is free software; you can redistribute it and/or modify it @@ -161,8 +161,10 @@ parMarkBitMap.hpp bitMap.inline.hpp parMarkBitMap.hpp psVirtualspace.hpp +psAdaptiveSizePolicy.cpp collectorPolicy.hpp psAdaptiveSizePolicy.cpp gcPolicyCounters.hpp psAdaptiveSizePolicy.cpp gcCause.hpp +psAdaptiveSizePolicy.cpp generationSizer.hpp psAdaptiveSizePolicy.cpp psAdaptiveSizePolicy.hpp psAdaptiveSizePolicy.cpp psGCAdaptivePolicyCounters.hpp psAdaptiveSizePolicy.cpp psScavenge.hpp @@ -215,6 +217,7 @@ psMarkSweep.cpp fprofiler.hpp psMarkSweep.cpp gcCause.hpp psMarkSweep.cpp gcLocker.inline.hpp +psMarkSweep.cpp generationSizer.hpp psMarkSweep.cpp isGCActiveMark.hpp psMarkSweep.cpp oop.inline.hpp psMarkSweep.cpp memoryService.hpp @@ -256,6 +259,7 @@ psParallelCompact.cpp gcCause.hpp psParallelCompact.cpp gcLocker.inline.hpp psParallelCompact.cpp gcTaskManager.hpp +psParallelCompact.cpp generationSizer.hpp psParallelCompact.cpp isGCActiveMark.hpp psParallelCompact.cpp management.hpp psParallelCompact.cpp memoryService.hpp @@ -344,10 +348,12 @@ psScavenge.cpp psAdaptiveSizePolicy.hpp psScavenge.cpp biasedLocking.hpp psScavenge.cpp cardTableExtension.hpp +psScavenge.cpp collectorPolicy.hpp psScavenge.cpp fprofiler.hpp psScavenge.cpp gcCause.hpp psScavenge.cpp gcLocker.inline.hpp psScavenge.cpp gcTaskManager.hpp +psScavenge.cpp generationSizer.hpp psScavenge.cpp handles.inline.hpp psScavenge.cpp isGCActiveMark.hpp psScavenge.cpp oop.inline.hpp diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/includeDB_gc_serial --- a/src/share/vm/gc_implementation/includeDB_gc_serial Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/includeDB_gc_serial Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ // -// Copyright (c) 2007 Sun Microsystems, Inc. All Rights Reserved. +// Copyright 2007-2010 Sun Microsystems, Inc. All Rights Reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ adaptiveSizePolicy.hpp universe.hpp adaptiveSizePolicy.cpp adaptiveSizePolicy.hpp +adaptiveSizePolicy.cpp collectorPolicy.hpp adaptiveSizePolicy.cpp gcCause.hpp adaptiveSizePolicy.cpp ostream.hpp adaptiveSizePolicy.cpp timer.hpp diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp --- a/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp Wed May 05 16:39:47 2010 -0700 @@ -325,7 +325,7 @@ eden_size = align_size_down(eden_size, alignment); eden_end = eden_start + eden_size; - assert(eden_end >= eden_start, "addition overflowed") + assert(eden_end >= eden_start, "addition overflowed"); // To may resize into from space as long as it is clear of live data. // From space must remain page aligned, though, so we need to do some diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/parNew/parNewGeneration.cpp --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -892,6 +892,10 @@ } swap_spaces(); + // A successful scavenge should restart the GC time limit count which is + // for full GC's. + size_policy->reset_gc_overhead_limit_count(); + assert(to()->is_empty(), "to space should be empty now"); } else { assert(HandlePromotionFailure, diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp Wed May 05 16:39:47 2010 -0700 @@ -326,7 +326,7 @@ } eden_end = eden_start + eden_size; - assert(eden_end >= eden_start, "addition overflowed") + assert(eden_end >= eden_start, "addition overflowed"); // To may resize into from space as long as it is clear of live data. // From space must remain page aligned, though, so we need to do some @@ -413,7 +413,7 @@ pointer_delta(to_start, eden_start, sizeof(char))); } eden_end = eden_start + eden_size; - assert(eden_end >= eden_start, "addition overflowed") + assert(eden_end >= eden_start, "addition overflowed"); // Don't let eden shrink down to 0 or less. eden_end = MAX2(eden_end, eden_start + alignment); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -54,15 +54,16 @@ CollectedHeap::pre_initialize(); // Cannot be initialized until after the flags are parsed - GenerationSizer flag_parser; + // GenerationSizer flag_parser; + _collector_policy = new GenerationSizer(); - size_t yg_min_size = flag_parser.min_young_gen_size(); - size_t yg_max_size = flag_parser.max_young_gen_size(); - size_t og_min_size = flag_parser.min_old_gen_size(); - size_t og_max_size = flag_parser.max_old_gen_size(); + size_t yg_min_size = _collector_policy->min_young_gen_size(); + size_t yg_max_size = _collector_policy->max_young_gen_size(); + size_t og_min_size = _collector_policy->min_old_gen_size(); + size_t og_max_size = _collector_policy->max_old_gen_size(); // Why isn't there a min_perm_gen_size()? - size_t pg_min_size = flag_parser.perm_gen_size(); - size_t pg_max_size = flag_parser.max_perm_gen_size(); + size_t pg_min_size = _collector_policy->perm_gen_size(); + size_t pg_max_size = _collector_policy->max_perm_gen_size(); trace_gen_sizes("ps heap raw", pg_min_size, pg_max_size, @@ -89,12 +90,14 @@ // move to the common code. 
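The parallelScavengeHeap.cpp hunk above swaps a stack-local GenerationSizer for one the heap owns, so the same policy object survives initialize() and can later be handed to the size policy. A minimal before/after sketch (editor's distillation, not part of the changeset):

    // Before: the flag parser died at the end of initialize().
    GenerationSizer flag_parser;
    size_t yg_min_size = flag_parser.min_young_gen_size();

    // After: the heap keeps the policy for the lifetime of the VM.
    _collector_policy = new GenerationSizer();
    size_t yg_min_size = _collector_policy->min_young_gen_size();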
yg_min_size = align_size_up(yg_min_size, yg_align); yg_max_size = align_size_up(yg_max_size, yg_align); - size_t yg_cur_size = align_size_up(flag_parser.young_gen_size(), yg_align); + size_t yg_cur_size = + align_size_up(_collector_policy->young_gen_size(), yg_align); yg_cur_size = MAX2(yg_cur_size, yg_min_size); og_min_size = align_size_up(og_min_size, og_align); og_max_size = align_size_up(og_max_size, og_align); - size_t og_cur_size = align_size_up(flag_parser.old_gen_size(), og_align); + size_t og_cur_size = + align_size_up(_collector_policy->old_gen_size(), og_align); og_cur_size = MAX2(og_cur_size, og_min_size); pg_min_size = align_size_up(pg_min_size, pg_align); @@ -355,6 +358,11 @@ assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread"); assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); + // In general gc_overhead_limit_was_exceeded should be false so + // set it so here and reset it to true only if the gc time + // limit is being exceeded as checked below. + *gc_overhead_limit_was_exceeded = false; + HeapWord* result = young_gen()->allocate(size, is_tlab); uint loop_count = 0; @@ -428,24 +436,6 @@ if (result == NULL) { - // Exit the loop if if the gc time limit has been exceeded. - // The allocation must have failed above (result must be NULL), - // and the most recent collection must have exceeded the - // gc time limit. Exit the loop so that an out-of-memory - // will be thrown (returning a NULL will do that), but - // clear gc_time_limit_exceeded so that the next collection - // will succeeded if the applications decides to handle the - // out-of-memory and tries to go on. - *gc_overhead_limit_was_exceeded = size_policy()->gc_time_limit_exceeded(); - if (size_policy()->gc_time_limit_exceeded()) { - size_policy()->set_gc_time_limit_exceeded(false); - if (PrintGCDetails && Verbose) { - gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: " - "return NULL because gc_time_limit_exceeded is set"); - } - return NULL; - } - // Generate a VM operation VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count); VMThread::execute(&op); @@ -463,16 +453,34 @@ assert(op.result() == NULL, "must be NULL if gc_locked() is true"); continue; // retry and/or stall as necessary } - // If a NULL result is being returned, an out-of-memory - // will be thrown now. Clear the gc_time_limit_exceeded - // flag to avoid the following situation. - // gc_time_limit_exceeded is set during a collection - // the collection fails to return enough space and an OOM is thrown - // the next GC is skipped because the gc_time_limit_exceeded - // flag is set and another OOM is thrown - if (op.result() == NULL) { - size_policy()->set_gc_time_limit_exceeded(false); + + // Exit the loop if the gc time limit has been exceeded. + // The allocation must have failed above ("result" guarding + // this path is NULL) and the most recent collection has exceeded the + // gc overhead limit (although enough may have been collected to + // satisfy the allocation). Exit the loop so that an out-of-memory + // will be thrown (return a NULL ignoring the contents of + // op.result()), + // but clear gc_overhead_limit_exceeded so that the next collection + // starts with a clean slate (i.e., forgets about previous overhead + // excesses). Fill op.result() with a filler object so that the + // heap remains parsable. 
+ const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded(); + const bool softrefs_clear = collector_policy()->all_soft_refs_clear(); + assert(!limit_exceeded || softrefs_clear, "Should have been cleared"); + if (limit_exceeded && softrefs_clear) { + *gc_overhead_limit_was_exceeded = true; + size_policy()->set_gc_overhead_limit_exceeded(false); + if (PrintGCDetails && Verbose) { + gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: " + "return NULL because gc_overhead_limit_exceeded is set"); + } + if (op.result() != NULL) { + CollectedHeap::fill_with_object(op.result(), size); + } + return NULL; } + return op.result(); } } @@ -613,14 +621,15 @@ // and the most recent collection must have exceeded the // gc time limit. Exit the loop so that an out-of-memory // will be thrown (returning a NULL will do that), but - // clear gc_time_limit_exceeded so that the next collection + // clear gc_overhead_limit_exceeded so that the next collection // will succeeded if the applications decides to handle the // out-of-memory and tries to go on. - if (size_policy()->gc_time_limit_exceeded()) { - size_policy()->set_gc_time_limit_exceeded(false); + const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded(); + if (limit_exceeded) { + size_policy()->set_gc_overhead_limit_exceeded(false); if (PrintGCDetails && Verbose) { - gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate: " - "return NULL because gc_time_limit_exceeded is set"); + gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate:" + " return NULL because gc_overhead_limit_exceeded is set"); } assert(result == NULL, "Allocation did not fail"); return NULL; @@ -643,14 +652,15 @@ continue; // retry and/or stall as necessary } // If a NULL results is being returned, an out-of-memory - // will be thrown now. Clear the gc_time_limit_exceeded + // will be thrown now. Clear the gc_overhead_limit_exceeded // flag to avoid the following situation. - // gc_time_limit_exceeded is set during a collection + // gc_overhead_limit_exceeded is set during a collection // the collection fails to return enough space and an OOM is thrown - // the next GC is skipped because the gc_time_limit_exceeded - // flag is set and another OOM is thrown + // a subsequent GC prematurely throws an out-of-memory because + // the gc_overhead_limit_exceeded counts did not start + // again from 0. if (op.result() == NULL) { - size_policy()->set_gc_time_limit_exceeded(false); + size_policy()->reset_gc_overhead_limit_count(); } return op.result(); } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,8 @@ class AdjoiningGenerations; class GCTaskManager; class PSAdaptiveSizePolicy; +class GenerationSizer; +class CollectorPolicy; class ParallelScavengeHeap : public CollectedHeap { friend class VMStructs; @@ -43,6 +45,8 @@ size_t _young_gen_alignment; size_t _old_gen_alignment; + GenerationSizer* _collector_policy; + inline size_t set_alignment(size_t& var, size_t val); // Collection of generations that are adjacent in the @@ -72,6 +76,9 @@ return CollectedHeap::ParallelScavengeHeap; } +CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; } + // GenerationSizer* collector_policy() const { return _collector_policy; } + static PSYoungGen* young_gen() { return _young_gen; } static PSOldGen* old_gen() { return _old_gen; } static PSPermGen* perm_gen() { return _perm_gen; } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2002-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -184,18 +184,19 @@ set_change_young_gen_for_maj_pauses(0); } - // If this is not a full GC, only test and modify the young generation. -void PSAdaptiveSizePolicy::compute_generation_free_space(size_t young_live, - size_t eden_live, - size_t old_live, - size_t perm_live, - size_t cur_eden, - size_t max_old_gen_size, - size_t max_eden_size, - bool is_full_gc, - GCCause::Cause gc_cause) { +void PSAdaptiveSizePolicy::compute_generation_free_space( + size_t young_live, + size_t eden_live, + size_t old_live, + size_t perm_live, + size_t cur_eden, + size_t max_old_gen_size, + size_t max_eden_size, + bool is_full_gc, + GCCause::Cause gc_cause, + CollectorPolicy* collector_policy) { // Update statistics // Time statistics are updated as we go, update footprint stats here @@ -380,91 +381,16 @@ // Is too much time being spent in GC? // Is the heap trying to grow beyond it's limits? - const size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average()); + const size_t free_in_old_gen = + (size_t)(max_old_gen_size - avg_old_live()->average()); if (desired_promo_size > free_in_old_gen && desired_eden_size > eden_limit) { - - // eden_limit is the upper limit on the size of eden based on - // the maximum size of the young generation and the sizes - // of the survivor space. - // The question being asked is whether the gc costs are high - // and the space being recovered by a collection is low. - // free_in_young_gen is the free space in the young generation - // after a collection and promo_live is the free space in the old - // generation after a collection. - // - // Use the minimum of the current value of the live in the - // young gen or the average of the live in the young gen. - // If the current value drops quickly, that should be taken - // into account (i.e., don't trigger if the amount of free - // space has suddenly jumped up). If the current is much - // higher than the average, use the average since it represents - // the longer term behavor. 
- const size_t live_in_eden = MIN2(eden_live, (size_t) avg_eden_live()->average()); - const size_t free_in_eden = eden_limit > live_in_eden ? - eden_limit - live_in_eden : 0; - const size_t total_free_limit = free_in_old_gen + free_in_eden; - const size_t total_mem = max_old_gen_size + max_eden_size; - const double mem_free_limit = total_mem * (GCHeapFreeLimit/100.0); - if (PrintAdaptiveSizePolicy && (Verbose || - (total_free_limit < (size_t) mem_free_limit))) { - gclog_or_tty->print_cr( - "PSAdaptiveSizePolicy::compute_generation_free_space limits:" - " promo_limit: " SIZE_FORMAT - " eden_limit: " SIZE_FORMAT - " total_free_limit: " SIZE_FORMAT - " max_old_gen_size: " SIZE_FORMAT - " max_eden_size: " SIZE_FORMAT - " mem_free_limit: " SIZE_FORMAT, - promo_limit, eden_limit, total_free_limit, - max_old_gen_size, max_eden_size, - (size_t) mem_free_limit); - } - - if (is_full_gc) { - if (gc_cost() > gc_cost_limit && - total_free_limit < (size_t) mem_free_limit) { - // Collections, on average, are taking too much time, and - // gc_cost() > gc_cost_limit - // we have too little space available after a full gc. - // total_free_limit < mem_free_limit - // where - // total_free_limit is the free space available in - // both generations - // total_mem is the total space available for allocation - // in both generations (survivor spaces are not included - // just as they are not included in eden_limit). - // mem_free_limit is a fraction of total_mem judged to be an - // acceptable amount that is still unused. - // The heap can ask for the value of this variable when deciding - // whether to thrown an OutOfMemory error. - // Note that the gc time limit test only works for the collections - // of the young gen + tenured gen and not for collections of the - // permanent gen. That is because the calculation of the space - // freed by the collection is the free space in the young gen + - // tenured gen. - // Ignore explicit GC's. Ignoring explicit GC's at this level - // is the equivalent of the GC did not happen as far as the - // overhead calculation is concerted (i.e., the flag is not set - // and the count is not affected). Also the average will not - // have been updated unless UseAdaptiveSizePolicyWithSystemGC is on. - if (!GCCause::is_user_requested_gc(gc_cause) && - !GCCause::is_serviceability_requested_gc(gc_cause)) { - inc_gc_time_limit_count(); - if (UseGCOverheadLimit && - (gc_time_limit_count() > AdaptiveSizePolicyGCTimeLimitThreshold)){ - // All conditions have been met for throwing an out-of-memory - _gc_time_limit_exceeded = true; - // Avoid consecutive OOM due to the gc time limit by resetting - // the counter. - reset_gc_time_limit_count(); - } - _print_gc_time_limit_would_be_exceeded = true; - } - } else { - // Did not exceed overhead limits - reset_gc_time_limit_count(); - } - } + check_gc_overhead_limit(young_live, + eden_live, + max_old_gen_size, + max_eden_size, + is_full_gc, + gc_cause, + collector_policy); } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2002-2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -45,6 +45,7 @@ // Forward decls class elapsedTimer; +class GenerationSizer; class PSAdaptiveSizePolicy : public AdaptiveSizePolicy { friend class PSGCAdaptivePolicyCounters; @@ -340,7 +341,8 @@ size_t max_old_gen_size, size_t max_eden_size, bool is_full_gc, - GCCause::Cause gc_cause); + GCCause::Cause gc_cause, + CollectorPolicy* collector_policy); // Calculates new survivor space size; returns a new tenuring threshold // value. Stores new survivor size in _survivor_size. diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -117,11 +117,13 @@ PerfData::U_Bytes, (jlong) ps_size_policy()->avg_base_footprint()->average(), CHECK); cname = PerfDataManager::counter_name(name_space(), "gcTimeLimitExceeded"); - _gc_time_limit_exceeded = PerfDataManager::create_variable(SUN_GC, cname, - PerfData::U_Events, ps_size_policy()->gc_time_limit_exceeded(), CHECK); + _gc_overhead_limit_exceeded_counter = + PerfDataManager::create_variable(SUN_GC, cname, + PerfData::U_Events, ps_size_policy()->gc_overhead_limit_exceeded(), CHECK); cname = PerfDataManager::counter_name(name_space(), "liveAtLastFullGc"); - _live_at_last_full_gc = PerfDataManager::create_variable(SUN_GC, cname, + _live_at_last_full_gc_counter = + PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes, ps_size_policy()->live_at_last_full_gc(), CHECK); cname = PerfDataManager::counter_name(name_space(), "majorPauseOldSlope"); @@ -189,6 +191,8 @@ update_minor_pause_old_slope(); update_major_pause_young_slope(); update_minor_collection_slope_counter(); + update_gc_overhead_limit_exceeded_counter(); + update_live_at_last_full_gc_counter(); } } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -44,8 +44,8 @@ PerfVariable* _live_space; PerfVariable* _free_space; PerfVariable* _avg_base_footprint; - PerfVariable* _gc_time_limit_exceeded; - PerfVariable* _live_at_last_full_gc; + PerfVariable* _gc_overhead_limit_exceeded_counter; + PerfVariable* _live_at_last_full_gc_counter; PerfVariable* _old_capacity; PerfVariable* _boundary_moved; @@ -169,6 +169,14 @@ (jlong)(ps_size_policy()->major_pause_young_slope() * 1000) ); } + inline void update_gc_overhead_limit_exceeded_counter() { + _gc_overhead_limit_exceeded_counter->set_value( + (jlong) ps_size_policy()->gc_overhead_limit_exceeded()); + } + inline void update_live_at_last_full_gc_counter() { + _live_at_last_full_gc_counter->set_value( + (jlong)(ps_size_policy()->live_at_last_full_gc())); + } inline void update_scavenge_skipped(int cause) { _scavenge_skipped->set_value(cause); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,6 +46,12 @@ // // Note that this method should only be called from the vm_thread while // at a safepoint! +// +// Note that the all_soft_refs_clear flag in the collector policy +// may be true because this method can be called without intervening +// activity. For example, when the heap space is tight and full measures +// are being taken to free space. + void PSMarkSweep::invoke(bool maximum_heap_compaction) { assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); @@ -54,24 +60,18 @@ ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); GCCause::Cause gc_cause = heap->gc_cause(); PSAdaptiveSizePolicy* policy = heap->size_policy(); + IsGCActiveMark mark; - // Before each allocation/collection attempt, find out from the - // policy object if GCs are, on the whole, taking too long. If so, - // bail out without attempting a collection. The exceptions are - // for explicitly requested GC's. - if (!policy->gc_time_limit_exceeded() || - GCCause::is_user_requested_gc(gc_cause) || - GCCause::is_serviceability_requested_gc(gc_cause)) { - IsGCActiveMark mark; + if (ScavengeBeforeFullGC) { + PSScavenge::invoke_no_policy(); + } - if (ScavengeBeforeFullGC) { - PSScavenge::invoke_no_policy(); - } + const bool clear_all_soft_refs = + heap->collector_policy()->should_clear_all_soft_refs(); - int count = (maximum_heap_compaction)?1:MarkSweepAlwaysCompactCount; - IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count); - PSMarkSweep::invoke_no_policy(maximum_heap_compaction); - } + int count = (maximum_heap_compaction)?1:MarkSweepAlwaysCompactCount; + IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count); + PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction); } // This method contains no policy.
You should probably @@ -89,6 +89,10 @@ assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); PSAdaptiveSizePolicy* size_policy = heap->size_policy(); + // The scope of casr should end after code that can change + // CollectorPolicy::_should_clear_all_soft_refs. + ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy()); + PSYoungGen* young_gen = heap->young_gen(); PSOldGen* old_gen = heap->old_gen(); PSPermGen* perm_gen = heap->perm_gen(); @@ -275,7 +279,8 @@ old_gen->max_gen_size(), max_eden_size, true /* full gc*/, - gc_cause); + gc_cause, + heap->collector_policy()); heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes()); @@ -326,19 +331,6 @@ // Track memory usage and detect low memory MemoryService::track_memory_usage(); heap->update_counters(); - - if (PrintGCDetails) { - if (size_policy->print_gc_time_limit_would_be_exceeded()) { - if (size_policy->gc_time_limit_exceeded()) { - gclog_or_tty->print_cr(" GC time is exceeding GCTimeLimit " - "of %d%%", GCTimeLimit); - } else { - gclog_or_tty->print_cr(" GC time would exceed GCTimeLimit " - "of %d%%", GCTimeLimit); - } - } - size_policy->set_print_gc_time_limit_would_be_exceeded(false); - } } if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp Wed May 05 16:39:47 2010 -0700 @@ -65,7 +65,7 @@ // and releasing the heap lock, which is held during gc's anyway. This method is not // safe for use at the same time as allocate_noexpand()! HeapWord* cas_allocate_noexpand(size_t word_size) { - assert(SafepointSynchronize::is_at_safepoint(), "Must only be called at safepoint") + assert(SafepointSynchronize::is_at_safepoint(), "Must only be called at safepoint"); HeapWord* res = object_space()->cas_allocate(word_size); if (res != NULL) { _start_array.allocate_block(res); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2005-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1923,31 +1923,32 @@ // // Note that this method should only be called from the vm_thread while at a // safepoint. +// +// Note that the all_soft_refs_clear flag in the collector policy +// may be true because this method can be called without intervening +// activity. For example, when the heap space is tight and full measures +// are being taken to free space.
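Both full-GC entry points now share one shape: PSMarkSweep::invoke() above and PSParallelCompact::invoke() in the hunk that follows. A distilled sketch of that shape (method names are from this changeset; the scaffolding around them is simplified):

    // Editor's sketch; error checking and counters elided.
    void full_gc_invoke_sketch(ParallelScavengeHeap* heap,
                               bool maximum_heap_compaction) {
      IsGCActiveMark mark;                  // committed to a GC from here on
      if (ScavengeBeforeFullGC) {
        PSScavenge::invoke_no_policy();     // optional young-gen pass first
      }
      const bool clear_all_soft_refs =
        heap->collector_policy()->should_clear_all_soft_refs();
      // A pending soft-ref request and an explicit maximal compaction both
      // force the "clear everything" flavor of the collection.
      PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
                                          maximum_heap_compaction);
    }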
void PSParallelCompact::invoke(bool maximum_heap_compaction) { assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); + ParallelScavengeHeap* heap = gc_heap(); GCCause::Cause gc_cause = heap->gc_cause(); assert(!heap->is_gc_active(), "not reentrant"); PSAdaptiveSizePolicy* policy = heap->size_policy(); - - // Before each allocation/collection attempt, find out from the - // policy object if GCs are, on the whole, taking too long. If so, - // bail out without attempting a collection. The exceptions are - // for explicitly requested GC's. - if (!policy->gc_time_limit_exceeded() || - GCCause::is_user_requested_gc(gc_cause) || - GCCause::is_serviceability_requested_gc(gc_cause)) { - IsGCActiveMark mark; - - if (ScavengeBeforeFullGC) { - PSScavenge::invoke_no_policy(); - } - - PSParallelCompact::invoke_no_policy(maximum_heap_compaction); + IsGCActiveMark mark; + + if (ScavengeBeforeFullGC) { + PSScavenge::invoke_no_policy(); } + + const bool clear_all_soft_refs = + heap->collector_policy()->should_clear_all_soft_refs(); + + PSParallelCompact::invoke_no_policy(clear_all_soft_refs || + maximum_heap_compaction); } bool ParallelCompactData::region_contains(size_t region_index, HeapWord* addr) { @@ -1976,6 +1977,11 @@ PSPermGen* perm_gen = heap->perm_gen(); PSAdaptiveSizePolicy* size_policy = heap->size_policy(); + // The scope of casr should end after code that can change + // CollectorPolicy::_should_clear_all_soft_refs. + ClearedAllSoftRefs casr(maximum_heap_compaction, + heap->collector_policy()); + if (ZapUnusedHeapArea) { // Save information needed to minimize mangling heap->record_gen_tops_before_GC(); @@ -2109,7 +2115,8 @@ old_gen->max_gen_size(), max_eden_size, true /* full gc*/, - gc_cause); + gc_cause, + heap->collector_policy()); heap->resize_old_gen( size_policy->calculated_old_free_size_in_bytes()); @@ -2157,19 +2164,6 @@ // Track memory usage and detect low memory MemoryService::track_memory_usage(); heap->update_counters(); - - if (PrintGCDetails) { - if (size_policy->print_gc_time_limit_would_be_exceeded()) { - if (size_policy->gc_time_limit_exceeded()) { - gclog_or_tty->print_cr(" GC time is exceeding GCTimeLimit " - "of %d%%", GCTimeLimit); - } else { - gclog_or_tty->print_cr(" GC time would exceed GCTimeLimit " - "of %d%%", GCTimeLimit); - } - } - size_policy->set_print_gc_time_limit_would_be_exceeded(false); - } } if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { @@ -3283,7 +3277,7 @@ if (status == ParMarkBitMap::incomplete) { // The last obj that starts in the source region does not end in the // region. - assert(closure.source() < end_addr, "sanity") + assert(closure.source() < end_addr, "sanity"); HeapWord* const obj_beg = closure.source(); HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(), src_space_top); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2002-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -187,8 +187,7 @@ // // Note that this method should only be called from the vm_thread while // at a safepoint! -void PSScavenge::invoke() -{ +void PSScavenge::invoke() { assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); assert(!Universe::heap()->is_gc_active(), "not reentrant"); @@ -197,29 +196,25 @@ assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); PSAdaptiveSizePolicy* policy = heap->size_policy(); + IsGCActiveMark mark; - // Before each allocation/collection attempt, find out from the - // policy object if GCs are, on the whole, taking too long. If so, - // bail out without attempting a collection. - if (!policy->gc_time_limit_exceeded()) { - IsGCActiveMark mark; + bool scavenge_was_done = PSScavenge::invoke_no_policy(); - bool scavenge_was_done = PSScavenge::invoke_no_policy(); - - PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters(); + PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters(); + if (UsePerfData) + counters->update_full_follows_scavenge(0); + if (!scavenge_was_done || + policy->should_full_GC(heap->old_gen()->free_in_bytes())) { if (UsePerfData) - counters->update_full_follows_scavenge(0); - if (!scavenge_was_done || - policy->should_full_GC(heap->old_gen()->free_in_bytes())) { - if (UsePerfData) - counters->update_full_follows_scavenge(full_follows_scavenge); + counters->update_full_follows_scavenge(full_follows_scavenge); + GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy); + CollectorPolicy* cp = heap->collector_policy(); + const bool clear_all_softrefs = cp->should_clear_all_soft_refs(); - GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy); - if (UseParallelOldGC) { - PSParallelCompact::invoke_no_policy(false); - } else { - PSMarkSweep::invoke_no_policy(false); - } + if (UseParallelOldGC) { + PSParallelCompact::invoke_no_policy(clear_all_softrefs); + } else { + PSMarkSweep::invoke_no_policy(clear_all_softrefs); } } } @@ -447,6 +442,9 @@ size_t promoted = old_gen->used_in_bytes() - old_gen_used_before; size_policy->update_averages(_survivor_overflow, survived, promoted); + // A successful scavenge should restart the GC time limit count which is + // for full GC's. + size_policy->reset_gc_overhead_limit_count(); if (UseAdaptiveSizePolicy) { // Calculate the new survivor size and tenuring threshold @@ -523,7 +521,8 @@ old_gen->max_gen_size(), max_eden_size, false /* full gc*/, - gc_cause); + gc_cause, + heap->collector_policy()); } // Resize the young generation at every collection diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp Wed May 05 16:39:47 2010 -0700 @@ -521,7 +521,7 @@ } eden_end = eden_start + eden_size; - assert(eden_end >= eden_start, "addition overflowed") + assert(eden_end >= eden_start, "addition overflowed"); // To may resize into from space as long as it is clear of live data. 
// From space must remain page aligned, though, so we need to do some @@ -605,7 +605,7 @@ pointer_delta(to_start, eden_start, sizeof(char))); } eden_end = eden_start + eden_size; - assert(eden_end >= eden_start, "addition overflowed") + assert(eden_end >= eden_start, "addition overflowed"); // Could choose to not let eden shrink // to_start = MAX2(to_start, eden_end); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp --- a/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2004-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2004-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,13 +44,15 @@ _survivor_size(init_survivor_size), _gc_pause_goal_sec(gc_pause_goal_sec), _throughput_goal(1.0 - double(1.0 / (1.0 + (double) gc_cost_ratio))), - _gc_time_limit_exceeded(false), - _print_gc_time_limit_would_be_exceeded(false), - _gc_time_limit_count(0), + _gc_overhead_limit_exceeded(false), + _print_gc_overhead_limit_would_be_exceeded(false), + _gc_overhead_limit_count(0), _latest_minor_mutator_interval_seconds(0), _threshold_tolerance_percent(1.0 + ThresholdTolerance/100.0), _young_gen_change_for_minor_throughput(0), _old_gen_change_for_major_throughput(0) { + assert(AdaptiveSizePolicyGCTimeLimitThreshold > 0, + "No opportunity to clear SoftReferences before GC overhead limit"); _avg_minor_pause = new AdaptivePaddedAverage(AdaptiveTimeWeight, PausePadding); _avg_minor_interval = new AdaptiveWeightedAverage(AdaptiveTimeWeight); @@ -278,6 +280,147 @@ set_decide_at_full_gc(0); } +void AdaptiveSizePolicy::check_gc_overhead_limit( + size_t young_live, + size_t eden_live, + size_t max_old_gen_size, + size_t max_eden_size, + bool is_full_gc, + GCCause::Cause gc_cause, + CollectorPolicy* collector_policy) { + + // Ignore explicit GC's. Exiting here does not set the flag and + // does not reset the count. Updating of the averages for system + // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC. + if (GCCause::is_user_requested_gc(gc_cause) || + GCCause::is_serviceability_requested_gc(gc_cause)) { + return; + } + // eden_limit is the upper limit on the size of eden based on + // the maximum size of the young generation and the sizes + // of the survivor space. + // The question being asked is whether the gc costs are high + // and the space being recovered by a collection is low. + // free_in_young_gen is the free space in the young generation + // after a collection and promo_live is the free space in the old + // generation after a collection. + // + // Use the minimum of the current value of the live in the + // young gen or the average of the live in the young gen. + // If the current value drops quickly, that should be taken + // into account (i.e., don't trigger if the amount of free + // space has suddenly jumped up). If the current is much + // higher than the average, use the average since it represents + // the longer term behavior. + const size_t live_in_eden = + MIN2(eden_live, (size_t) avg_eden_live()->average()); + const size_t free_in_eden = max_eden_size > live_in_eden ?
+ max_eden_size - live_in_eden : 0; + const size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average()); + const size_t total_free_limit = free_in_old_gen + free_in_eden; + const size_t total_mem = max_old_gen_size + max_eden_size; + const double mem_free_limit = total_mem * (GCHeapFreeLimit/100.0); + const double mem_free_old_limit = max_old_gen_size * (GCHeapFreeLimit/100.0); + const double mem_free_eden_limit = max_eden_size * (GCHeapFreeLimit/100.0); + const double gc_cost_limit = GCTimeLimit/100.0; + size_t promo_limit = (size_t)(max_old_gen_size - avg_old_live()->average()); + // But don't force a promo size below the current promo size. Otherwise, + // the promo size will shrink for no good reason. + promo_limit = MAX2(promo_limit, _promo_size); + + + if (PrintAdaptiveSizePolicy && (Verbose || + (free_in_old_gen < (size_t) mem_free_old_limit && + free_in_eden < (size_t) mem_free_eden_limit))) { + gclog_or_tty->print_cr( + "PSAdaptiveSizePolicy::compute_generation_free_space limits:" + " promo_limit: " SIZE_FORMAT + " max_eden_size: " SIZE_FORMAT + " total_free_limit: " SIZE_FORMAT + " max_old_gen_size: " SIZE_FORMAT + " max_eden_size: " SIZE_FORMAT + " mem_free_limit: " SIZE_FORMAT, + promo_limit, max_eden_size, total_free_limit, + max_old_gen_size, max_eden_size, + (size_t) mem_free_limit); + } + + bool print_gc_overhead_limit_would_be_exceeded = false; + if (is_full_gc) { + if (gc_cost() > gc_cost_limit && + free_in_old_gen < (size_t) mem_free_old_limit && + free_in_eden < (size_t) mem_free_eden_limit) { + // Collections, on average, are taking too much time, and + // gc_cost() > gc_cost_limit + // we have too little space available after a full gc. + // total_free_limit < mem_free_limit + // where + // total_free_limit is the free space available in + // both generations + // total_mem is the total space available for allocation + // in both generations (survivor spaces are not included + // just as they are not included in eden_limit). + // mem_free_limit is a fraction of total_mem judged to be an + // acceptable amount that is still unused. + // The heap can ask for the value of this variable when deciding + // whether to throw an OutOfMemory error. + // Note that the gc time limit test only works for the collections + // of the young gen + tenured gen and not for collections of the + // permanent gen. That is because the calculation of the space + // freed by the collection is the free space in the young gen + + // tenured gen. + // At this point the GC overhead limit is being exceeded. + inc_gc_overhead_limit_count(); + if (UseGCOverheadLimit) { + if (gc_overhead_limit_count() >= + AdaptiveSizePolicyGCTimeLimitThreshold){ + // All conditions have been met for throwing an out-of-memory + set_gc_overhead_limit_exceeded(true); + // Avoid consecutive OOM due to the gc time limit by resetting + // the counter. + reset_gc_overhead_limit_count(); + } else { + // The required consecutive collections which exceed the + // GC time limit may or may not have been reached. We + // are approaching that condition and so as not to + // throw an out-of-memory before all SoftRef's have been + // cleared, set _should_clear_all_soft_refs in CollectorPolicy. + // The clearing will be done on the next GC.
+ bool near_limit = gc_overhead_limit_near(); + if (near_limit) { + collector_policy->set_should_clear_all_soft_refs(true); + if (PrintGCDetails && Verbose) { + gclog_or_tty->print_cr(" Nearing GC overhead limit, " + "will be clearing all SoftReference"); + } + } + } + } + // Set this even when the overhead limit will not + // cause an out-of-memory. A diagnostic message indicating + // that the overhead limit is being exceeded is sometimes + // printed. + print_gc_overhead_limit_would_be_exceeded = true; + + } else { + // Did not exceed overhead limits + reset_gc_overhead_limit_count(); + } + } + + if (UseGCOverheadLimit && PrintGCDetails && Verbose) { + if (gc_overhead_limit_exceeded()) { + gclog_or_tty->print_cr(" GC is exceeding overhead limit " + "of %d%%", GCTimeLimit); + reset_gc_overhead_limit_count(); + } else if (print_gc_overhead_limit_would_be_exceeded) { + assert(gc_overhead_limit_count() > 0, "Should not be printing"); + gclog_or_tty->print_cr(" GC would exceed overhead limit " + "of %d%% %d consecutive time(s)", + GCTimeLimit, gc_overhead_limit_count()); + } + } +} // Printing bool AdaptiveSizePolicy::print_adaptive_size_policy_on(outputStream* st) const { diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp --- a/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2004-2006 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2004-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ // Forward decls class elapsedTimer; +class CollectorPolicy; class AdaptiveSizePolicy : public CHeapObj { friend class GCAdaptivePolicyCounters; @@ -75,13 +76,16 @@ // This is a hint for the heap: we've detected that gc times // are taking longer than GCTimeLimit allows. - bool _gc_time_limit_exceeded; - // Use for diagnostics only. If UseGCTimeLimit is false, + bool _gc_overhead_limit_exceeded; + // Use for diagnostics only. If UseGCOverheadLimit is false, // this variable is still set. - bool _print_gc_time_limit_would_be_exceeded; + bool _print_gc_overhead_limit_would_be_exceeded; // Count of consecutive GC that have exceeded the // GC time limit criterion. - uint _gc_time_limit_count; + uint _gc_overhead_limit_count; + // This flag signals that GCTimeLimit is being exceeded + // but may not have done so for the required number of consecutive + // collections. // Minor collection timers used to determine both // pause and interval times for collections.
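Taken together, check_gc_overhead_limit() runs a small state machine over _gc_overhead_limit_count. A worked example, assuming AdaptiveSizePolicyGCTimeLimitThreshold keeps its default of 5 (an assumption; the flag is a tunable):

    // Editor's walk-through of the counter with an assumed threshold of 5:
    //   over-limit full GC #1..#3 -> count = 1, 2, 3
    //   over-limit full GC #4     -> count = 4 == threshold - 1, so
    //                                gc_overhead_limit_near() is true and
    //                                set_should_clear_all_soft_refs(true) runs
    //   over-limit full GC #5     -> count reaches the threshold: the
    //                                exceeded flag is set, count resets to 0
    // An under-limit full GC, or a successful scavenge (see the psScavenge
    // and parNewGeneration hunks), resets the count and starts over.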
@@ -406,22 +410,21 @@ // Most heaps will choose to throw an OutOfMemoryError when // this occurs but it is up to the heap to request this information // of the policy - bool gc_time_limit_exceeded() { - return _gc_time_limit_exceeded; - } - void set_gc_time_limit_exceeded(bool v) { - _gc_time_limit_exceeded = v; + bool gc_overhead_limit_exceeded() { + return _gc_overhead_limit_exceeded; } - bool print_gc_time_limit_would_be_exceeded() { - return _print_gc_time_limit_would_be_exceeded; - } - void set_print_gc_time_limit_would_be_exceeded(bool v) { - _print_gc_time_limit_would_be_exceeded = v; + void set_gc_overhead_limit_exceeded(bool v) { + _gc_overhead_limit_exceeded = v; } - uint gc_time_limit_count() { return _gc_time_limit_count; } - void reset_gc_time_limit_count() { _gc_time_limit_count = 0; } - void inc_gc_time_limit_count() { _gc_time_limit_count++; } + // Tests conditions indicate the GC overhead limit is being approached. + bool gc_overhead_limit_near() { + return gc_overhead_limit_count() >= + (AdaptiveSizePolicyGCTimeLimitThreshold - 1); + } + uint gc_overhead_limit_count() { return _gc_overhead_limit_count; } + void reset_gc_overhead_limit_count() { _gc_overhead_limit_count = 0; } + void inc_gc_overhead_limit_count() { _gc_overhead_limit_count++; } // accessors for flags recording the decisions to resize the // generations to meet the pause goal. @@ -436,6 +439,16 @@ int decide_at_full_gc() { return _decide_at_full_gc; } void set_decide_at_full_gc(int v) { _decide_at_full_gc = v; } + // Check the conditions for an out-of-memory due to excessive GC time. + // Set _gc_overhead_limit_exceeded if all the conditions have been met. + void check_gc_overhead_limit(size_t young_live, + size_t eden_live, + size_t max_old_gen_size, + size_t max_eden_size, + bool is_full_gc, + GCCause::Cause gc_cause, + CollectorPolicy* collector_policy); + // Printing support virtual bool print_adaptive_size_policy_on(outputStream* st) const; bool print_adaptive_size_policy_on(outputStream* st, int diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/shared/vmGCOperations.cpp --- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp Wed May 05 16:39:47 2010 -0700 @@ -115,11 +115,25 @@ void VM_GC_HeapInspection::doit() { HandleMark hm; CollectedHeap* ch = Universe::heap(); + ch->ensure_parsability(false); // must happen, even if collection does + // not happen (e.g. due to GC_locker) if (_full_gc) { - ch->collect_as_vm_thread(GCCause::_heap_inspection); - } else { - // make the heap parsable (no need to retire TLABs) - ch->ensure_parsability(false); + // The collection attempt below would be skipped anyway if + // the gc locker is held. The following dump may then be a tad + // misleading to someone expecting only live objects to show + // up in the dump (see CR 6944195). Just issue a suitable warning + // in that case and do not attempt to do a collection. + // The latter is a subtle point, because even a failed attempt + // to GC will, in fact, induce one in the future, which we + // probably want to avoid in this case because the GC that we may + // be about to attempt holds value for us only + // if it happens now and not if it happens in the eventual + // future. 
+ if (GC_locker::is_active()) { + warning("GC locker is held; pre-dump GC was skipped"); + } else { + ch->collect_as_vm_thread(GCCause::_heap_inspection); + } } HeapInspection::heap_inspection(_out, _need_prologue /* need_prologue */); } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_implementation/shared/vmGCOperations.hpp --- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2005-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -89,8 +89,19 @@ if (full) { _full_gc_count_before = full_gc_count_before; } + // In ParallelScavengeHeap::mem_allocate() collections can be + // executed within a loop and _all_soft_refs_clear can be set + // true after they have been cleared by a collection and another + // collection started so that _all_soft_refs_clear can be true + // when this collection is started. Don't assert that + // _all_soft_refs_clear has to be false here even though + // mutators have run. Soft refs will be cleared again in this + // collection. } - ~VM_GC_Operation() {} + ~VM_GC_Operation() { + CollectedHeap* ch = Universe::heap(); + ch->collector_policy()->set_all_soft_refs_clear(false); + } // Acquire the reference synchronization lock virtual bool doit_prologue(); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/gc_interface/collectedHeap.hpp --- a/src/share/vm/gc_interface/collectedHeap.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/gc_interface/collectedHeap.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,6 +31,7 @@ class ThreadClosure; class AdaptiveSizePolicy; class Thread; +class CollectorPolicy; // // CollectedHeap @@ -506,6 +507,9 @@ // Return the AdaptiveSizePolicy for the heap. virtual AdaptiveSizePolicy* size_policy() = 0; + // Return the CollectorPolicy for the heap + virtual CollectorPolicy* collector_policy() const = 0; + // Iterate over all the ref-containing fields of all objects, calling // "cl.do_oop" on each. This includes objects in permanent memory. virtual void oop_iterate(OopClosure* cl) = 0; diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/includeDB_zero --- a/src/share/vm/includeDB_zero Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/includeDB_zero Wed May 05 16:39:47 2010 -0700 @@ -1,6 +1,6 @@ // // Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. -// Copyright 2009 Red Hat, Inc. +// Copyright 2009, 2010 Red Hat, Inc. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,8 @@ // NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps!
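Across the files touched so far, the soft-reference handshake closes into a loop between the size policy, the collector policy, and the GC VM ops. A compressed summary (editor's sketch; the method names are from this changeset, the wrapper function is not):

    void soft_ref_protocol_sketch(CollectedHeap* heap) {
      CollectorPolicy* policy = heap->collector_policy();
      // 1. Nearing the overhead limit, the size policy files a request:
      policy->set_should_clear_all_soft_refs(true);
      // 2. The next full GC consumes the request and clears SoftReferences:
      bool clear_all = policy->should_clear_all_soft_refs();
      // 3. A ClearedAllSoftRefs stack object (collectorPolicy.hpp, later in
      //    this patch) calls cleared_all_soft_refs() on scope exit, recording
      //    _all_soft_refs_clear = true and re-requesting clearing if the
      //    overhead limit is still near.
      // 4. ~VM_GC_Operation() (just above) withdraws the claim before
      //    mutators can run again:
      policy->set_all_soft_refs_clear(false);
      (void) clear_all; // sketch only
    }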
+cppInterpreter_.cpp stack_.inline.hpp + entryFrame_.hpp javaCalls.hpp entryFrame_.hpp stack_.hpp @@ -47,9 +49,19 @@ interpreterFrame_.hpp stack_.hpp interpreterFrame_.hpp thread.hpp +interpreterRT_.cpp stack_.inline.hpp + sharkFrame_.hpp methodOop.hpp sharkFrame_.hpp stack_.hpp stack_.hpp sizes.hpp +stack_.inline.hpp stack_.hpp +stack_.inline.hpp thread.hpp + +stack_.cpp interpreterRuntime.hpp +stack_.cpp stack_.hpp + +stubGenerator_.cpp stack_.inline.hpp + thread.hpp stack_.hpp diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/interpreter/bytecodeInterpreter.cpp --- a/src/share/vm/interpreter/bytecodeInterpreter.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp Wed May 05 16:39:47 2010 -0700 @@ -2339,8 +2339,8 @@ goto opcode_switch; } #endif - fatal2("\t*** Unimplemented opcode: %d = %s\n", - opcode, Bytecodes::name((Bytecodes::Code)opcode)); + fatal(err_msg("Unimplemented opcode %d = %s", opcode, + Bytecodes::name((Bytecodes::Code)opcode))); goto finish; } /* switch(opc) */ diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/interpreter/bytecodes.cpp --- a/src/share/vm/interpreter/bytecodes.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/interpreter/bytecodes.cpp Wed May 05 16:39:47 2010 -0700 @@ -426,7 +426,9 @@ if (is_defined(i)) { Code code = cast(i); Code java = java_code(code); - if (can_trap(code) && !can_trap(java)) fatal2("%s can trap => %s can trap, too", name(code), name(java)); + if (can_trap(code) && !can_trap(java)) + fatal(err_msg("%s can trap => %s can trap, too", name(code), + name(java))); } } } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/interpreter/oopMapCache.cpp --- a/src/share/vm/interpreter/oopMapCache.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/interpreter/oopMapCache.cpp Wed May 05 16:39:47 2010 -0700 @@ -224,8 +224,8 @@ // If we are doing mark sweep _method may not have a valid header // $$$ This used to happen only for m/s collections; we might want to // think of an appropriate generalization of this distinction. 
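The bytecodeInterpreter.cpp and bytecodes.cpp hunks above apply one mechanical conversion: the fixed-arity fatal2() macro gives way to fatal(err_msg(...)), which printf-formats the message into a buffer first. The general pattern (editor's illustration):

    // Before (removed style):
    //   fatal2("%s can trap => %s can trap, too", name(code), name(java));
    // After (this changeset's style):
    //   fatal(err_msg("%s can trap => %s can trap, too",
    //                 name(code), name(java)));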
- guarantee(Universe::heap()->is_gc_active() || - _method->is_oop_or_null(), "invalid oop in oopMapCache") + guarantee(Universe::heap()->is_gc_active() || _method->is_oop_or_null(), + "invalid oop in oopMapCache"); } #ifdef ENABLE_ZAP_DEAD_LOCALS diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/interpreter/templateInterpreter.cpp --- a/src/share/vm/interpreter/templateInterpreter.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/interpreter/templateInterpreter.cpp Wed May 05 16:39:47 2010 -0700 @@ -457,7 +457,7 @@ void TemplateInterpreterGenerator::set_wide_entry_point(Template* t, address& wep) { assert(t->is_valid(), "template must exist"); - assert(t->tos_in() == vtos, "only vtos tos_in supported for wide instructions") + assert(t->tos_in() == vtos, "only vtos tos_in supported for wide instructions"); wep = __ pc(); generate_and_dispatch(t); } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/memory/blockOffsetTable.cpp --- a/src/share/vm/memory/blockOffsetTable.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/memory/blockOffsetTable.cpp Wed May 05 16:39:47 2010 -0700 @@ -689,7 +689,7 @@ assert(blk_end > _next_offset_threshold, "should be past threshold"); assert(blk_start <= _next_offset_threshold, - "blk_start should be at or before threshold") + "blk_start should be at or before threshold"); assert(pointer_delta(_next_offset_threshold, blk_start) <= N_words, "offset should be <= BlockOffsetSharedArray::N"); assert(Universe::heap()->is_in_reserved(blk_start), diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/memory/collectorPolicy.cpp --- a/src/share/vm/memory/collectorPolicy.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/memory/collectorPolicy.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -112,6 +112,11 @@ } } +bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) { + bool result = _should_clear_all_soft_refs; + set_should_clear_all_soft_refs(false); + return result; +} GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap, int max_covered_regions) { @@ -126,6 +131,17 @@ } } +void CollectorPolicy::cleared_all_soft_refs() { + // If near gc overhear limit, continue to clear SoftRefs. SoftRefs may + // have been cleared in the last collection but if the gc overhear + // limit continues to be near, SoftRefs should still be cleared. + if (size_policy() != NULL) { + _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near(); + } + _all_soft_refs_clear = true; +} + + // GenCollectorPolicy methods. size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) { @@ -489,6 +505,12 @@ debug_only(gch->check_for_valid_allocation_state()); assert(gch->no_gc_in_progress(), "Allocation during gc not allowed"); + + // In general gc_overhead_limit_was_exceeded should be false so + // set it so here and reset it to true only if the gc time + // limit is being exceeded as checked below. + *gc_overhead_limit_was_exceeded = false; + HeapWord* result = NULL; // Loop until the allocation is satisified, @@ -524,12 +546,6 @@ return result; } - // There are NULL's returned for different circumstances below. - // In general gc_overhead_limit_was_exceeded should be false so - // set it so here and reset it to true only if the gc time - // limit is being exceeded as checked below. 
- *gc_overhead_limit_was_exceeded = false; - if (GC_locker::is_active_and_needs_gc()) { if (is_tlab) { return NULL; // Caller will retry allocating individual object @@ -568,18 +584,6 @@ gc_count_before = Universe::heap()->total_collections(); } - // Allocation has failed and a collection is about - // to be done. If the gc time limit was exceeded the - // last time a collection was done, return NULL so - // that an out-of-memory will be thrown. Clear - // gc_time_limit_exceeded so that subsequent attempts - // at a collection will be made. - if (size_policy()->gc_time_limit_exceeded()) { - *gc_overhead_limit_was_exceeded = true; - size_policy()->set_gc_time_limit_exceeded(false); - return NULL; - } - VM_GenCollectForAllocation op(size, is_tlab, gc_count_before); @@ -590,6 +594,24 @@ assert(result == NULL, "must be NULL if gc_locked() is true"); continue; // retry and/or stall as necessary } + + // Allocation has failed and a collection + // has been done. If the gc time limit was exceeded + // this time, return NULL so that an out-of-memory + // will be thrown. Clear gc_overhead_limit_exceeded + // so that the overhead-exceeded state does not persist. + + const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded(); + const bool softrefs_clear = all_soft_refs_clear(); + assert(!limit_exceeded || softrefs_clear, "Should have been cleared"); + if (limit_exceeded && softrefs_clear) { + *gc_overhead_limit_was_exceeded = true; + size_policy()->set_gc_overhead_limit_exceeded(false); + if (op.result() != NULL) { + CollectedHeap::fill_with_object(op.result(), size); + } + return NULL; + } assert(result == NULL || gch->is_in_reserved(result), "result not in heap"); return result; @@ -688,6 +710,9 @@ return result; } + assert(!should_clear_all_soft_refs(), + "Flag should have been handled and cleared prior to this point"); + // What else? We might try synchronous finalization later. If the total // space available is large enough for the allocation, then a more // complete compaction phase than we've tried so far might be diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/memory/collectorPolicy.hpp --- a/src/share/vm/memory/collectorPolicy.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/memory/collectorPolicy.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -69,12 +69,28 @@ size_t _min_alignment; size_t _max_alignment; + // The sizing of the heap is controlled by a sizing policy. + AdaptiveSizePolicy* _size_policy; + + // Set to true when policy wants soft refs cleared. + // Reset to false by gc after it clears all soft refs. + bool _should_clear_all_soft_refs; + // Set to true by the GC if the just-completed gc cleared all + // softrefs. This is set to true whenever a gc clears all softrefs, and + // set to false each time gc returns to the mutator.
For example, in the + // ParallelScavengeHeap case the latter would be done toward the end of + // mem_allocate() where it returns op.result() + bool _all_soft_refs_clear; + CollectorPolicy() : _min_alignment(1), _max_alignment(1), _initial_heap_byte_size(0), _max_heap_byte_size(0), - _min_heap_byte_size(0) + _min_heap_byte_size(0), + _size_policy(NULL), + _should_clear_all_soft_refs(false), + _all_soft_refs_clear(false) {} public: @@ -98,6 +114,19 @@ G1CollectorPolicyKind }; + AdaptiveSizePolicy* size_policy() { return _size_policy; } + bool should_clear_all_soft_refs() { return _should_clear_all_soft_refs; } + void set_should_clear_all_soft_refs(bool v) { _should_clear_all_soft_refs = v; } + // Returns the current value of _should_clear_all_soft_refs. + // _should_clear_all_soft_refs is set to false as a side effect. + bool use_should_clear_all_soft_refs(bool v); + bool all_soft_refs_clear() { return _all_soft_refs_clear; } + void set_all_soft_refs_clear(bool v) { _all_soft_refs_clear = v; } + + // Called by the GC after Soft Refs have been cleared to indicate + // that the request in _should_clear_all_soft_refs has been fulfilled. + void cleared_all_soft_refs(); + // Identification methods. virtual GenCollectorPolicy* as_generation_policy() { return NULL; } virtual TwoGenerationCollectorPolicy* as_two_generation_policy() { return NULL; } @@ -165,6 +194,22 @@ }; +class ClearedAllSoftRefs : public StackObj { + bool _clear_all_soft_refs; + CollectorPolicy* _collector_policy; + public: + ClearedAllSoftRefs(bool clear_all_soft_refs, + CollectorPolicy* collector_policy) : + _clear_all_soft_refs(clear_all_soft_refs), + _collector_policy(collector_policy) {} + + ~ClearedAllSoftRefs() { + if (_clear_all_soft_refs) { + _collector_policy->cleared_all_soft_refs(); + } + } +}; + class GenCollectorPolicy : public CollectorPolicy { protected: size_t _min_gen0_size; @@ -173,10 +218,6 @@ GenerationSpec **_generations; - // The sizing of the different generations in the heap are controlled - // by a sizing policy. - AdaptiveSizePolicy* _size_policy; - // Return true if an allocation should be attempted in the older // generation if it fails in the younger generation. Return // false, otherwise. @@ -236,14 +277,11 @@ virtual size_t large_typearray_limit(); // Adaptive size policy - AdaptiveSizePolicy* size_policy() { return _size_policy; } virtual void initialize_size_policy(size_t init_eden_size, size_t init_promo_size, size_t init_survivor_size); - }; - // All of hotspot's current collectors are subtypes of this // class. Currently, these collectors all use the same gen[0], // but have different gen[1] types. If we add another subtype diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/memory/defNewGeneration.cpp --- a/src/share/vm/memory/defNewGeneration.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/memory/defNewGeneration.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -594,6 +594,10 @@ _tenuring_threshold = age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize); + // A successful scavenge should restart the GC time limit count which is + // for full GC's. 
+ AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy(); + size_policy->reset_gc_overhead_limit_count(); if (PrintGC && !PrintGCDetails) { gch->print_heap_change(gch_prev_used); } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/memory/genCollectedHeap.cpp --- a/src/share/vm/memory/genCollectedHeap.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/memory/genCollectedHeap.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -428,7 +428,8 @@ assert(my_thread->is_VM_thread() || my_thread->is_ConcurrentGC_thread(), "incorrect thread type capability"); - assert(Heap_lock->is_locked(), "the requesting thread should have the Heap_lock"); + assert(Heap_lock->is_locked(), + "the requesting thread should have the Heap_lock"); guarantee(!is_gc_active(), "collection is not reentrant"); assert(max_level < n_gens(), "sanity check"); @@ -436,6 +437,11 @@ return; // GC is disabled (e.g. JNI GetXXXCritical operation) } + const bool do_clear_all_soft_refs = clear_all_soft_refs || + collector_policy()->should_clear_all_soft_refs(); + + ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy()); + const size_t perm_prev_used = perm_gen()->used(); if (PrintHeapAtGC) { @@ -560,11 +566,11 @@ if (rp->discovery_is_atomic()) { rp->verify_no_references_recorded(); rp->enable_discovery(); - rp->setup_policy(clear_all_soft_refs); + rp->setup_policy(do_clear_all_soft_refs); } else { // collect() below will enable discovery as appropriate } - _gens[i]->collect(full, clear_all_soft_refs, size, is_tlab); + _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab); if (!rp->enqueuing_is_done()) { rp->enqueue_discovered_references(); } else { diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/memory/genMarkSweep.cpp --- a/src/share/vm/memory/genMarkSweep.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/memory/genMarkSweep.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,13 @@ bool clear_all_softrefs) { assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); + GenCollectedHeap* gch = GenCollectedHeap::heap(); +#ifdef ASSERT + if (gch->collector_policy()->should_clear_all_soft_refs()) { + assert(clear_all_softrefs, "Policy should have been checked earlier"); + } +#endif + // hook up weak ref data so it can be used during Mark-Sweep assert(ref_processor() == NULL, "no stomping"); assert(rp != NULL, "should be non-NULL"); @@ -44,7 +51,6 @@ // Increment the invocation count for the permanent generation, since it is // implicitly collected whenever we do a full mark sweep collection. - GenCollectedHeap* gch = GenCollectedHeap::heap(); gch->perm_gen()->stat_record()->invocations++; // Capture heap size before collection for printing. 
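The soft-reference bookkeeping above leans on a destructor-based stack object: ClearedAllSoftRefs is constructed before the collection runs, and its destructor notifies the policy on every exit path once soft refs have been cleared. Below is a minimal self-contained sketch of that RAII idiom with hypothetical names (Policy, ClearedAllSoftRefsScope, do_collection); it illustrates the pattern only and is not HotSpot code.

#include <cstdio>

// Stand-in for CollectorPolicy's soft-ref flags (hypothetical).
struct Policy {
  bool should_clear;   // policy wants the next collection to clear soft refs
  bool all_clear;      // the just-completed collection cleared them all
  Policy() : should_clear(false), all_clear(false) {}
  void cleared_all_soft_refs() {  // request fulfilled; reset the request flag
    should_clear = false;
    all_clear = true;
  }
};

// Analogous to the ClearedAllSoftRefs StackObj added in collectorPolicy.hpp.
class ClearedAllSoftRefsScope {
  bool    _clear;
  Policy* _policy;
 public:
  ClearedAllSoftRefsScope(bool clear, Policy* p) : _clear(clear), _policy(p) {}
  ~ClearedAllSoftRefsScope() { if (_clear) _policy->cleared_all_soft_refs(); }
};

void do_collection(Policy* p, bool clear_all_soft_refs) {
  // Merge the caller's request with the policy's standing request, as
  // genCollectedHeap.cpp does before collecting.
  const bool do_clear = clear_all_soft_refs || p->should_clear;
  ClearedAllSoftRefsScope scope(do_clear, p);
  // ... collection work; any return path still triggers the notification.
}

int main() {
  Policy p;
  p.should_clear = true;
  do_collection(&p, false);
  std::printf("all_clear=%d should_clear=%d\n", p.all_clear, p.should_clear);
  return 0;
}

The point of the destructor is that the notification cannot be skipped by an early return from the collection path, which is why the patch uses a StackObj rather than an explicit call at each exit.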
diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/memory/heapInspection.cpp --- a/src/share/vm/memory/heapInspection.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/memory/heapInspection.cpp Wed May 05 16:39:47 2010 -0700 @@ -315,7 +315,7 @@ void HeapInspection::find_instances_at_safepoint(klassOop k, GrowableArray<oop>* result) { assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped"); - assert(Heap_lock->is_locked(), "should have the Heap_lock") + assert(Heap_lock->is_locked(), "should have the Heap_lock"); // Ensure that the heap is parsable Universe::heap()->ensure_parsability(false); // no need to retire TLABs diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/memory/threadLocalAllocBuffer.hpp --- a/src/share/vm/memory/threadLocalAllocBuffer.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/memory/threadLocalAllocBuffer.hpp Wed May 05 16:39:47 2010 -0700 @@ -111,7 +111,22 @@ // Allocate size HeapWords. The memory is NOT initialized to zero. inline HeapWord* allocate(size_t size); - static size_t alignment_reserve() { return align_object_size(typeArrayOopDesc::header_size(T_INT)); } + + // Reserve space at the end of TLAB + static size_t end_reserve() { + int reserve_size = typeArrayOopDesc::header_size(T_INT); + if (AllocatePrefetchStyle == 3) { + // BIS is used to prefetch - we need a space for it. + // +1 for rounding up to the next cache line, +1 to be safe + int lines = AllocatePrefetchLines + 2; + int step_size = AllocatePrefetchStepSize; + int distance = AllocatePrefetchDistance; + int prefetch_end = (distance + step_size*lines)/(int)HeapWordSize; + reserve_size = MAX2(reserve_size, prefetch_end); + } + return reserve_size; + } + static size_t alignment_reserve() { return align_object_size(end_reserve()); } static size_t alignment_reserve_in_bytes() { return alignment_reserve() * HeapWordSize; } // Return tlab size or remaining space in eden such that the diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/oops/generateOopMap.cpp --- a/src/share/vm/oops/generateOopMap.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/oops/generateOopMap.cpp Wed May 05 16:39:47 2010 -0700 @@ -807,7 +807,7 @@ } CellTypeState GenerateOopMap::get_var(int localNo) { - assert(localNo < _max_locals + _nof_refval_conflicts, "variable read error") + assert(localNo < _max_locals + _nof_refval_conflicts, "variable read error"); if (localNo < 0 || localNo > _max_locals) { verify_error("variable read error: r%d", localNo); return valCTS; // just to pick something; diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/oops/instanceKlass.cpp --- a/src/share/vm/oops/instanceKlass.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/oops/instanceKlass.cpp Wed May 05 16:39:47 2010 -0700 @@ -966,7 +966,7 @@ // not found #ifdef ASSERT int index = linear_search(methods, name, signature); - if (index != -1) fatal1("binary search bug: should have found entry %d", index); + assert(index == -1, err_msg("binary search should have found entry %d", index)); #endif return NULL; } else if (res < 0) { @@ -977,7 +977,7 @@ } #ifdef ASSERT int index = linear_search(methods, name, signature); - if (index != -1) fatal1("binary search bug: should have found entry %d", index); + assert(index == -1, err_msg("binary search should have found entry %d", index)); #endif return NULL; } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/oops/instanceKlassKlass.cpp --- a/src/share/vm/oops/instanceKlassKlass.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/oops/instanceKlassKlass.cpp Wed May 05 16:39:47 2010 -0700 @@
-712,10 +712,10 @@ int sib_count = 0; while (sib != NULL) { if (sib == ik) { - fatal1("subclass cycle of length %d", sib_count); + fatal(err_msg("subclass cycle of length %d", sib_count)); } if (sib_count >= 100000) { - fatal1("suspiciously long subclass list %d", sib_count); + fatal(err_msg("suspiciously long subclass list %d", sib_count)); } guarantee(sib->as_klassOop()->is_klass(), "should be klass"); guarantee(sib->as_klassOop()->is_perm(), "should be in permspace"); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/oops/klassVtable.cpp --- a/src/share/vm/oops/klassVtable.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/oops/klassVtable.cpp Wed May 05 16:39:47 2010 -0700 @@ -899,7 +899,7 @@ int nof_methods = methods()->length(); HandleMark hm; KlassHandle klass = _klass; - assert(nof_methods > 0, "at least one method must exist for interface to be in vtable") + assert(nof_methods > 0, "at least one method must exist for interface to be in vtable"); Handle interface_loader (THREAD, instanceKlass::cast(interf_h())->class_loader()); int ime_num = 0; @@ -1180,8 +1180,8 @@ oop* end_of_obj = (oop*)_klass() + _klass()->size(); oop* end_of_vtable = (oop *)&table()[_length]; if (end_of_vtable > end_of_obj) { - fatal1("klass %s: klass object too short (vtable extends beyond end)", - _klass->internal_name()); + fatal(err_msg("klass %s: klass object too short (vtable extends beyond " + "end)", _klass->internal_name())); } for (int i = 0; i < _length; i++) table()[i].verify(this, st); @@ -1224,7 +1224,7 @@ #ifndef PRODUCT print(); #endif - fatal1("vtableEntry %#lx: method is from subclass", this); + fatal(err_msg("vtableEntry " PTR_FORMAT ": method is from subclass", this)); } } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/opto/c2_globals.hpp --- a/src/share/vm/opto/c2_globals.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/opto/c2_globals.hpp Wed May 05 16:39:47 2010 -0700 @@ -52,9 +52,6 @@ "Code alignment for interior entry points " \ "in generated code (in bytes)") \ \ - product_pd(intx, OptoLoopAlignment, \ - "Align inner loops to zero relative to this modulus") \ - \ product(intx, MaxLoopPad, (OptoLoopAlignment-1), \ "Align a loop if padding size in bytes is less or equal to this value") \ \ diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/opto/c2compiler.cpp --- a/src/share/vm/opto/c2compiler.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/opto/c2compiler.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -105,8 +105,7 @@ } bool subsume_loads = true; bool do_escape_analysis = DoEscapeAnalysis && - !(env->jvmti_can_hotswap_or_post_breakpoint() || - env->jvmti_can_examine_or_deopt_anywhere()); + !env->jvmti_can_access_local_variables(); while (!env->failing()) { // Attempt to compile while subsuming loads into machine instructions. Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/opto/classes.hpp --- a/src/share/vm/opto/classes.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/opto/classes.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,6 +44,8 @@ macro(BoxLock) macro(ReverseBytesI) macro(ReverseBytesL) +macro(ReverseBytesUS) +macro(ReverseBytesS) macro(CProj) macro(CallDynamicJava) macro(CallJava) diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/opto/compile.cpp --- a/src/share/vm/opto/compile.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/opto/compile.cpp Wed May 05 16:39:47 2010 -0700 @@ -871,7 +871,6 @@ set_has_split_ifs(false); set_has_loops(has_method() && method()->has_loops()); // first approximation set_has_stringbuilder(false); - _deopt_happens = true; // start out assuming the worst _trap_can_recompile = false; // no traps emitted yet _major_progress = true; // start out assuming good things will happen set_has_unsafe_access(false); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/opto/compile.hpp --- a/src/share/vm/opto/compile.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/opto/compile.hpp Wed May 05 16:39:47 2010 -0700 @@ -146,7 +146,6 @@ int _orig_pc_slot_offset_in_bytes; int _major_progress; // Count of something big happening - bool _deopt_happens; // TRUE if de-optimization CAN happen bool _has_loops; // True if the method _may_ have some loops bool _has_split_ifs; // True if the method _may_ have some split-if bool _has_unsafe_access; // True if the method _may_ produce faults in unsafe loads or stores. @@ -300,7 +299,6 @@ void set_freq_inline_size(int n) { _freq_inline_size = n; } int freq_inline_size() const { return _freq_inline_size; } void set_max_inline_size(int n) { _max_inline_size = n; } - bool deopt_happens() const { return _deopt_happens; } bool has_loops() const { return _has_loops; } void set_has_loops(bool z) { _has_loops = z; } bool has_split_ifs() const { return _has_split_ifs; } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/opto/graphKit.cpp --- a/src/share/vm/opto/graphKit.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/opto/graphKit.cpp Wed May 05 16:39:47 2010 -0700 @@ -812,10 +812,6 @@ JVMState* youngest_jvms = sync_jvms(); - // Do we need debug info here? If it is a SafePoint and this method - // cannot de-opt, then we do NOT need any debug info. - bool full_info = (C->deopt_happens() || call->Opcode() != Op_SafePoint); - // If we are guaranteed to throw, we can prune everything but the // input to the current bytecode. bool can_prune_locals = false; @@ -829,10 +825,9 @@ } } - if (env()->jvmti_can_examine_or_deopt_anywhere()) { + if (env()->jvmti_can_access_local_variables()) { // At any safepoint, this method can get breakpointed, which would // then require an immediate deoptimization. 
- full_info = true; can_prune_locals = false; // do not prune locals stack_slots_not_pruned = 0; } @@ -890,7 +885,7 @@ k = in_jvms->locoff(); l = in_jvms->loc_size(); out_jvms->set_locoff(p); - if (full_info && !can_prune_locals) { + if (!can_prune_locals) { for (j = 0; j < l; j++) call->set_req(p++, in_map->in(k+j)); } else { @@ -901,7 +896,7 @@ k = in_jvms->stkoff(); l = in_jvms->sp(); out_jvms->set_stkoff(p); - if (full_info && !can_prune_locals) { + if (!can_prune_locals) { for (j = 0; j < l; j++) call->set_req(p++, in_map->in(k+j)); } else if (can_prune_locals && stack_slots_not_pruned != 0) { diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/opto/idealGraphPrinter.cpp --- a/src/share/vm/opto/idealGraphPrinter.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/opto/idealGraphPrinter.cpp Wed May 05 16:39:47 2010 -0700 @@ -151,7 +151,8 @@ } else { // It would be nice if we could shut down cleanly but it should // be an error if we can't connect to the visualizer. - fatal2("Couldn't connect to visualizer at %s:%d", PrintIdealGraphAddress, PrintIdealGraphPort); + fatal(err_msg("Couldn't connect to visualizer at %s:%d", + PrintIdealGraphAddress, PrintIdealGraphPort)); } } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/opto/library_call.cpp --- a/src/share/vm/opto/library_call.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/opto/library_call.cpp Wed May 05 16:39:47 2010 -0700 @@ -636,6 +636,8 @@ case vmIntrinsics::_reverseBytes_i: case vmIntrinsics::_reverseBytes_l: + case vmIntrinsics::_reverseBytes_s: + case vmIntrinsics::_reverseBytes_c: return inline_reverseBytes((vmIntrinsics::ID) intrinsic_id()); case vmIntrinsics::_get_AtomicLong: @@ -2010,13 +2012,19 @@ return true; } -//----------------------------inline_reverseBytes_int/long------------------- +//----------------------------inline_reverseBytes_int/long/char/short------------------- // inline Integer.reverseBytes(int) // inline Long.reverseBytes(long) +// inline Character.reverseBytes(char) +// inline Short.reverseBytes(short) bool LibraryCallKit::inline_reverseBytes(vmIntrinsics::ID id) { - assert(id == vmIntrinsics::_reverseBytes_i || id == vmIntrinsics::_reverseBytes_l, "not reverse Bytes"); - if (id == vmIntrinsics::_reverseBytes_i && !Matcher::has_match_rule(Op_ReverseBytesI)) return false; - if (id == vmIntrinsics::_reverseBytes_l && !Matcher::has_match_rule(Op_ReverseBytesL)) return false; + assert(id == vmIntrinsics::_reverseBytes_i || id == vmIntrinsics::_reverseBytes_l || + id == vmIntrinsics::_reverseBytes_c || id == vmIntrinsics::_reverseBytes_s, + "not reverse Bytes"); + if (id == vmIntrinsics::_reverseBytes_i && !Matcher::has_match_rule(Op_ReverseBytesI)) return false; + if (id == vmIntrinsics::_reverseBytes_l && !Matcher::has_match_rule(Op_ReverseBytesL)) return false; + if (id == vmIntrinsics::_reverseBytes_c && !Matcher::has_match_rule(Op_ReverseBytesUS)) return false; + if (id == vmIntrinsics::_reverseBytes_s && !Matcher::has_match_rule(Op_ReverseBytesS)) return false; _sp += arg_size(); // restore stack pointer switch (id) { case vmIntrinsics::_reverseBytes_i: @@ -2025,6 +2033,12 @@ case vmIntrinsics::_reverseBytes_l: push_pair(_gvn.transform(new (C, 2) ReverseBytesLNode(0, pop_pair()))); break; + case vmIntrinsics::_reverseBytes_c: + push(_gvn.transform(new (C, 2) ReverseBytesUSNode(0, pop()))); + break; + case vmIntrinsics::_reverseBytes_s: + push(_gvn.transform(new (C, 2) ReverseBytesSNode(0, pop()))); + break; default: ; } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/opto/macro.cpp --- 
a/src/share/vm/opto/macro.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/opto/macro.cpp Wed May 05 16:39:47 2010 -0700 @@ -1487,11 +1487,11 @@ Node*& contended_phi_rawmem, Node* old_eden_top, Node* new_eden_top, Node* length) { + enum { fall_in_path = 1, pf_path = 2 }; if( UseTLAB && AllocatePrefetchStyle == 2 ) { // Generate prefetch allocation with watermark check. // As an allocation hits the watermark, we will prefetch starting // at a "distance" away from watermark. - enum { fall_in_path = 1, pf_path = 2 }; Node *pf_region = new (C, 3) RegionNode(3); Node *pf_phi_rawmem = new (C, 3) PhiNode( pf_region, Type::MEMORY, @@ -1570,6 +1570,45 @@ needgc_false = pf_region; contended_phi_rawmem = pf_phi_rawmem; i_o = pf_phi_abio; + } else if( UseTLAB && AllocatePrefetchStyle == 3 ) { + // Insert a prefetch for each allocation only on the fast-path + Node *pf_region = new (C, 3) RegionNode(3); + Node *pf_phi_rawmem = new (C, 3) PhiNode( pf_region, Type::MEMORY, + TypeRawPtr::BOTTOM ); + + // Generate several prefetch instructions only for arrays. + uint lines = (length != NULL) ? AllocatePrefetchLines : 1; + uint step_size = AllocatePrefetchStepSize; + uint distance = AllocatePrefetchDistance; + + // Next cache address. + Node *cache_adr = new (C, 4) AddPNode(old_eden_top, old_eden_top, + _igvn.MakeConX(distance)); + transform_later(cache_adr); + cache_adr = new (C, 2) CastP2XNode(needgc_false, cache_adr); + transform_later(cache_adr); + Node* mask = _igvn.MakeConX(~(intptr_t)(step_size-1)); + cache_adr = new (C, 3) AndXNode(cache_adr, mask); + transform_later(cache_adr); + cache_adr = new (C, 2) CastX2PNode(cache_adr); + transform_later(cache_adr); + + // Prefetch + Node *prefetch = new (C, 3) PrefetchWriteNode( contended_phi_rawmem, cache_adr ); + prefetch->set_req(0, needgc_false); + transform_later(prefetch); + contended_phi_rawmem = prefetch; + Node *prefetch_adr; + distance = step_size; + for ( uint i = 1; i < lines; i++ ) { + prefetch_adr = new (C, 4) AddPNode( cache_adr, cache_adr, + _igvn.MakeConX(distance) ); + transform_later(prefetch_adr); + prefetch = new (C, 3) PrefetchWriteNode( contended_phi_rawmem, prefetch_adr ); + transform_later(prefetch); + distance += step_size; + contended_phi_rawmem = prefetch; + } } else if( AllocatePrefetchStyle > 0 ) { // Insert a prefetch for each allocation only on the fast-path Node *prefetch_adr; diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/opto/memnode.hpp --- a/src/share/vm/opto/memnode.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/opto/memnode.hpp Wed May 05 16:39:47 2010 -0700 @@ -1244,5 +1244,5 @@ virtual int Opcode() const; virtual uint ideal_reg() const { return NotAMachineReg; } virtual uint match_edge(uint idx) const { return idx==2; } - virtual const Type *bottom_type() const { return Type::ABIO; } + virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; } }; diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/opto/node.cpp --- a/src/share/vm/opto/node.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/opto/node.cpp Wed May 05 16:39:47 2010 -0700 @@ -1188,7 +1188,7 @@ Node* use = dead->last_out(k); igvn->hash_delete(use); // Yank from hash table prior to mod if (use->in(0) == dead) { // Found another dead node - assert (!use->is_Con(), "Control for Con node should be Root node.") + assert (!use->is_Con(), "Control for Con node should be Root node."); use->set_req(0, top); // Cut dead edge to prevent processing nstack.push(use); // the dead node again. 
} else { // Else found a not-dead user diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/opto/output.cpp --- a/src/share/vm/opto/output.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/opto/output.cpp Wed May 05 16:39:47 2010 -0700 @@ -1231,7 +1231,7 @@ if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset ) { padding = nop_size; } - assert( labels_not_set || padding == 0, "instruction should already be aligned") + assert( labels_not_set || padding == 0, "instruction should already be aligned"); if(padding > 0) { assert((padding % nop_size) == 0, "padding is not a multiple of NOP size"); @@ -2407,7 +2407,7 @@ n->dump(); tty->print_cr("..."); prior_use->dump(); - assert_msg(edge_from_to(prior_use,n),msg); + assert(edge_from_to(prior_use,n),msg); } _reg_node.map(def,NULL); // Kill live USEs } @@ -2446,11 +2446,11 @@ OptoReg::Name reg_lo = _regalloc->get_reg_first(def); OptoReg::Name reg_hi = _regalloc->get_reg_second(def); if( OptoReg::is_valid(reg_lo) ) { - assert_msg(!_reg_node[reg_lo] || edge_from_to(_reg_node[reg_lo],def), msg ); + assert(!_reg_node[reg_lo] || edge_from_to(_reg_node[reg_lo],def), msg); _reg_node.map(reg_lo,n); } if( OptoReg::is_valid(reg_hi) ) { - assert_msg(!_reg_node[reg_hi] || edge_from_to(_reg_node[reg_hi],def), msg ); + assert(!_reg_node[reg_hi] || edge_from_to(_reg_node[reg_hi],def), msg); _reg_node.map(reg_hi,n); } } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/opto/parse1.cpp --- a/src/share/vm/opto/parse1.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/opto/parse1.cpp Wed May 05 16:39:47 2010 -0700 @@ -280,7 +280,13 @@ continue; } // Construct code to access the appropriate local. - Node *value = fetch_interpreter_state(index, type->basic_type(), locals_addr, osr_buf); + BasicType bt = type->basic_type(); + if (type == TypePtr::NULL_PTR) { + // Ptr types are mixed together with T_ADDRESS but NULL is + // really for T_OBJECT types so correct it. + bt = T_OBJECT; + } + Node *value = fetch_interpreter_state(index, bt, locals_addr, osr_buf); set_local(index, value); } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/opto/phaseX.hpp --- a/src/share/vm/opto/phaseX.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/opto/phaseX.hpp Wed May 05 16:39:47 2010 -0700 @@ -310,7 +310,7 @@ void dump_nodes_and_types_recur( const Node *n, uint depth, bool only_ctrl, VectorSet &visited); uint _count_progress; // For profiling, count transforms that make progress - void set_progress() { ++_count_progress; assert( allow_progress(),"No progress allowed during verification") } + void set_progress() { ++_count_progress; assert( allow_progress(),"No progress allowed during verification"); } void clear_progress() { _count_progress = 0; } uint made_progress() const { return _count_progress; } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/opto/runtime.cpp --- a/src/share/vm/opto/runtime.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/opto/runtime.cpp Wed May 05 16:39:47 2010 -0700 @@ -865,7 +865,7 @@ thread->set_exception_stack_size(0); // Check if the exception PC is a MethodHandle call site. - thread->set_is_method_handle_exception(nm->is_method_handle_return(pc)); + thread->set_is_method_handle_return(nm->is_method_handle_return(pc)); } // Restore correct return pc. Was saved above. 
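The new ReverseBytesUS and ReverseBytesS ideal nodes (wired up in classes.hpp and library_call.cpp above, with the node declarations in subnode.hpp just below) model the 16-bit byte swaps behind Character.reverseBytes and Short.reverseBytes. Here is a minimal sketch of the arithmetic they represent, using hypothetical helper names; the real nodes are matched to platform instructions, so this shows only the semantics, not the implementation.

#include <cassert>
#include <cstdint>

// Unsigned variant (char): the swapped value is zero-extended,
// matching ReverseBytesUS's bottom type of TypeInt::CHAR.
static inline uint16_t reverse_bytes_us(uint16_t x) {
  return (uint16_t)((x << 8) | (x >> 8));
}

// Signed variant (short): the swapped value is sign-extended,
// matching ReverseBytesS's bottom type of TypeInt::SHORT.
static inline int16_t reverse_bytes_s(int16_t x) {
  return (int16_t)reverse_bytes_us((uint16_t)x);
}

int main() {
  assert(reverse_bytes_us(0x1234) == 0x3412);
  assert(reverse_bytes_s((int16_t)0x00FF) == (int16_t)0xFF00); // negative result
  return 0;
}

This also shows why two nodes are needed: the swap itself is identical, but the result type (zero- versus sign-extension to int) differs between char and short.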
diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/opto/subnode.hpp --- a/src/share/vm/opto/subnode.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/opto/subnode.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -509,3 +509,23 @@ const Type *bottom_type() const { return TypeLong::LONG; } virtual uint ideal_reg() const { return Op_RegL; } }; + +//-------------------------------ReverseBytesUSNode-------------------------------- +// reverse bytes of an unsigned short / char +class ReverseBytesUSNode : public Node { +public: + ReverseBytesUSNode(Node *c, Node *in1) : Node(c, in1) {} + virtual int Opcode() const; + const Type *bottom_type() const { return TypeInt::CHAR; } + virtual uint ideal_reg() const { return Op_RegI; } +}; + +//-------------------------------ReverseBytesSNode-------------------------------- +// reverse bytes of a short +class ReverseBytesSNode : public Node { +public: + ReverseBytesSNode(Node *c, Node *in1) : Node(c, in1) {} + virtual int Opcode() const; + const Type *bottom_type() const { return TypeInt::SHORT; } + virtual uint ideal_reg() const { return Op_RegI; } +}; diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/prims/forte.cpp --- a/src/share/vm/prims/forte.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/prims/forte.cpp Wed May 05 16:39:47 2010 -0700 @@ -647,7 +647,7 @@ void Forte::register_stub(const char* name, address start, address end) { #if !defined(_WINDOWS) && !defined(IA64) assert(pointer_delta(end, start, sizeof(jbyte)) < INT_MAX, - "Code size exceeds maximum range") + "Code size exceeds maximum range"); collector_func_load((char*)name, NULL, NULL, start, pointer_delta(end, start, sizeof(jbyte)), 0, NULL); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/prims/jni.cpp --- a/src/share/vm/prims/jni.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/prims/jni.cpp Wed May 05 16:39:47 2010 -0700 @@ -3311,6 +3311,7 @@ OrderAccess::release_store(&vm_created, 0); } + NOT_PRODUCT(test_error_handler(ErrorHandlerTest)); return result; } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/prims/jvmtiExport.cpp --- a/src/share/vm/prims/jvmtiExport.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/prims/jvmtiExport.cpp Wed May 05 16:39:47 2010 -0700 @@ -270,7 +270,6 @@ int JvmtiExport::_field_modification_count = 0; bool JvmtiExport::_can_access_local_variables = false; -bool JvmtiExport::_can_examine_or_deopt_anywhere = false; bool JvmtiExport::_can_hotswap_or_post_breakpoint = false; bool JvmtiExport::_can_modify_any_class = false; bool JvmtiExport::_can_walk_any_space = false; diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/prims/jvmtiExport.hpp --- a/src/share/vm/prims/jvmtiExport.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/prims/jvmtiExport.hpp Wed May 05 16:39:47 2010 -0700 @@ -58,7 +58,6 @@ static int _field_modification_count; static bool _can_access_local_variables; - static bool _can_examine_or_deopt_anywhere; static bool _can_hotswap_or_post_breakpoint; static bool _can_modify_any_class; static bool _can_walk_any_space; @@ -112,7 +111,6 @@ // these should only be called by the friend class friend class JvmtiManageCapabilities; - inline static void set_can_examine_or_deopt_anywhere(bool on) { _can_examine_or_deopt_anywhere = (on != 0); } inline static void 
set_can_modify_any_class(bool on) { _can_modify_any_class = (on != 0); } inline static void set_can_access_local_variables(bool on) { _can_access_local_variables = (on != 0); } inline static void set_can_hotswap_or_post_breakpoint(bool on) { _can_hotswap_or_post_breakpoint = (on != 0); } @@ -220,7 +218,6 @@ static void enter_live_phase(); // ------ can_* conditions (below) are set at OnLoad and never changed ------------ - inline static bool can_examine_or_deopt_anywhere() { return _can_examine_or_deopt_anywhere; } inline static bool can_modify_any_class() { return _can_modify_any_class; } inline static bool can_access_local_variables() { return _can_access_local_variables; } inline static bool can_hotswap_or_post_breakpoint() { return _can_hotswap_or_post_breakpoint; } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/prims/jvmtiManageCapabilities.cpp --- a/src/share/vm/prims/jvmtiManageCapabilities.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/prims/jvmtiManageCapabilities.cpp Wed May 05 16:39:47 2010 -0700 @@ -332,16 +332,6 @@ } JvmtiExport::set_can_get_source_debug_extension(avail.can_get_source_debug_extension); - JvmtiExport::set_can_examine_or_deopt_anywhere( - avail.can_generate_breakpoint_events || - interp_events || - avail.can_redefine_classes || - avail.can_retransform_classes || - avail.can_access_local_variables || - avail.can_get_owned_monitor_info || - avail.can_get_current_contended_monitor || - avail.can_get_monitor_info || - avail.can_get_owned_monitor_stack_depth_info); JvmtiExport::set_can_maintain_original_method_order(avail.can_maintain_original_method_order); JvmtiExport::set_can_post_interpreter_events(interp_events); JvmtiExport::set_can_hotswap_or_post_breakpoint( @@ -353,10 +343,13 @@ avail.can_generate_all_class_hook_events); JvmtiExport::set_can_walk_any_space( avail.can_tag_objects); // disable sharing in onload phase + // This controls whether the compilers keep extra locals live to + // improve the debugging experience, so only set it if the selected + // capabilities look like a debugger. JvmtiExport::set_can_access_local_variables( - avail.can_access_local_variables || - avail.can_redefine_classes || - avail.can_retransform_classes); + avail.can_access_local_variables || + avail.can_generate_breakpoint_events || + avail.can_generate_frame_pop_events); JvmtiExport::set_can_post_on_exceptions( avail.can_generate_exception_events || avail.can_generate_frame_pop_events || diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/runtime/arguments.cpp --- a/src/share/vm/runtime/arguments.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/runtime/arguments.cpp Wed May 05 16:39:47 2010 -0700 @@ -1353,6 +1353,16 @@ MarkStackSize / K, MarkStackSizeMax / K); tty->print_cr("ConcGCThreads: %u", ConcGCThreads); } + + if (FLAG_IS_DEFAULT(GCTimeRatio) || GCTimeRatio == 0) { + // In G1, we want the default GC overhead goal to be higher than, + // say, in PS. So we set it here to 10%. Otherwise the heap might + // be expanded more aggressively than we would like. In + // fact, even 10% seems not to be high enough in some cases + // (especially small GC stress tests whose main activity is + // allocation). We might consider increasing it further.
+ FLAG_SET_DEFAULT(GCTimeRatio, 9); + } } void Arguments::set_heap_size() { diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/runtime/frame.cpp --- a/src/share/vm/runtime/frame.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/runtime/frame.cpp Wed May 05 16:39:47 2010 -0700 @@ -844,7 +844,7 @@ } void oop_at_offset_do(int offset) { - assert (offset >= 0, "illegal offset") + assert (offset >= 0, "illegal offset"); oop* addr = (oop*) _fr->entry_frame_argument_at(offset); _f->do_oop(addr); } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/runtime/globals.hpp --- a/src/share/vm/runtime/globals.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/runtime/globals.hpp Wed May 05 16:39:47 2010 -0700 @@ -652,6 +652,11 @@ product(bool, PrintGCApplicationStoppedTime, false, \ "Print the time the application has been stopped") \ \ + notproduct(uintx, ErrorHandlerTest, 0, \ + "If > 0, provokes an error after VM initialization; the value " \ + "determines which error to provoke. See test_error_handler() " \ + "in debug.cpp.") \ + \ develop(bool, Verbose, false, \ "Prints additional debugging information from other modes") \ \ @@ -1052,7 +1057,8 @@ "Use SSE2 MOVDQU instruction for Arraycopy") \ \ product(intx, FieldsAllocationStyle, 1, \ - "0 - type based with oops first, 1 - with oops last") \ + "0 - type based with oops first, 1 - with oops last, " \ + "2 - oops in super and sub classes are together") \ \ product(bool, CompactFields, true, \ "Allocate nonstatic fields in gaps between previous fields") \ \ @@ -2707,7 +2713,8 @@ product(intx, AllocatePrefetchStyle, 1, \ "0 = no prefetch, " \ "1 = prefetch instructions for each allocation, " \ - "2 = use TLAB watermark to gate allocation prefetch") \ + "2 = use TLAB watermark to gate allocation prefetch, " \ + "3 = use BIS instruction on Sparc for allocation prefetch") \ \ product(intx, AllocatePrefetchDistance, -1, \ "Distance to prefetch ahead of allocation pointer") \ \ @@ -3110,6 +3117,9 @@ develop_pd(intx, CodeEntryAlignment, \ "Code entry alignment for generated code (in bytes)") \ \ + product_pd(intx, OptoLoopAlignment, \ + "Align inner loops to zero relative to this modulus") \ \ product_pd(uintx, InitialCodeCacheSize, \ "Initial code cache size (in bytes)") \ \ diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/runtime/memprofiler.cpp --- a/src/share/vm/runtime/memprofiler.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/runtime/memprofiler.cpp Wed May 05 16:39:47 2010 -0700 @@ -62,7 +62,7 @@ // Create log file _log_fp = fopen(log_name , "w+"); if (_log_fp == NULL) { - fatal1("MemProfiler: Cannot create log file: %s", log_name); + fatal(err_msg("MemProfiler: Cannot create log file: %s", log_name)); } fprintf(_log_fp, "MemProfiler: sizes are in Kb, time is in seconds since startup\n\n"); fprintf(_log_fp, " time, #thr, #cls, heap, heap, perm, perm, code, hndls, rescs, oopmp\n"); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/runtime/mutex.cpp --- a/src/share/vm/runtime/mutex.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/runtime/mutex.cpp Wed May 05 16:39:47 2010 -0700 @@ -1288,8 +1288,9 @@ !(this == Safepoint_lock && contains(locks, Terminator_lock) && SafepointSynchronize::is_synchronizing())) { new_owner->print_owned_locks(); - fatal4("acquiring lock %s/%d out of order with lock %s/%d -- possible deadlock", - this->name(), this->rank(), locks->name(), locks->rank()); + fatal(err_msg("acquiring lock %s/%d out of order with lock %s/%d -- " + "possible deadlock", this->name(), this->rank(), + locks->name(), locks->rank())); }
this->_next = new_owner->_owned_locks; @@ -1342,7 +1343,8 @@ || rank() == Mutex::special, "wrong thread state for using locks"); if (StrictSafepointChecks) { if (thread->is_VM_thread() && !allow_vm_block()) { - fatal1("VM thread using lock %s (not allowed to block on)", name()); + fatal(err_msg("VM thread using lock %s (not allowed to block on)", + name())); } debug_only(if (rank() != Mutex::special) \ thread->check_for_valid_safepoint_state(false);) diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/runtime/mutexLocker.cpp --- a/src/share/vm/runtime/mutexLocker.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/runtime/mutexLocker.cpp Wed May 05 16:39:47 2010 -0700 @@ -70,6 +70,7 @@ Monitor* CMark_lock = NULL; Monitor* ZF_mon = NULL; Monitor* Cleanup_mon = NULL; +Mutex* CMRegionStack_lock = NULL; Mutex* SATB_Q_FL_lock = NULL; Monitor* SATB_Q_CBL_mon = NULL; Mutex* Shared_SATB_Q_lock = NULL; @@ -135,7 +136,7 @@ // see if invoker of VM operation owns it VM_Operation* op = VMThread::vm_operation(); if (op != NULL && op->calling_thread() == lock->owner()) return; - fatal1("must own lock %s", lock->name()); + fatal(err_msg("must own lock %s", lock->name())); } // a stronger assertion than the above @@ -143,7 +144,7 @@ if (IgnoreLockingAssertions) return; assert(lock != NULL, "Need non-NULL lock"); if (lock->owned_by_self()) return; - fatal1("must own lock %s", lock->name()); + fatal(err_msg("must own lock %s", lock->name())); } #endif @@ -167,6 +168,7 @@ def(CMark_lock , Monitor, nonleaf, true ); // coordinate concurrent mark thread def(ZF_mon , Monitor, leaf, true ); def(Cleanup_mon , Monitor, nonleaf, true ); + def(CMRegionStack_lock , Mutex, leaf, true ); def(SATB_Q_FL_lock , Mutex , special, true ); def(SATB_Q_CBL_mon , Monitor, nonleaf, true ); def(Shared_SATB_Q_lock , Mutex, nonleaf, true ); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/runtime/mutexLocker.hpp --- a/src/share/vm/runtime/mutexLocker.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/runtime/mutexLocker.hpp Wed May 05 16:39:47 2010 -0700 @@ -63,6 +63,7 @@ extern Monitor* CMark_lock; // used for concurrent mark thread coordination extern Monitor* ZF_mon; // used for G1 conc zero-fill. extern Monitor* Cleanup_mon; // used for G1 conc cleanup. +extern Mutex* CMRegionStack_lock; // used for protecting accesses to the CM region stack extern Mutex* SATB_Q_FL_lock; // Protects SATB Q // buffer free list. extern Monitor* SATB_Q_CBL_mon; // Protects SATB Q diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/runtime/os.cpp --- a/src/share/vm/runtime/os.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/runtime/os.cpp Wed May 05 16:39:47 2010 -0700 @@ -406,8 +406,10 @@ #ifdef ASSERT inline size_t get_size(void* obj) { size_t size = *size_addr_from_obj(obj); - if (size < 0 ) - fatal2("free: size field of object #%p was overwritten (%lu)", obj, size); + if (size < 0) { + fatal(err_msg("free: size field of object #" PTR_FORMAT " was overwritten (" + SIZE_FORMAT ")", obj, size)); + } return size; } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/runtime/safepoint.cpp --- a/src/share/vm/runtime/safepoint.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/runtime/safepoint.cpp Wed May 05 16:39:47 2010 -0700 @@ -594,7 +594,7 @@ break; default: - fatal1("Illegal threadstate encountered: %d", state); + fatal(err_msg("Illegal threadstate encountered: %d", state)); } // Check for pending. async. 
exceptions or suspends - except if the diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/runtime/sharedRuntime.cpp --- a/src/share/vm/runtime/sharedRuntime.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/runtime/sharedRuntime.cpp Wed May 05 16:39:47 2010 -0700 @@ -259,13 +259,16 @@ address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) { assert(frame::verify_return_pc(return_address), "must be a return pc"); + // Reset MethodHandle flag. + thread->set_is_method_handle_return(false); + // the fastest case first CodeBlob* blob = CodeCache::find_blob(return_address); if (blob != NULL && blob->is_nmethod()) { nmethod* code = (nmethod*)blob; assert(code != NULL, "nmethod must be present"); // Check if the return address is a MethodHandle call site. - thread->set_is_method_handle_exception(code->is_method_handle_return(return_address)); + thread->set_is_method_handle_return(code->is_method_handle_return(return_address)); // native nmethods don't have exception handlers assert(!code->is_native_method(), "no exception handler"); assert(code->header_begin() != code->exception_begin(), "no exception handler"); @@ -292,7 +295,7 @@ nmethod* code = (nmethod*)blob; assert(code != NULL, "nmethod must be present"); // Check if the return address is a MethodHandle call site. - thread->set_is_method_handle_exception(code->is_method_handle_return(return_address)); + thread->set_is_method_handle_return(code->is_method_handle_return(return_address)); assert(code->header_begin() != code->exception_begin(), "no exception handler"); return code->exception_begin(); } @@ -470,6 +473,13 @@ t = table.entry_for(catch_pco, -1, 0); } +#ifdef COMPILER1 + if (t == NULL && nm->is_compiled_by_c1()) { + assert(nm->unwind_handler_begin() != NULL, ""); + return nm->unwind_handler_begin(); + } +#endif + if (t == NULL) { tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d", ret_pc, handler_bci); tty->print_cr(" Exception:"); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/runtime/signature.cpp --- a/src/share/vm/runtime/signature.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/runtime/signature.cpp Wed May 05 16:39:47 2010 -0700 @@ -57,7 +57,7 @@ } void SignatureIterator::expect(char c) { - if (_signature->byte_at(_index) != c) fatal1("expecting %c", c); + if (_signature->byte_at(_index) != c) fatal(err_msg("expecting %c", c)); _index++; } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/runtime/stubRoutines.cpp --- a/src/share/vm/runtime/stubRoutines.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/runtime/stubRoutines.cpp Wed May 05 16:39:47 2010 -0700 @@ -118,7 +118,10 @@ ResourceMark rm; TraceTime timer("StubRoutines generation 1", TraceStartupTime); _code1 = BufferBlob::create("StubRoutines (1)", code_size1); - if( _code1 == NULL) vm_exit_out_of_memory1(code_size1, "CodeCache: no room for %s", "StubRoutines (1)"); + if (_code1 == NULL) { + vm_exit_out_of_memory(code_size1, + "CodeCache: no room for StubRoutines (1)"); + } CodeBuffer buffer(_code1->instructions_begin(), _code1->instructions_size()); StubGenerator_generate(&buffer, false); } @@ -164,7 +167,10 @@ ResourceMark rm; TraceTime timer("StubRoutines generation 2", TraceStartupTime); _code2 = BufferBlob::create("StubRoutines (2)", code_size2); - if( _code2 == NULL) vm_exit_out_of_memory1(code_size2, "CodeCache: no room for %s", "StubRoutines (2)"); + if (_code2 == NULL) { + vm_exit_out_of_memory(code_size2, + "CodeCache: no room for StubRoutines (2)"); + } 
CodeBuffer buffer(_code2->instructions_begin(), _code2->instructions_size()); StubGenerator_generate(&buffer, true); } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/runtime/thread.hpp --- a/src/share/vm/runtime/thread.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/runtime/thread.hpp Wed May 05 16:39:47 2010 -0700 @@ -772,7 +772,7 @@ volatile address _exception_pc; // PC where exception happened volatile address _exception_handler_pc; // PC for handler of exception volatile int _exception_stack_size; // Size of frame where exception happened - volatile int _is_method_handle_exception; // True if the current exception PC is at a MethodHandle call. + volatile int _is_method_handle_return; // true (== 1) if the current exception PC is a MethodHandle call site. // support for compilation bool _is_compiling; // is true if a compilation is active in this thread (one compilation per thread possible) @@ -1108,13 +1108,13 @@ int exception_stack_size() const { return _exception_stack_size; } address exception_pc() const { return _exception_pc; } address exception_handler_pc() const { return _exception_handler_pc; } - int is_method_handle_exception() const { return _is_method_handle_exception; } + bool is_method_handle_return() const { return _is_method_handle_return == 1; } void set_exception_oop(oop o) { _exception_oop = o; } void set_exception_pc(address a) { _exception_pc = a; } void set_exception_handler_pc(address a) { _exception_handler_pc = a; } void set_exception_stack_size(int size) { _exception_stack_size = size; } - void set_is_method_handle_exception(int value) { _is_method_handle_exception = value; } + void set_is_method_handle_return(bool value) { _is_method_handle_return = value ? 1 : 0; } // Stack overflow support inline size_t stack_available(address cur_sp); @@ -1188,7 +1188,7 @@ static ByteSize exception_pc_offset() { return byte_offset_of(JavaThread, _exception_pc ); } static ByteSize exception_handler_pc_offset() { return byte_offset_of(JavaThread, _exception_handler_pc); } static ByteSize exception_stack_size_offset() { return byte_offset_of(JavaThread, _exception_stack_size); } - static ByteSize is_method_handle_exception_offset() { return byte_offset_of(JavaThread, _is_method_handle_exception); } + static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); } static ByteSize stack_guard_state_offset() { return byte_offset_of(JavaThread, _stack_guard_state ); } static ByteSize suspend_flags_offset() { return byte_offset_of(JavaThread, _suspend_flags ); } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/runtime/vmThread.cpp --- a/src/share/vm/runtime/vmThread.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/runtime/vmThread.cpp Wed May 05 16:39:47 2010 -0700 @@ -106,7 +106,7 @@ // restore queue to empty state _queue[prio]->set_next(_queue[prio]); _queue[prio]->set_prev(_queue[prio]); - assert(queue_empty(prio), "drain corrupted queue") + assert(queue_empty(prio), "drain corrupted queue"); #ifdef DEBUG int len = 0; VM_Operation* cur; @@ -593,7 +593,8 @@ // Check that the VM operation allows nested VM operations. This is normally not the case, e.g., the compiler // does not allow nested scavenges or compiles.
if (!prev_vm_operation->allow_nested_vm_operations()) { - fatal2("Nested VM operation %s requested by operation %s", op->name(), vm_operation()->name()); + fatal(err_msg("Nested VM operation %s requested by operation %s", + op->name(), vm_operation()->name())); } op->set_calling_thread(prev_vm_operation->calling_thread(), prev_vm_operation->priority()); } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/services/g1MemoryPool.cpp --- a/src/share/vm/services/g1MemoryPool.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/services/g1MemoryPool.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2007-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,7 +45,7 @@ // See the comment at the top of g1MemoryPool.hpp size_t G1MemoryPoolSuper::eden_space_used(G1CollectedHeap* g1h) { - size_t young_list_length = g1h->young_list_length(); + size_t young_list_length = g1h->young_list()->length(); size_t eden_used = young_list_length * HeapRegion::GrainBytes; size_t survivor_used = survivor_space_used(g1h); eden_used = subtract_up_to_zero(eden_used, survivor_used); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/utilities/debug.cpp --- a/src/share/vm/utilities/debug.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/utilities/debug.cpp Wed May 05 16:39:47 2010 -0700 @@ -72,7 +72,7 @@ // assert/guarantee/... may happen very early during VM initialization. // Don't rely on anything that is initialized by Threads::create_vm(). For // example, don't use tty. -bool assert_is_suppressed(const char* file_name, int line_no) { +bool error_is_suppressed(const char* file_name, int line_no) { // The following 1-element cache requires that passed-in // file names are always only constant literals. if (file_name == last_file_name && line_no == last_line_no) return true; @@ -163,38 +163,30 @@ #else // Place-holder for non-existent suppression check: -#define assert_is_suppressed(file_name, line_no) (false) +#define error_is_suppressed(file_name, line_no) (false) #endif //PRODUCT -void report_assertion_failure(const char* file_name, int line_no, const char* message) { - if (Debugging || assert_is_suppressed(file_name, line_no)) return; - VMError err(ThreadLocalStorage::get_thread_slow(), message, file_name, line_no); +void report_vm_error(const char* file, int line, const char* error_msg, + const char* detail_msg) +{ + if (Debugging || error_is_suppressed(file, line)) return; + Thread* const thread = ThreadLocalStorage::get_thread_slow(); + VMError err(thread, file, line, error_msg, detail_msg); err.report_and_die(); } -void report_fatal(const char* file_name, int line_no, const char* message) { - if (Debugging || assert_is_suppressed(file_name, line_no)) return; - VMError err(ThreadLocalStorage::get_thread_slow(), message, file_name, line_no); - err.report_and_die(); +void report_fatal(const char* file, int line, const char* message) +{ + report_vm_error(file, line, "fatal error", message); } -void report_fatal_vararg(const char* file_name, int line_no, const char* format, ...) { - char buffer[256]; - va_list ap; - va_start(ap, format); - jio_vsnprintf(buffer, sizeof(buffer), format, ap); - va_end(ap); - report_fatal(file_name, line_no, buffer); -} - - // Used by report_vm_out_of_memory to detect recursion. 
static jint _exiting_out_of_mem = 0; -// Just passing the flow to VMError to handle error -void report_vm_out_of_memory(const char* file_name, int line_no, size_t size, const char* message) { - if (Debugging || assert_is_suppressed(file_name, line_no)) return; +void report_vm_out_of_memory(const char* file, int line, size_t size, + const char* message) { + if (Debugging || error_is_suppressed(file, line)) return; // We try to gather additional information for the first out of memory // error only; gathering additional data might cause an allocation and a @@ -206,46 +198,28 @@ if (first_time_here) { Thread* thread = ThreadLocalStorage::get_thread_slow(); - VMError(thread, size, message, file_name, line_no).report_and_die(); + VMError(thread, file, line, size, message).report_and_die(); } // Dump core and abort vm_abort(true); } -void report_vm_out_of_memory_vararg(const char* file_name, int line_no, size_t size, const char* format, ...) { - char buffer[256]; - va_list ap; - va_start(ap, format); - jio_vsnprintf(buffer, sizeof(buffer), format, ap); - va_end(ap); - report_vm_out_of_memory(file_name, line_no, size, buffer); +void report_should_not_call(const char* file, int line) { + report_vm_error(file, line, "ShouldNotCall()"); } -void report_should_not_call(const char* file_name, int line_no) { - if (Debugging || assert_is_suppressed(file_name, line_no)) return; - VMError err(ThreadLocalStorage::get_thread_slow(), "ShouldNotCall()", file_name, line_no); - err.report_and_die(); +void report_should_not_reach_here(const char* file, int line) { + report_vm_error(file, line, "ShouldNotReachHere()"); } - -void report_should_not_reach_here(const char* file_name, int line_no) { - if (Debugging || assert_is_suppressed(file_name, line_no)) return; - VMError err(ThreadLocalStorage::get_thread_slow(), "ShouldNotReachHere()", file_name, line_no); - err.report_and_die(); +void report_unimplemented(const char* file, int line) { + report_vm_error(file, line, "Unimplemented()"); } - -void report_unimplemented(const char* file_name, int line_no) { - if (Debugging || assert_is_suppressed(file_name, line_no)) return; - VMError err(ThreadLocalStorage::get_thread_slow(), "Unimplemented()", file_name, line_no); - err.report_and_die(); -} - - -void report_untested(const char* file_name, int line_no, const char* msg) { +void report_untested(const char* file, int line, const char* message) { #ifndef PRODUCT - warning("Untested: %s in %s: %d\n", msg, file_name, line_no); + warning("Untested: %s in %s: %d\n", message, file, line); #endif // PRODUCT } @@ -284,6 +258,51 @@ return error_reported; } +#ifndef PRODUCT +#include <signal.h> + +void test_error_handler(size_t test_num) +{ + if (test_num == 0) return; + + // If asserts are disabled, use the corresponding guarantee instead. + size_t n = test_num; + NOT_DEBUG(if (n <= 2) n += 2); + + const char* const str = "hello"; + const size_t num = (size_t)os::vm_page_size(); + + const char* const eol = os::line_separator(); + const char* const msg = "this message should be truncated during formatting"; + + // Keep this in sync with test/runtime/6888954/vmerrors.sh.
+ switch (n) { + case 1: assert(str == NULL, "expected null"); + case 2: assert(num == 1023 && *str == 'X', + err_msg("num=" SIZE_FORMAT " str=\"%s\"", num, str)); + case 3: guarantee(str == NULL, "expected null"); + case 4: guarantee(num == 1023 && *str == 'X', + err_msg("num=" SIZE_FORMAT " str=\"%s\"", num, str)); + case 5: fatal("expected null"); + case 6: fatal(err_msg("num=" SIZE_FORMAT " str=\"%s\"", num, str)); + case 7: fatal(err_msg("%s%s# %s%s# %s%s# %s%s# %s%s# " + "%s%s# %s%s# %s%s# %s%s# %s%s# " + "%s%s# %s%s# %s%s# %s%s# %s", + msg, eol, msg, eol, msg, eol, msg, eol, msg, eol, + msg, eol, msg, eol, msg, eol, msg, eol, msg, eol, + msg, eol, msg, eol, msg, eol, msg, eol, msg)); + case 8: vm_exit_out_of_memory(num, "ChunkPool::allocate"); + case 9: ShouldNotCallThis(); + case 10: ShouldNotReachHere(); + case 11: Unimplemented(); + // This is last because it does not generate an hs_err* file on Windows. + case 12: os::signal_raise(SIGSEGV); + + default: ShouldNotReachHere(); + } +} +#endif // #ifndef PRODUCT + // ------ helper functions for debugging go here ------------ #ifndef PRODUCT diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/utilities/debug.hpp --- a/src/share/vm/utilities/debug.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/utilities/debug.hpp Wed May 05 16:39:47 2010 -0700 @@ -22,28 +22,54 @@ * */ +#include <stdarg.h> + +// Simple class to format the ctor arguments into a fixed-sized buffer. +template <size_t bufsz = 256> +class FormatBuffer { +public: + inline FormatBuffer(const char * format, ...); + operator const char *() const { return _buf; } + +private: + FormatBuffer(const FormatBuffer &); // prevent copies + +private: + char _buf[bufsz]; +}; + +template <size_t bufsz> +FormatBuffer<bufsz>::FormatBuffer(const char * format, ...) { + va_list argp; + va_start(argp, format); + vsnprintf(_buf, bufsz, format, argp); + va_end(argp); +} + +// Used to format messages for assert(), guarantee(), fatal(), etc. +typedef FormatBuffer<> err_msg; + // assertions #ifdef ASSERT -// Turn this off by default: -//#define USE_REPEATED_ASSERTS -#ifdef USE_REPEATED_ASSERTS - #define assert(p,msg) \ - { for (int __i = 0; __i < AssertRepeat; __i++) { \ - if (!(p)) { \ - report_assertion_failure(__FILE__, __LINE__, \ - "assert(" XSTR(p) ",\"" msg "\")");\ - BREAKPOINT; \ - } \ - } \ - } -#else - #define assert(p,msg) \ - if (!(p)) { \ - report_assertion_failure(__FILE__, __LINE__, \ - "assert(" XSTR(p) ",\"" msg "\")");\ - BREAKPOINT; \ - } -#endif +#ifndef USE_REPEATED_ASSERTS +#define assert(p, msg) \ +do { \ + if (!(p)) { \ + report_vm_error(__FILE__, __LINE__, "assert(" #p ") failed", msg); \ + BREAKPOINT; \ + } \ +} while (0) +#else // #ifndef USE_REPEATED_ASSERTS +#define assert(p, msg) \ +do { \ + for (int __i = 0; __i < AssertRepeat; __i++) { \ + if (!(p)) { \ + report_vm_error(__FILE__, __LINE__, "assert(" #p ") failed", msg); \ + BREAKPOINT; \ + } \ + } \ +} while (0) +#endif // #ifndef USE_REPEATED_ASSERTS // This version of assert is for use with checking return status from // library calls that return actual error values eg.
EINVAL, @@ -52,70 +78,83 @@ // what status was actually returned, so we pass the status variable as // an extra arg and use strerror to convert it to a meaningful string // like "Invalid argument", "out of memory" etc -#define assert_status(p, status, msg) \ - do { \ - if (!(p)) { \ - char buf[128]; \ - snprintf(buf, 127, \ - "assert_status(" XSTR(p) ", error: %s(%d), \"" msg "\")" , \ - strerror((status)), (status)); \ - report_assertion_failure(__FILE__, __LINE__, buf); \ - BREAKPOINT; \ - } \ - } while (0) - -// Another version of assert where the message is not a string literal -// The boolean condition is not printed out because cpp doesn't like it. -#define assert_msg(p, msg) \ - if (!(p)) { \ - report_assertion_failure(__FILE__, __LINE__, msg); \ - BREAKPOINT; \ - } +#define assert_status(p, status, msg) \ +do { \ + if (!(p)) { \ + report_vm_error(__FILE__, __LINE__, "assert(" #p ") failed", \ + err_msg("error %s(%d) %s", strerror(status), \ + status, msg)); \ + BREAKPOINT; \ + } \ +} while (0) // Do not assert this condition if there's already another error reported. #define assert_if_no_error(cond,msg) assert((cond) || is_error_reported(), msg) -#else +#else // #ifdef ASSERT #define assert(p,msg) #define assert_status(p,status,msg) #define assert_if_no_error(cond,msg) - #define assert_msg(cond,msg) -#endif - - -// fatals -#define fatal(m) { report_fatal(__FILE__, __LINE__, m ); BREAKPOINT; } -#define fatal1(m,x1) { report_fatal_vararg(__FILE__, __LINE__, m, x1 ); BREAKPOINT; } -#define fatal2(m,x1,x2) { report_fatal_vararg(__FILE__, __LINE__, m, x1, x2 ); BREAKPOINT; } -#define fatal3(m,x1,x2,x3) { report_fatal_vararg(__FILE__, __LINE__, m, x1, x2, x3 ); BREAKPOINT; } -#define fatal4(m,x1,x2,x3,x4) { report_fatal_vararg(__FILE__, __LINE__, m, x1, x2, x3, x4 ); BREAKPOINT; } - -// out of memory -#define vm_exit_out_of_memory(s,m) { report_vm_out_of_memory(__FILE__, __LINE__, s, m ); BREAKPOINT; } -#define vm_exit_out_of_memory1(s,m,x1) { report_vm_out_of_memory_vararg(__FILE__, __LINE__, s, m, x1 ); BREAKPOINT; } -#define vm_exit_out_of_memory2(s,m,x1,x2) { report_vm_out_of_memory_vararg(__FILE__, __LINE__, s, m, x1, x2 ); BREAKPOINT; } -#define vm_exit_out_of_memory3(s,m,x1,x2,x3) { report_vm_out_of_memory_vararg(__FILE__, __LINE__, s, m, x1, x2, x3 ); BREAKPOINT; } -#define vm_exit_out_of_memory4(s,m,x1,x2,x3,x4) { report_vm_out_of_memory_vararg(__FILE__, __LINE__, s, m, x1, x2, x3, x4); BREAKPOINT; } +#endif // #ifdef ASSERT // guarantee is like assert except it's always executed -- use it for -// cheap tests that catch errors that would otherwise be hard to find +// cheap tests that catch errors that would otherwise be hard to find. // guarantee is also used for Verify options. 
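To make the FormatBuffer/err_msg mechanism above concrete, here is a minimal, self-contained C++ sketch of the pattern. It is an illustration only, with stand-in names (FormatBufferSketch, err_msg_sketch, my_fatal) rather than HotSpot code, which additionally routes through report_vm_error() and BREAKPOINT:

    #include <cstdarg>
    #include <cstddef>
    #include <cstdio>

    // Formats its constructor arguments into a fixed-size buffer and
    // converts implicitly to const char* for the enclosing call.
    template <std::size_t bufsz = 256>
    class FormatBufferSketch {
    public:
      FormatBufferSketch(const char* format, ...) {
        va_list argp;
        va_start(argp, format);
        std::vsnprintf(_buf, bufsz, format, argp);  // truncates long messages
        va_end(argp);
      }
      operator const char*() const { return _buf; }
    private:
      FormatBufferSketch(const FormatBufferSketch&);  // prevent copies
      char _buf[bufsz];
    };

    typedef FormatBufferSketch<> err_msg_sketch;

    // Stand-in for the reporting function a fatal() macro would expand to.
    static void my_fatal(const char* msg) {
      std::fprintf(stderr, "fatal error: %s\n", msg);
    }

    int main() {
      int num = 42;
      // One call form replaces the old fatal1()..fatal4() arity family; the
      // err_msg temporary lives until the end of the full expression, so the
      // const char* it yields stays valid for the duration of the call.
      my_fatal(err_msg_sketch("num=%d str=\"%s\"", num, "hello"));
      return 0;
    }

The implicit conversion is what lets fatal(err_msg(...)) keep the plain const char* interface of report_fatal() without any heap allocation.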
-#define guarantee(b,msg) { if (!(b)) fatal("guarantee(" XSTR(b) ",\"" msg "\")"); } +#define guarantee(p, msg) \ +do { \ + if (!(p)) { \ + report_vm_error(__FILE__, __LINE__, "guarantee(" #p ") failed", msg); \ + BREAKPOINT; \ + } \ +} while (0) + +#define fatal(msg) \ +do { \ + report_fatal(__FILE__, __LINE__, msg); \ + BREAKPOINT; \ +} while (0) + +// out of memory +#define vm_exit_out_of_memory(size, msg) \ +do { \ + report_vm_out_of_memory(__FILE__, __LINE__, size, msg); \ + BREAKPOINT; \ +} while (0) -#define ShouldNotCallThis() { report_should_not_call (__FILE__, __LINE__); BREAKPOINT; } -#define ShouldNotReachHere() { report_should_not_reach_here (__FILE__, __LINE__); BREAKPOINT; } -#define Unimplemented() { report_unimplemented (__FILE__, __LINE__); BREAKPOINT; } -#define Untested(msg) { report_untested (__FILE__, __LINE__, msg); BREAKPOINT; } +#define ShouldNotCallThis() \ +do { \ + report_should_not_call(__FILE__, __LINE__); \ + BREAKPOINT; \ +} while (0) + +#define ShouldNotReachHere() \ +do { \ + report_should_not_reach_here(__FILE__, __LINE__); \ + BREAKPOINT; \ +} while (0) + +#define Unimplemented() \ +do { \ + report_unimplemented(__FILE__, __LINE__); \ + BREAKPOINT; \ +} while (0) + +#define Untested(msg) \ +do { \ + report_untested(__FILE__, __LINE__, msg); \ + BREAKPOINT; \ +} while (0); // error reporting helper functions -void report_assertion_failure(const char* file_name, int line_no, const char* message); -void report_fatal_vararg(const char* file_name, int line_no, const char* format, ...); -void report_fatal(const char* file_name, int line_no, const char* message); -void report_vm_out_of_memory_vararg(const char* file_name, int line_no, size_t size, const char* format, ...); -void report_vm_out_of_memory(const char* file_name, int line_no, size_t size, const char* message); -void report_should_not_call(const char* file_name, int line_no); -void report_should_not_reach_here(const char* file_name, int line_no); -void report_unimplemented(const char* file_name, int line_no); -void report_untested(const char* file_name, int line_no, const char* msg); +void report_vm_error(const char* file, int line, const char* error_msg, + const char* detail_msg = NULL); +void report_fatal(const char* file, int line, const char* message); +void report_vm_out_of_memory(const char* file, int line, size_t size, + const char* message); +void report_should_not_call(const char* file, int line); +void report_should_not_reach_here(const char* file, int line); +void report_unimplemented(const char* file, int line); +void report_untested(const char* file, int line, const char* message); + void warning(const char* format, ...); // out of memory reporting @@ -125,5 +164,8 @@ bool is_error_reported(); void set_error_reported(); +/* Test assert(), fatal(), guarantee(), etc. 
*/ +NOT_PRODUCT(void test_error_handler(size_t test_num);) + void pd_ps(frame f); void pd_obfuscate_location(char *buf, size_t buflen); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/utilities/exceptions.cpp --- a/src/share/vm/utilities/exceptions.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/utilities/exceptions.cpp Wed May 05 16:39:47 2010 -0700 @@ -378,7 +378,7 @@ void Exceptions::debug_check_abort(const char *value_string) { if (AbortVMOnException != NULL && value_string != NULL && strstr(value_string, AbortVMOnException)) { - fatal1("Saw %s, aborting", value_string); + fatal(err_msg("Saw %s, aborting", value_string)); } } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/utilities/macros.hpp --- a/src/share/vm/utilities/macros.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/utilities/macros.hpp Wed May 05 16:39:47 2010 -0700 @@ -188,6 +188,4 @@ #define NOT_SPARC(code) code #endif -#define FIX_THIS(code) report_assertion_failure("FIX_THIS",__FILE__, __LINE__, "") - #define define_pd_global(type, name, value) const type pd_##name = value; diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/utilities/vmError.cpp --- a/src/share/vm/utilities/vmError.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/utilities/vmError.cpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -65,7 +65,8 @@ _current_step = 0; _current_step_info = NULL; - _message = ""; + _message = NULL; + _detail_msg = NULL; _filename = NULL; _lineno = 0; @@ -73,31 +74,36 @@ } // Constructor for internal errors -VMError::VMError(Thread* thread, const char* message, const char* filename, int lineno) { +VMError::VMError(Thread* thread, const char* filename, int lineno, + const char* message, const char * detail_msg) +{ + _thread = thread; + _id = internal_error; // Value that's not an OS exception/signal + _filename = filename; + _lineno = lineno; + _message = message; + _detail_msg = detail_msg; + + _verbose = false; + _current_step = 0; + _current_step_info = NULL; + + _pc = NULL; + _siginfo = NULL; + _context = NULL; + + _size = 0; +} + +// Constructor for OOM errors +VMError::VMError(Thread* thread, const char* filename, int lineno, size_t size, + const char* message) { _thread = thread; - _id = internal_error; // set it to a value that's not an OS exception/signal + _id = oom_error; // Value that's not an OS exception/signal _filename = filename; _lineno = lineno; _message = message; - - _verbose = false; - _current_step = 0; - _current_step_info = NULL; - - _pc = NULL; - _siginfo = NULL; - _context = NULL; - - _size = 0; -} - -// Constructor for OOM errors -VMError::VMError(Thread* thread, size_t size, const char* message, const char* filename, int lineno) { - _thread = thread; - _id = oom_error; // set it to a value that's not an OS exception/signal - _filename = filename; - _lineno = lineno; - _message = message; + _detail_msg = NULL; _verbose = false; _current_step = 0; @@ -114,10 +120,11 @@ // Constructor for non-fatal errors VMError::VMError(const char* message) { _thread = NULL; - _id = internal_error; // set it to a value that's not an OS exception/signal + _id = internal_error; // Value that's not an OS exception/signal _filename = NULL; _lineno = 0; _message = message; + _detail_msg = NULL; _verbose = false; _current_step = 
0; @@ -191,27 +198,77 @@ "%s (0x%x) at pc=" PTR_FORMAT ", pid=%d, tid=" UINTX_FORMAT, signame, _id, _pc, os::current_process_id(), os::current_thread_id()); + } else if (_filename != NULL && _lineno > 0) { + // skip directory names + char separator = os::file_separator()[0]; + const char *p = strrchr(_filename, separator); + int n = jio_snprintf(buf, buflen, + "Internal Error at %s:%d, pid=%d, tid=" UINTX_FORMAT, + p ? p + 1 : _filename, _lineno, + os::current_process_id(), os::current_thread_id()); + if (n >= 0 && n < buflen && _message) { + if (_detail_msg) { + jio_snprintf(buf + n, buflen - n, "%s%s: %s", + os::line_separator(), _message, _detail_msg); + } else { + jio_snprintf(buf + n, buflen - n, "%sError: %s", + os::line_separator(), _message); + } + } } else { - if (_filename != NULL && _lineno > 0) { - // skip directory names - char separator = os::file_separator()[0]; - const char *p = strrchr(_filename, separator); - - jio_snprintf(buf, buflen, - "Internal Error at %s:%d, pid=%d, tid=" UINTX_FORMAT " \nError: %s", - p ? p + 1 : _filename, _lineno, - os::current_process_id(), os::current_thread_id(), - _message ? _message : ""); - } else { - jio_snprintf(buf, buflen, - "Internal Error (0x%x), pid=%d, tid=" UINTX_FORMAT, - _id, os::current_process_id(), os::current_thread_id()); - } + jio_snprintf(buf, buflen, + "Internal Error (0x%x), pid=%d, tid=" UINTX_FORMAT, + _id, os::current_process_id(), os::current_thread_id()); } return buf; } +void VMError::print_stack_trace(outputStream* st, JavaThread* jt, + char* buf, int buflen, bool verbose) { +#ifdef ZERO + if (jt->zero_stack()->sp() && jt->top_zero_frame()) { + // StackFrameStream uses the frame anchor, which may not have + // been set up. This can be done at any time in Zero, however, + // so if it hasn't been set up then we just set it up now and + // clear it again when we're done. + bool has_last_Java_frame = jt->has_last_Java_frame(); + if (!has_last_Java_frame) + jt->set_last_Java_frame(); + st->print("Java frames:"); + + // If the top frame is a Shark frame and the frame anchor isn't + // set up then it's possible that the information in the frame + // is garbage: it could be from a previous decache, or it could + // simply have never been written. So we print a warning... + StackFrameStream sfs(jt); + if (!has_last_Java_frame && !sfs.is_done()) { + if (sfs.current()->zeroframe()->is_shark_frame()) { + st->print(" (TOP FRAME MAY BE JUNK)"); + } + } + st->cr(); + + // Print the frames + for(int i = 0; !sfs.is_done(); sfs.next(), i++) { + sfs.current()->zero_print_on_error(i, st, buf, buflen); + st->cr(); + } + + // Reset the frame anchor if necessary + if (!has_last_Java_frame) + jt->reset_last_Java_frame(); + } +#else + if (jt->has_last_Java_frame()) { + st->print_cr("Java frames: (J=compiled Java code, j=interpreted, Vv=VM code)"); + for(StackFrameStream sfs(jt); !sfs.is_done(); sfs.next()) { + sfs.current()->print_on_error(st, buf, buflen, verbose); + st->cr(); + } + } +#endif // ZERO +} // This is the main function to report a fatal error. Only one thread can // call this function, so we don't need to worry about MT-safety. But it's @@ -324,7 +381,9 @@ STEP(40, "(printing error message)") // error message - if (_message && _message[0] != '\0') { + if (_detail_msg) { + st->print_cr("# %s: %s", _message ? 
_message : "Error", _detail_msg); + } else if (_message) { st->print_cr("# Error: %s", _message); } @@ -457,49 +516,7 @@ STEP(130, "(printing Java stack)" ) if (_verbose && _thread && _thread->is_Java_thread()) { - JavaThread* jt = (JavaThread*)_thread; -#ifdef ZERO - if (jt->zero_stack()->sp() && jt->top_zero_frame()) { - // StackFrameStream uses the frame anchor, which may not have - // been set up. This can be done at any time in Zero, however, - // so if it hasn't been set up then we just set it up now and - // clear it again when we're done. - bool has_last_Java_frame = jt->has_last_Java_frame(); - if (!has_last_Java_frame) - jt->set_last_Java_frame(); - st->print("Java frames:"); - - // If the top frame is a Shark frame and the frame anchor isn't - // set up then it's possible that the information in the frame - // is garbage: it could be from a previous decache, or it could - // simply have never been written. So we print a warning... - StackFrameStream sfs(jt); - if (!has_last_Java_frame && !sfs.is_done()) { - if (sfs.current()->zeroframe()->is_shark_frame()) { - st->print(" (TOP FRAME MAY BE JUNK)"); - } - } - st->cr(); - - // Print the frames - for(int i = 0; !sfs.is_done(); sfs.next(), i++) { - sfs.current()->zero_print_on_error(i, st, buf, sizeof(buf)); - st->cr(); - } - - // Reset the frame anchor if necessary - if (!has_last_Java_frame) - jt->reset_last_Java_frame(); - } -#else - if (jt->has_last_Java_frame()) { - st->print_cr("Java frames: (J=compiled Java code, j=interpreted, Vv=VM code)"); - for(StackFrameStream sfs(jt); !sfs.is_done(); sfs.next()) { - sfs.current()->print_on_error(st, buf, sizeof(buf)); - st->cr(); - } - } -#endif // ZERO + print_stack_trace(st, (JavaThread*)_thread, buf, sizeof(buf)); } STEP(135, "(printing target Java thread stack)" ) @@ -509,13 +526,7 @@ JavaThread* jt = ((NamedThread *)_thread)->processed_thread(); if (jt != NULL) { st->print_cr("JavaThread " PTR_FORMAT " (nid = " UINTX_FORMAT ") was being processed", jt, jt->osthread()->thread_id()); - if (jt->has_last_Java_frame()) { - st->print_cr("Java frames: (J=compiled Java code, j=interpreted, Vv=VM code)"); - for(StackFrameStream sfs(jt); !sfs.is_done(); sfs.next()) { - sfs.current()->print_on_error(st, buf, sizeof(buf), true); - st->cr(); - } - } + print_stack_trace(st, jt, buf, sizeof(buf), true); } } diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/utilities/vmError.hpp --- a/src/share/vm/utilities/vmError.hpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/utilities/vmError.hpp Wed May 05 16:39:47 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved. + * Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -37,6 +37,7 @@ // 0x8xxxxxxx system warnings const char * _message; + const char * _detail_msg; Thread * _thread; // NULL if it's native thread @@ -70,17 +71,24 @@ // generate an error report void report(outputStream* st); + // generate a stack trace + static void print_stack_trace(outputStream* st, JavaThread* jt, + char* buf, int buflen, bool verbose = false); + // accessor - const char* message() { return _message; } + const char* message() const { return _message; } + const char* detail_msg() const { return _detail_msg; } public: // Constructor for crashes VMError(Thread* thread, int sig, address pc, void* siginfo, void* context); // Constructor for VM internal errors - VMError(Thread* thread, const char* message, const char* filename, int lineno); + VMError(Thread* thread, const char* filename, int lineno, + const char* message, const char * detail_msg); - // Constructors for VM OOM errors - VMError(Thread* thread, size_t size, const char* message, const char* filename, int lineno); + // Constructor for VM OOM errors + VMError(Thread* thread, const char* filename, int lineno, size_t size, + const char* message); // Constructor for non-fatal errors VMError(const char* message); diff -r f43b5e9f7881 -r 3fca8e9cd36a src/share/vm/utilities/xmlstream.cpp --- a/src/share/vm/utilities/xmlstream.cpp Wed May 05 09:28:13 2010 -0400 +++ b/src/share/vm/utilities/xmlstream.cpp Wed May 05 16:39:47 2010 -0700 @@ -328,7 +328,7 @@ // ------------------------------------------------------------------ void xmlStream::va_done(const char* format, va_list ap) { char buffer[200]; - guarantee(strlen(format) + 10 < sizeof(buffer), "bigger format buffer") + guarantee(strlen(format) + 10 < sizeof(buffer), "bigger format buffer"); const char* kind = format; const char* kind_end = strchr(kind, ' '); size_t kind_len = (kind_end != NULL) ? (kind_end - kind) : strlen(kind); diff -r f43b5e9f7881 -r 3fca8e9cd36a test/compiler/6431242/Test.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/6431242/Test.java Wed May 05 16:39:47 2010 -0700 @@ -0,0 +1,176 @@ +/* + * Copyright 2006 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
+ * + */ + +/* + * @test + * @bug 6431242 + * @run main/othervm -server -XX:+PrintCompilation Test + */ + +public class Test{ + + int _len = 8; + int[] _arr_i = new int[_len]; + long[] _arr_l = new long[_len]; + + int[] _arr_i_cp = new int [_len]; + long[] _arr_l_cp = new long [_len]; + + int _k = 0x12345678; + int _j = 0; + int _ir = 0x78563412; + int _ir1 = 0x78563413; + int _ir2 = 0x79563412; + + long _m = 0x123456789abcdef0L; + long _l = 0L; + long _lr = 0xf0debc9a78563412L; + long _lr1 = 0xf0debc9a78563413L; + long _lr2 = 0xf1debc9a78563412L; + + void init() { + for (int i=0; i<_arr_i.length; i++) { + _arr_i[i] = _k; + _arr_l[i] = _m; + } + } + + public int test_int_reversed(int i) { + return Integer.reverseBytes(i); + } + + public long test_long_reversed(long i) { + return Long.reverseBytes(i); + } + + public void test_copy_ints(int[] dst, int[] src) { + for(int i=0; i 0) { + try { + return (short) Integer.valueOf(args[0]).intValue(); + } catch (NumberFormatException e) { } + } + return v; + } + + private static char initChar(String[] args, char v) { + if (args.length > 0) { + try { + return (char) Integer.valueOf(args[0]).intValue(); + } catch (NumberFormatException e) { } + } + return v; + } + + private static void testChar(char a, char b) { + if (a != Character.reverseBytes(b)) { + throw new RuntimeException("FAIL: " + (int)a + " != Character.reverseBytes(" + (int)b + ")"); + } + if (b != Character.reverseBytes(a)) { + throw new RuntimeException("FAIL: " + (int)b + " != Character.reverseBytes(" + (int)a + ")"); + } + } + + private static void testShort(short a, short b) { + if (a != Short.reverseBytes(b)) { + throw new RuntimeException("FAIL: " + (int)a + " != Short.reverseBytes(" + (int)b + ")"); + } + if (b != Short.reverseBytes(a)) { + throw new RuntimeException("FAIL: " + (int)b + " != Short.reverseBytes(" + (int)a + ")"); + } + } + + public static void main(String[] args) { + for (int i = 0; i < 100000; ++i) { // Trigger compilation + char c1 = initChar(args, (char) 0x0123); + char c2 = initChar(args, (char) 0x2301); + char c3 = initChar(args, (char) 0xaabb); + char c4 = initChar(args, (char) 0xbbaa); + short s1 = initShort(args, (short) 0x0123); + short s2 = initShort(args, (short) 0x2301); + short s3 = initShort(args, (short) 0xaabb); + short s4 = initShort(args, (short) 0xbbaa); + testChar(c1, c2); + testChar(c3, c4); + testShort(s1, s2); + testShort(s3, s4); + } + } +} diff -r f43b5e9f7881 -r 3fca8e9cd36a test/runtime/6888954/vmerrors.sh --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/6888954/vmerrors.sh Wed May 05 16:39:47 2010 -0700 @@ -0,0 +1,71 @@ +# @test +# @bug 6888954 +# @summary exercise HotSpot error handling code +# @author John Coomes +# @run shell vmerrors.sh + +# Repeatedly invoke java with a command-line option that causes HotSpot to +# produce an error report and terminate just after initialization. Each +# invocation is identified by a small integer, <n>, which provokes a different +# error (assertion failure, guarantee failure, fatal error, etc.). The output +# from stdout/stderr is written to <n>.out and the hs_err_pidXXX.log file is +# renamed to <n>.log. +# +# The automated checking done by this script is minimal. When updating the +# fatal error handler it is more useful to run it manually or to use the -retain +# option with the jtreg so that test directories are not removed automatically. +# To run stand-alone: +# +# TESTJAVA=/java/home/dir +# TESTVMOPTS=...
+# export TESTJAVA TESTVMOPTS +# sh test/runtime/6888954/vmerrors.sh + +ulimit -c 0 # no core files + +i=1 +rc=0 + +assert_re='(assert|guarantee)[(](str|num).*failed: *' +guarantee_re='guarantee[(](str|num).*failed: *' +fatal_re='fatal error: *' +signal_re='(SIGSEGV|EXCEPTION_ACCESS_VIOLATION).* at pc=' +tail_1='.*expected null' +tail_2='.*num=' + +for re in \ + "${assert_re}${tail_1}" "${assert_re}${tail_2}" \ + "${guarantee_re}${tail_1}" "${guarantee_re}${tail_2}" \ + "${fatal_re}${tail_1}" "${fatal_re}${tail_2}" \ + "${fatal_re}.*truncated" "ChunkPool::allocate" \ + "ShouldNotCall" "ShouldNotReachHere" \ + "Unimplemented" "$signal_re" + +do + i2=$i + [ $i -lt 10 ] && i2=0$i + + "$TESTJAVA/bin/java" $TESTVMOPTS -XX:+IgnoreUnrecognizedVMOptions \ + -XX:ErrorHandlerTest=${i} -version > ${i2}.out 2>&1 + + # If ErrorHandlerTest is ignored (product build), stop. + # + # Using the built-in variable $! to get the pid does not work reliably on + # windows; use a wildcard instead. + mv hs_err_pid*.log ${i2}.log || exit $rc + + for f in ${i2}.log ${i2}.out + do + egrep -- "$re" $f > $$ + if [ $? -ne 0 ] + then + echo "ErrorHandlerTest=$i failed ($f)" + rc=1 + fi + done + rm -f $$ + + i=$(expr $i + 1) +done + +exit $rc
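One behavior the script exercises deserves a note: the "truncated" expectation for ErrorHandlerTest=7 falls out of the fixed 256-byte default buffer reconstructed in debug.hpp above, because vsnprintf() stops writing at the buffer's capacity. Below is a standalone C++ sketch of just that truncation behavior; format_like_err_msg is an illustrative name, not part of the patch:

    #include <cstdarg>
    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    // Mimics err_msg formatting into a fixed-size buffer.
    static void format_like_err_msg(char* buf, std::size_t bufsz,
                                    const char* fmt, ...) {
      va_list argp;
      va_start(argp, fmt);
      std::vsnprintf(buf, bufsz, fmt, argp);  // at most bufsz-1 chars plus NUL
      va_end(argp);
    }

    int main() {
      const char* msg = "this message should be truncated during formatting";
      char buf[256];  // same capacity as FormatBuffer's default
      // Fifteen ~50-character repetitions (~780 chars with separators)
      // cannot fit in 256 bytes:
      format_like_err_msg(buf, sizeof(buf),
                          "%s# %s# %s# %s# %s# %s# %s# %s# %s# %s# %s# %s# %s# %s# %s",
                          msg, msg, msg, msg, msg, msg, msg, msg,
                          msg, msg, msg, msg, msg, msg, msg);
      std::printf("kept %zu of ~780 formatted chars\n", std::strlen(buf));
      return 0;
    }

The hs_err log line for case 7 therefore ends mid-message, which the ${fatal_re}.*truncated pattern above matches.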