graal-jvmci-8 (Mercurial repository)
changeset 2491:0654ee04b214
Merge with OpenJDK.
--- a/.hgtags Fri Apr 22 15:22:45 2011 +0200 +++ b/.hgtags Fri Apr 22 15:30:53 2011 +0200 @@ -151,3 +151,18 @@ e9aa2ca89ad6c53420623d579765f9706ec523d7 jdk7-b130 0aa3b49089112d5faa77902ad680c582ab53f651 jdk7-b131 e9aa2ca89ad6c53420623d579765f9706ec523d7 hs21-b02 +0e531ab5ba04967a0e9aa6aef65e6eb3a0dcf632 jdk7-b132 +a8d643a4db47c7b58e0bcb49c77b5c3610de86a8 hs21-b03 +1b3a350709e4325d759bb453ff3fb6a463270488 jdk7-b133 +447e6faab4a8755d4860c2366630729dbaec111c jdk7-b134 +3c76374706ea8a77e15aec8310e831e5734f8775 hs21-b04 +b898f0fc3cedc972d884d31a751afd75969531cf jdk7-b135 +b898f0fc3cedc972d884d31a751afd75969531cf hs21-b05 +bd586e392d93b7ed7a1636dcc8da2b6a4203a102 jdk7-b136 +bd586e392d93b7ed7a1636dcc8da2b6a4203a102 hs21-b06 +2dbcb4a4d8dace5fe78ceb563b134f1fb296cd8f jdk7-b137 +2dbcb4a4d8dace5fe78ceb563b134f1fb296cd8f hs21-b07 +0930dc920c185afbf40fed9a655290b8e5b16783 jdk7-b138 +0930dc920c185afbf40fed9a655290b8e5b16783 hs21-b08 +611e19a16519d6fb5deea9ab565336e6e6ee475d jdk7-b139 +611e19a16519d6fb5deea9ab565336e6e6ee475d hs21-b09
--- a/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/HelloWorld.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/HelloWorld.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2001, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -55,7 +55,7 @@ synchronized(lock) { if (useMethodInvoke) { try { - Method method = HelloWorld.class.getMethod("e", null); + Method method = HelloWorld.class.getMethod("e"); Integer result = (Integer) method.invoke(null, new Object[0]); return result.intValue(); }
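Note on the HelloWorld hunk: with the JDK 5+ varargs signature getMethod(String, Class<?>...), passing a bare null draws javac's "non-varargs call of varargs method" warning, and getMethod("e") is the equivalent warning-free lookup of the no-argument method. A minimal, self-contained sketch of the same call pattern (the class and method names here are illustrative, not from the patch):

    import java.lang.reflect.Method;

    public class ReflectLookup {
        public static Integer e() { return 42; }

        public static void main(String[] args) throws Exception {
            // Varargs-friendly lookup of the no-argument method "e", as in the patch.
            Method m = ReflectLookup.class.getMethod("e");
            // Static method, so the receiver is null and no arguments are passed.
            System.out.println(m.invoke(null));   // prints 42
        }
    }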
--- a/agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeLoadConstant.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeLoadConstant.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeWithKlass.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeWithKlass.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/ByteValueImpl.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/ByteValueImpl.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -52,12 +52,10 @@ return intValue(); } - public int compareTo(Object obj) { - byte other = ((ByteValue)obj).value(); - return value() - other; + public int compareTo(ByteValue byteVal) { + return value() - byteVal.value(); } - public Type type() { return vm.theByteType(); }
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/CharValueImpl.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/CharValueImpl.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -52,9 +52,8 @@ return intValue(); } - public int compareTo(Object obj) { - char other = ((CharValue)obj).value(); - return value() - other; + public int compareTo(CharValue charVal) { + return value() - charVal.value(); } public Type type() {
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/ClassObjectReferenceImpl.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/ClassObjectReferenceImpl.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,7 @@ import com.sun.jdi.*; import sun.jvm.hotspot.oops.Instance; import sun.jvm.hotspot.oops.Klass; -import sun.jvm.hotspot.oops.OopUtilities; +import sun.jvm.hotspot.oops.java_lang_Class; public class ClassObjectReferenceImpl extends ObjectReferenceImpl implements ClassObjectReference { @@ -39,7 +39,7 @@ public ReferenceType reflectedType() { if (reflectedType == null) { - Klass k = OopUtilities.classOopToKlass(ref()); + Klass k = java_lang_Class.asKlass(ref()); reflectedType = vm.referenceType(k); } return reflectedType;
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/ConnectorImpl.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/ConnectorImpl.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2004, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -186,7 +186,7 @@ // assert isVMVersionMismatch(throwable), "not a VMVersionMismatch" Class expClass = throwable.getClass(); Method targetVersionMethod = expClass.getMethod("getTargetVersion", new Class[0]); - return (String) targetVersionMethod.invoke(throwable, null); + return (String) targetVersionMethod.invoke(throwable); } /** If the causal chain has a sun.jvm.hotspot.runtime.VMVersionMismatchException,
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/DoubleValueImpl.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/DoubleValueImpl.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,8 +45,8 @@ } } - public int compareTo(Object obj) { - double other = ((DoubleValue)obj).value(); + public int compareTo(DoubleValue doubleVal) { + double other = doubleVal.value(); if (value() < other) { return -1; } else if (value() == other) {
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/FieldImpl.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/FieldImpl.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -62,7 +62,7 @@ // get the value of static field ValueImpl getValue() { - return getValue(saField.getFieldHolder()); + return getValue(saField.getFieldHolder().getJavaMirror()); } // get the value of this Field from a specific Oop @@ -145,8 +145,7 @@ } // From interface Comparable - public int compareTo(Object object) { - Field field = (Field)object; + public int compareTo(Field field) { ReferenceTypeImpl declaringType = (ReferenceTypeImpl)declaringType(); int rc = declaringType.compareTo(field.declaringType()); if (rc == 0) {
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/FloatValueImpl.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/FloatValueImpl.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -52,8 +52,8 @@ return intValue(); } - public int compareTo(Object obj) { - float other = ((FloatValue)obj).value(); + public int compareTo(FloatValue floatVal) { + float other = floatVal.value(); if (value() < other) { return -1; } else if (value() == other) {
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/IntegerValueImpl.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/IntegerValueImpl.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -52,9 +52,8 @@ return intValue(); } - public int compareTo(Object obj) { - int other = ((IntegerValue)obj).value(); - return value() - other; + public int compareTo(IntegerValue integerVal) { + return value() - integerVal.value(); } public Type type() {
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/LocalVariableImpl.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/LocalVariableImpl.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2004, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -67,8 +67,8 @@ return (int)method.hashCode() + slot(); } - public int compareTo(Object object) { - LocalVariableImpl other = (LocalVariableImpl)object; + public int compareTo(LocalVariable localVar) { + LocalVariableImpl other = (LocalVariableImpl) localVar; int rc = method.compareTo(other.method); if (rc == 0) { rc = slot() - other.slot();
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/LocationImpl.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/LocationImpl.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -78,8 +78,7 @@ return method().hashCode() + (int)codeIndex(); } - public int compareTo(Object object) { - LocationImpl other = (LocationImpl)object; + public int compareTo(Location other) { int rc = method().compareTo(other.method()); if (rc == 0) { long diff = codeIndex() - other.codeIndex();
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/LongValueImpl.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/LongValueImpl.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -52,8 +52,8 @@ return intValue(); } - public int compareTo(Object obj) { - long other = ((LongValue)obj).value(); + public int compareTo(LongValue longVal) { + long other = longVal.value(); if (value() < other) { return -1; } else if (value() == other) {
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/MethodImpl.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/MethodImpl.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2005, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -200,8 +200,7 @@ } // From interface Comparable - public int compareTo(Object object) { - Method method = (Method)object; + public int compareTo(Method method) { ReferenceTypeImpl declaringType = (ReferenceTypeImpl)declaringType(); int rc = declaringType.compareTo(method.declaringType()); if (rc == 0) {
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/ReferenceTypeImpl.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/ReferenceTypeImpl.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2005, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -99,7 +99,7 @@ return saKlass.hashCode(); } - public int compareTo(Object object) { + public int compareTo(ReferenceType refType) { /* * Note that it is critical that compareTo() == 0 * implies that equals() == true. Otherwise, TreeSet @@ -108,7 +108,7 @@ * (Classes of the same name loaded by different class loaders * or in different VMs must not return 0). */ - ReferenceTypeImpl other = (ReferenceTypeImpl)object; + ReferenceTypeImpl other = (ReferenceTypeImpl)refType; int comp = name().compareTo(other.name()); if (comp == 0) { Oop rf1 = ref();
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/ShortValueImpl.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/ShortValueImpl.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -52,9 +52,8 @@ return intValue(); } - public int compareTo(Object obj) { - short other = ((ShortValue)obj).value(); - return value() - other; + public int compareTo(ShortValue shortVal) { + return value() - shortVal.value(); } public Type type() {
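The JDI hunks above (ByteValueImpl through ShortValueImpl, plus FieldImpl, LocalVariableImpl, LocationImpl, MethodImpl and ReferenceTypeImpl) all follow one pattern: the com.sun.jdi interfaces are generified in JDK 7 (for example ByteValue extends Comparable<ByteValue>), so the raw compareTo(Object) overrides with casts are replaced by implementations taking the typed parameter. A hedged sketch of that pattern on a made-up value class (OldStyleValue/NewStyleValue are illustrative, not code from the patch):

    // Before: raw Comparable, cast inside compareTo.
    class OldStyleValue implements Comparable {
        private final int value;
        OldStyleValue(int value) { this.value = value; }
        public int compareTo(Object obj) {
            return value - ((OldStyleValue) obj).value;   // cast can throw ClassCastException
        }
    }

    // After: the generified interface supplies the parameter type, no cast needed.
    class NewStyleValue implements Comparable<NewStyleValue> {
        private final int value;
        NewStyleValue(int value) { this.value = value; }
        public int compareTo(NewStyleValue other) {
            return value - other.value;   // mirrors the patch's subtraction-based comparison
        }
    }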
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/VirtualMachineImpl.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/VirtualMachineImpl.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2006, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -798,12 +798,11 @@ } public String description() { - String[] versionParts = {"" + vmmgr.majorInterfaceVersion(), - "" + vmmgr.minorInterfaceVersion(), - name()}; return java.text.MessageFormat.format(java.util.ResourceBundle. getBundle("com.sun.tools.jdi.resources.jdi").getString("version_format"), - versionParts); + "" + vmmgr.majorInterfaceVersion(), + "" + vmmgr.minorInterfaceVersion(), + name()); } public String version() {
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/DictionaryEntry.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/DictionaryEntry.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/LoaderConstraintEntry.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/LoaderConstraintEntry.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/PlaceholderEntry.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/PlaceholderEntry.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/StringTable.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/StringTable.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,12 +44,10 @@ private static synchronized void initialize(TypeDataBase db) { Type type = db.lookupType("StringTable"); theTableField = type.getAddressField("_the_table"); - stringTableSize = db.lookupIntConstant("StringTable::string_table_size").intValue(); } // Fields private static AddressField theTableField; - private static int stringTableSize; // Accessors public static StringTable getTheTable() { @@ -57,10 +55,6 @@ return (StringTable) VMObjectFactory.newObject(StringTable.class, tmp); } - public static int getStringTableSize() { - return stringTableSize; - } - public StringTable(Address addr) { super(addr); }
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/SymbolTable.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/SymbolTable.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2005, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -331,8 +331,6 @@ if (Assert.ASSERTS_ENABLED) { Assert.that(getTagAt(i).isInvokeDynamic(), "Corrupted constant pool"); } - if (getTagAt(i).value() == JVM_CONSTANT_InvokeDynamicTrans) - return null; int bsmSpec = extractLowShortFromInt(this.getIntAt(i)); TypeArray operands = getOperands(); if (operands == null) return null; // safety first @@ -368,7 +366,6 @@ case JVM_CONSTANT_MethodHandle: return "JVM_CONSTANT_MethodHandle"; case JVM_CONSTANT_MethodType: return "JVM_CONSTANT_MethodType"; case JVM_CONSTANT_InvokeDynamic: return "JVM_CONSTANT_InvokeDynamic"; - case JVM_CONSTANT_InvokeDynamicTrans: return "JVM_CONSTANT_InvokeDynamic/transitional"; case JVM_CONSTANT_Invalid: return "JVM_CONSTANT_Invalid"; case JVM_CONSTANT_UnresolvedClass: return "JVM_CONSTANT_UnresolvedClass"; case JVM_CONSTANT_UnresolvedClassInError: return "JVM_CONSTANT_UnresolvedClassInError"; @@ -428,7 +425,6 @@ case JVM_CONSTANT_MethodHandle: case JVM_CONSTANT_MethodType: case JVM_CONSTANT_InvokeDynamic: - case JVM_CONSTANT_InvokeDynamicTrans: visitor.doInt(new IntField(new NamedFieldIdentifier(nameForTag(ctag)), indexOffset(index), true), true); break; } @@ -592,7 +588,6 @@ break; } - case JVM_CONSTANT_InvokeDynamicTrans: case JVM_CONSTANT_InvokeDynamic: { dos.writeByte(cpConstType); int value = getIntAt(ci);
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/GenerateOopMap.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/GenerateOopMap.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/Instance.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/Instance.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -64,7 +64,7 @@ public void iterateFields(OopVisitor visitor, boolean doVMFields) { super.iterateFields(visitor, doVMFields); - ((InstanceKlass) getKlass()).iterateNonStaticFields(visitor); + ((InstanceKlass) getKlass()).iterateNonStaticFields(visitor, this); } public void printValueOn(PrintStream tty) {
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java Fri Apr 22 15:22:45 2011 +0200
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java Fri Apr 22 15:30:53 2011 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -87,7 +87,7 @@
     innerClasses = new OopField(type.getOopField("_inner_classes"), Oop.getHeaderSize());
     nonstaticFieldSize = new CIntField(type.getCIntegerField("_nonstatic_field_size"), Oop.getHeaderSize());
     staticFieldSize = new CIntField(type.getCIntegerField("_static_field_size"), Oop.getHeaderSize());
-    staticOopFieldSize = new CIntField(type.getCIntegerField("_static_oop_field_size"), Oop.getHeaderSize());
+    staticOopFieldCount = new CIntField(type.getCIntegerField("_static_oop_field_count"), Oop.getHeaderSize());
     nonstaticOopMapSize = new CIntField(type.getCIntegerField("_nonstatic_oop_map_size"), Oop.getHeaderSize());
     isMarkedDependent = new CIntField(type.getCIntegerField("_is_marked_dependent"), Oop.getHeaderSize());
     initState = new CIntField(type.getCIntegerField("_init_state"), Oop.getHeaderSize());
@@ -140,7 +140,7 @@
   private static OopField innerClasses;
   private static CIntField nonstaticFieldSize;
   private static CIntField staticFieldSize;
-  private static CIntField staticOopFieldSize;
+  private static CIntField staticOopFieldCount;
   private static CIntField nonstaticOopMapSize;
   private static CIntField isMarkedDependent;
   private static CIntField initState;
@@ -241,6 +241,10 @@
   // Byteside of the header
   private static long headerSize;
 
+  public long getObjectSize(Oop object) {
+    return getSizeHelper() * VM.getVM().getAddressSize();
+  }
+
   public static long getHeaderSize() { return headerSize; }
 
   // Accessors for declared fields
@@ -261,8 +265,7 @@
   public Symbol getSourceDebugExtension(){ return getSymbol(sourceDebugExtension); }
   public TypeArray getInnerClasses() { return (TypeArray) innerClasses.getValue(this); }
   public long getNonstaticFieldSize() { return nonstaticFieldSize.getValue(this); }
-  public long getStaticFieldSize() { return staticFieldSize.getValue(this); }
-  public long getStaticOopFieldSize() { return staticOopFieldSize.getValue(this); }
+  public long getStaticOopFieldCount() { return staticOopFieldCount.getValue(this); }
   public long getNonstaticOopMapSize() { return nonstaticOopMapSize.getValue(this); }
   public boolean getIsMarkedDependent() { return isMarkedDependent.getValue(this) != 0; }
   public long getVtableLen() { return vtableLen.getValue(this); }
@@ -453,14 +456,29 @@
       visitor.doOop(innerClasses, true);
       visitor.doCInt(nonstaticFieldSize, true);
       visitor.doCInt(staticFieldSize, true);
-      visitor.doCInt(staticOopFieldSize, true);
+      visitor.doCInt(staticOopFieldCount, true);
       visitor.doCInt(nonstaticOopMapSize, true);
       visitor.doCInt(isMarkedDependent, true);
      visitor.doCInt(initState, true);
       visitor.doCInt(vtableLen, true);
       visitor.doCInt(itableLen, true);
     }
 
+  }
+
+  /*
+   * Visit the static fields of this InstanceKlass with the obj of
+   * the visitor set to the oop holding the fields, which is
+   * currently the java mirror.
+   */
+  public void iterateStaticFields(OopVisitor visitor) {
+    visitor.setObj(getJavaMirror());
+    visitor.prologue();
+    iterateStaticFieldsInternal(visitor);
+    visitor.epilogue();
+
+  }
+
+  void iterateStaticFieldsInternal(OopVisitor visitor) {
     TypeArray fields = getFields();
     int length = (int) fields.getLength();
     for (int index = 0; index < length; index += NEXT_OFFSET) {
@@ -478,9 +496,9 @@
     return getSuper();
   }
 
-  public void iterateNonStaticFields(OopVisitor visitor) {
+  public void iterateNonStaticFields(OopVisitor visitor, Oop obj) {
     if (getSuper() != null) {
-      ((InstanceKlass) getSuper()).iterateNonStaticFields(visitor);
+      ((InstanceKlass) getSuper()).iterateNonStaticFields(visitor, obj);
     }
 
     TypeArray fields = getFields();
@@ -692,7 +710,7 @@
   public long getObjectSize() {
     long bodySize = alignObjectOffset(getVtableLen() * getHeap().getOopSize())
       + alignObjectOffset(getItableLen() * getHeap().getOopSize())
-      + (getStaticFieldSize() + getNonstaticOopMapSize()) * getHeap().getOopSize();
+      + (getNonstaticOopMapSize()) * getHeap().getOopSize();
     return alignObjectSize(headerSize + bodySize);
   }
 
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceMirrorKlass.java Fri Apr 22 15:30:53 2011 +0200
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.oops;
+
+import java.io.*;
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.memory.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.*;
+
+// An InstanceKlass is the VM level representation of a Java class.
+
+public class InstanceMirrorKlass extends InstanceKlass {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
+    // Just make sure it's there for now
+    Type type = db.lookupType("instanceMirrorKlass");
+  }
+
+  InstanceMirrorKlass(OopHandle handle, ObjectHeap heap) {
+    super(handle, heap);
+  }
+
+  public long getObjectSize(Oop o) {
+    return java_lang_Class.getOopSize(o) * VM.getVM().getAddressSize();
+  }
+
+  public void iterateNonStaticFields(OopVisitor visitor, Oop obj) {
+    super.iterateNonStaticFields(visitor, obj);
+    // Fetch the real klass from the mirror object
+    Klass klass = java_lang_Class.asKlass(obj);
+    if (klass instanceof InstanceKlass) {
+      ((InstanceKlass)klass).iterateStaticFields(visitor);
+    }
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/IntField.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/IntField.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2001, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,7 +40,12 @@ super(holder, fieldArrayIndex); } - public int getValue(Oop obj) { return obj.getHandle().getJIntAt(getOffset()); } + public int getValue(Oop obj) { + if (!isVMField() && !obj.isInstance() && !obj.isArray()) { + throw new InternalError(obj.toString()); + } + return obj.getHandle().getJIntAt(getOffset()); + } public void setValue(Oop obj, int value) throws MutationException { // Fix this: setJIntAt is missing in Address }
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2004, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -362,7 +362,16 @@ if (klass.equals(compiledICHolderKlassHandle)) return new CompiledICHolder(handle, this); if (klass.equals(methodDataKlassHandle)) return new MethodData(handle, this); } - if (klass.equals(instanceKlassKlassHandle)) return new InstanceKlass(handle, this); + if (klass.equals(instanceKlassKlassHandle)) { + InstanceKlass ik = new InstanceKlass(handle, this); + if (ik.getName().asString().equals("java/lang/Class")) { + // We would normally do this using the vtable style + // lookup but since it's not used for these currently + // it's simpler to just check for the name. + return new InstanceMirrorKlass(handle, this); + } + return ik; + } if (klass.equals(objArrayKlassKlassHandle)) return new ObjArrayKlass(handle, this); if (klass.equals(typeArrayKlassKlassHandle)) return new TypeArrayKlass(handle, this);
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/Oop.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/Oop.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -103,12 +103,8 @@ // Returns the byte size of this object public long getObjectSize() { Klass k = getKlass(); - if (k instanceof InstanceKlass) { - return ((InstanceKlass)k).getSizeHelper() - * VM.getVM().getAddressSize(); - } - // If it is not an instance, this method should be replaced. - return getHeaderSize(); + // All other types should be overriding getObjectSize directly + return ((InstanceKlass)k).getObjectSize(this); } // Type test operations
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/OopField.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/OopField.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2002, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,11 +41,17 @@ } public Oop getValue(Oop obj) { + if (!isVMField() && !obj.isInstance() && !obj.isArray()) { + throw new InternalError(); + } return obj.getHeap().newOop(getValueAsOopHandle(obj)); } /** Debugging support */ public OopHandle getValueAsOopHandle(Oop obj) { + if (!isVMField() && !obj.isInstance() && !obj.isArray()) { + throw new InternalError(obj.toString()); + } return obj.getHandle().getOopHandleAt(getOffset()); }
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/OopUtilities.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/OopUtilities.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -74,9 +74,6 @@ private static int THREAD_STATUS_TERMINATED; */ - // java.lang.Class fields - private static OopField hcKlassField; - // java.util.concurrent.locks.AbstractOwnableSynchronizer fields private static OopField absOwnSyncOwnerThreadField; @@ -268,33 +265,6 @@ return null; } - // initialize fields for java.lang.Class - private static void initClassFields() { - if (hcKlassField == null) { - // hc_klass is a HotSpot magic field and hence we can't - // find it from InstanceKlass for java.lang.Class. - TypeDataBase db = VM.getVM().getTypeDataBase(); - int hcKlassOffset = (int) Instance.getHeaderSize(); - try { - hcKlassOffset += (db.lookupIntConstant("java_lang_Class::hc_klass_offset").intValue() * - VM.getVM().getHeapOopSize()); - } catch (RuntimeException re) { - // ignore, currently java_lang_Class::hc_klass_offset is zero - } - if (VM.getVM().isCompressedOopsEnabled()) { - hcKlassField = new NarrowOopField(new NamedFieldIdentifier("hc_klass"), hcKlassOffset, true); - } else { - hcKlassField = new OopField(new NamedFieldIdentifier("hc_klass"), hcKlassOffset, true); - } - } - } - - /** get klassOop field at offset hc_klass_offset from a java.lang.Class object */ - public static Klass classOopToKlass(Oop aClass) { - initClassFields(); - return (Klass) hcKlassField.getValue(aClass); - } - // initialize fields for j.u.c.l AbstractOwnableSynchornizer class private static void initAbsOwnSyncFields() { if (absOwnSyncOwnerThreadField == null) {
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/Symbol.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/Symbol.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2004, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/java_lang_Class.java Fri Apr 22 15:30:53 2011 +0200
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.oops;
+
+import java.util.*;
+
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.memory.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+import sun.jvm.hotspot.utilities.*;
+import sun.jvm.hotspot.jdi.JVMTIThreadState;
+
+/** A utility class encapsulating useful oop operations */
+
+// initialize fields for java.lang.Class
+public class java_lang_Class {
+
+  // java.lang.Class fields
+  static OopField klassField;
+  static IntField oopSizeField;
+
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) {
+    // klass and oop_size are HotSpot magic fields and hence we can't
+    // find them from InstanceKlass for java.lang.Class.
+    Type jlc = db.lookupType("java_lang_Class");
+    int klassOffset = (int) jlc.getCIntegerField("klass_offset").getValue();
+    if (VM.getVM().isCompressedOopsEnabled()) {
+      klassField = new NarrowOopField(new NamedFieldIdentifier("klass"), klassOffset, true);
+    } else {
+      klassField = new OopField(new NamedFieldIdentifier("klass"), klassOffset, true);
+    }
+    int oopSizeOffset = (int) jlc.getCIntegerField("oop_size_offset").getValue();
+    oopSizeField = new IntField(new NamedFieldIdentifier("oop_size"), oopSizeOffset, true);
+  }
+
+  /** get klassOop field at offset hc_klass_offset from a java.lang.Class object */
+  public static Klass asKlass(Oop aClass) {
+    return (Klass) java_lang_Class.klassField.getValue(aClass);
+  }
+
+  /** get oop_size field at offset oop_size_offset from a java.lang.Class object */
+  public static long getOopSize(Oop aClass) {
+    return java_lang_Class.oopSizeField.getValue(aClass);
+  }
+}
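Taken together with the OopUtilities and ClassObjectReferenceImpl hunks, the new java_lang_Class helper gives SA code one place to map a java.lang.Class mirror back to its Klass and to read the mirror's injected oop_size field. A hedged usage sketch, assuming an SA session is already attached and the Oop passed in refers to a java.lang.Class instance (the MirrorInfo class and describeMirror method are illustrative names, not from the patch):

    import sun.jvm.hotspot.oops.InstanceKlass;
    import sun.jvm.hotspot.oops.Klass;
    import sun.jvm.hotspot.oops.Oop;
    import sun.jvm.hotspot.oops.java_lang_Class;
    import sun.jvm.hotspot.runtime.VM;

    public class MirrorInfo {
        /** mirrorOop must be an Oop for a java.lang.Class object from an attached SA session. */
        static void describeMirror(Oop mirrorOop) {
            Klass k = java_lang_Class.asKlass(mirrorOop);              // klass the mirror reflects (null for primitive classes)
            long sizeInWords = java_lang_Class.getOopSize(mirrorOop);  // injected oop_size field
            long sizeInBytes = sizeInWords * VM.getVM().getAddressSize();
            if (k instanceof InstanceKlass) {
                System.out.println(((InstanceKlass) k).getName().asString()
                                   + ", mirror size " + sizeInBytes + " bytes");
            }
        }
    }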
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java Fri Apr 22 15:30:53 2011 +0200 @@ -42,7 +42,7 @@ public static final int JVM_CONSTANT_NameAndType = 12; public static final int JVM_CONSTANT_MethodHandle = 15; public static final int JVM_CONSTANT_MethodType = 16; - public static final int JVM_CONSTANT_InvokeDynamicTrans = 17; // only occurs in old class files + // static final int JVM_CONSTANT_(unused) = 17; public static final int JVM_CONSTANT_InvokeDynamic = 18; // JVM_CONSTANT_MethodHandle subtypes
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -839,20 +839,18 @@ } private void readSystemProperties() { - InstanceKlass systemKls = getSystemDictionary().getSystemKlass(); - systemKls.iterate(new DefaultOopVisitor() { - ObjectReader objReader = new ObjectReader(); - public void doOop(sun.jvm.hotspot.oops.OopField field, boolean isVMField) { - if (field.getID().getName().equals("props")) { - try { - sysProps = (Properties) objReader.readObject(field.getValue(getObj())); - } catch (Exception e) { - if (Assert.ASSERTS_ENABLED) { - e.printStackTrace(); - } - } - } - } - }, false); + final InstanceKlass systemKls = getSystemDictionary().getSystemKlass(); + systemKls.iterateStaticFields(new DefaultOopVisitor() { + ObjectReader objReader = new ObjectReader(); + public void doOop(sun.jvm.hotspot.oops.OopField field, boolean isVMField) { + if (field.getID().getName().equals("props")) { + try { + sysProps = (Properties) objReader.readObject(field.getValue(getObj())); + } catch (Exception e) { + e.printStackTrace(); + } + } + } + }); } }
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/FinalizerInfo.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/FinalizerInfo.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -64,16 +64,16 @@ */ InstanceKlass ik = SystemDictionaryHelper.findInstanceKlass("java.lang.ref.Finalizer"); - final OopField queueField[] = new OopField[1]; - ik.iterateFields(new DefaultOopVisitor() { + final Oop[] queueref = new Oop[1]; + ik.iterateStaticFields(new DefaultOopVisitor() { public void doOop(OopField field, boolean isVMField) { - String name = field.getID().getName(); - if (name.equals("queue")) { - queueField[0] = field; - } + String name = field.getID().getName(); + if (name.equals("queue")) { + queueref[0] = field.getValue(getObj()); + } } - }, false); - Oop queue = queueField[0].getValue(ik); + }); + Oop queue = queueref[0]; InstanceKlass k = (InstanceKlass) queue.getKlass();
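The FinalizerInfo hunk, like the VM.readSystemProperties and ReversePtrsAnalysis hunks, switches from iterateFields(visitor, false) on the instanceKlass to the new iterateStaticFields(visitor), because static field values now live in the java.lang.Class mirror and the visitor's getObj() is set to that mirror. A hedged sketch of the new access pattern (the StaticFieldDump class and readStaticOopField helper are my own illustrative wrappers, not code from the patch):

    import sun.jvm.hotspot.oops.DefaultOopVisitor;
    import sun.jvm.hotspot.oops.InstanceKlass;
    import sun.jvm.hotspot.oops.Oop;
    import sun.jvm.hotspot.oops.OopField;
    import sun.jvm.hotspot.utilities.SystemDictionaryHelper;

    public class StaticFieldDump {
        /** Requires an attached SA session; returns the value of a static oop field, or null. */
        static Oop readStaticOopField(String className, final String fieldName) {
            final Oop[] result = new Oop[1];
            InstanceKlass ik = SystemDictionaryHelper.findInstanceKlass(className);
            ik.iterateStaticFields(new DefaultOopVisitor() {
                public void doOop(OopField field, boolean isVMField) {
                    if (field.getID().getName().equals(fieldName)) {
                        // getObj() is now the java.lang.Class mirror, not the instanceKlass oop.
                        result[0] = field.getValue(getObj());
                    }
                }
            });
            return result[0];
        }
    }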
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -321,7 +321,6 @@ break; } - case JVM_CONSTANT_InvokeDynamicTrans: case JVM_CONSTANT_InvokeDynamic: { dos.writeByte(cpConstType); int value = cpool.getIntAt(ci);
--- a/agent/src/share/classes/sun/jvm/hotspot/types/Field.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/types/Field.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -598,7 +598,6 @@ buf.cell(Integer.toString(cpool.getIntAt(index))); break; - case JVM_CONSTANT_InvokeDynamicTrans: case JVM_CONSTANT_InvokeDynamic: buf.cell("JVM_CONSTANT_InvokeDynamic"); buf.cell(genLowHighShort(cpool.getIntAt(index)) +
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/ConstantTag.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/ConstantTag.java Fri Apr 22 15:30:53 2011 +0200 @@ -40,7 +40,7 @@ private static int JVM_CONSTANT_NameAndType = 12; private static int JVM_CONSTANT_MethodHandle = 15; // JSR 292 private static int JVM_CONSTANT_MethodType = 16; // JSR 292 - private static int JVM_CONSTANT_InvokeDynamicTrans = 17; // JSR 292, only occurs in old class files + // static int JVM_CONSTANT_(unused) = 17; // JSR 292 early drafts only private static int JVM_CONSTANT_InvokeDynamic = 18; // JSR 292 private static int JVM_CONSTANT_Invalid = 0; // For bad value initialization private static int JVM_CONSTANT_UnresolvedClass = 100; // Temporary tag until actual use @@ -83,7 +83,6 @@ public boolean isMethodHandle() { return tag == JVM_CONSTANT_MethodHandle; } public boolean isMethodType() { return tag == JVM_CONSTANT_MethodType; } public boolean isInvokeDynamic() { return tag == JVM_CONSTANT_InvokeDynamic; } - public boolean isInvokeDynamicTrans() { return tag == JVM_CONSTANT_InvokeDynamicTrans; } public boolean isInvalid() { return tag == JVM_CONSTANT_Invalid; }
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/Hashtable.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/Hashtable.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/HashtableEntry.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/HashtableEntry.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapGXLWriter.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapGXLWriter.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004, 2007, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -164,7 +164,7 @@ protected void writeClass(Instance instance) throws IOException { writeObjectHeader(instance); - Klass reflectedType = OopUtilities.classOopToKlass(instance); + Klass reflectedType = java_lang_Class.asKlass(instance); boolean isInstanceKlass = (reflectedType instanceof InstanceKlass); // reflectedType is null for primitive types (int.class etc). if (reflectedType != null) {
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -455,7 +455,7 @@ } protected void writeClass(Instance instance) throws IOException { - Klass reflectedKlass = OopUtilities.classOopToKlass(instance); + Klass reflectedKlass = java_lang_Class.asKlass(instance); // dump instance record only for primitive type Class objects. // all other Class objects are covered by writeClassDumpRecords. if (reflectedKlass == null) { @@ -746,7 +746,7 @@ out.writeByte((byte)kind); if (ik != null) { // static field - writeField(field, ik); + writeField(field, ik.getJavaMirror()); } } }
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/ReversePtrsAnalysis.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/ReversePtrsAnalysis.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -117,10 +117,10 @@ public boolean doObj(Oop obj) { if (obj instanceof InstanceKlass) { final InstanceKlass ik = (InstanceKlass) obj; - ik.iterateFields( + ik.iterateStaticFields( new DefaultOopVisitor() { public void doOop(OopField field, boolean isVMField) { - Oop next = field.getValue(ik); + Oop next = field.getValue(getObj()); LivenessPathElement lp = new LivenessPathElement(null, new NamedFieldIdentifier("Static field \"" + field.getID().getName() + @@ -142,8 +142,7 @@ System.err.println(); } } - }, - false); + }); } return false; }
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/JSJavaFactoryImpl.java Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/JSJavaFactoryImpl.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -158,7 +158,7 @@ } else if (className.equals(javaLangThread())) { res = new JSJavaThread(instance, this); } else if (className.equals(javaLangClass())) { - Klass reflectedType = OopUtilities.classOopToKlass(instance); + Klass reflectedType = java_lang_Class.asKlass(instance); if (reflectedType != null) { JSJavaKlass jk = newJSJavaKlass(reflectedType); // we don't support mirrors of VM internal Klasses
--- a/agent/test/jdi/sasanity.sh Fri Apr 22 15:22:45 2011 +0200 +++ b/agent/test/jdi/sasanity.sh Fri Apr 22 15:30:53 2011 +0200 @@ -43,6 +43,7 @@ fi jdk=$1 +shift OS=`uname` if [ "$OS" != "Linux" ]; then @@ -68,7 +69,7 @@ tmp=/tmp/sagsetup rm -f $tmp -$jdk/bin/java sagtarg > $tmp & +$jdk/bin/java $* sagtarg > $tmp & pid=$! while [ ! -s $tmp ] ; do # Kludge alert!
--- a/make/hotspot_version Fri Apr 22 15:22:45 2011 +0200 +++ b/make/hotspot_version Fri Apr 22 15:30:53 2011 +0200 @@ -35,7 +35,7 @@ HS_MAJOR_VER=21 HS_MINOR_VER=0 -HS_BUILD_NUMBER=03 +HS_BUILD_NUMBER=09 JDK_MAJOR_VER=1 JDK_MINOR_VER=7
--- a/make/linux/Makefile Fri Apr 22 15:22:45 2011 +0200 +++ b/make/linux/Makefile Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/linux/makefiles/adlc.make Fri Apr 22 15:22:45 2011 +0200 +++ b/make/linux/makefiles/adlc.make Fri Apr 22 15:30:53 2011 +0200 @@ -102,7 +102,7 @@ $(EXEC) : $(OBJECTS) @echo Making adlc - $(QUIETLY) $(LINK_NOPROF.CC) -o $(EXEC) $(OBJECTS) + $(QUIETLY) $(HOST.LINK_NOPROF.CC) -o $(EXEC) $(OBJECTS) # Random dependencies: $(OBJECTS): opcodes.hpp classes.hpp adlc.hpp adlcVMDeps.hpp adlparse.hpp archDesc.hpp arena.hpp dict2.hpp filebuff.hpp forms.hpp formsopt.hpp formssel.hpp @@ -204,14 +204,14 @@ $(OUTDIR)/%.o: %.cpp @echo Compiling $< $(QUIETLY) $(REMOVE_TARGET) - $(QUIETLY) $(COMPILE.CC) -o $@ $< $(COMPILE_DONE) + $(QUIETLY) $(HOST.COMPILE.CC) -o $@ $< $(COMPILE_DONE) # Some object files are given a prefix, to disambiguate # them from objects of the same name built for the VM. $(OUTDIR)/adlc-%.o: %.cpp @echo Compiling $< $(QUIETLY) $(REMOVE_TARGET) - $(QUIETLY) $(COMPILE.CC) -o $@ $< $(COMPILE_DONE) + $(QUIETLY) $(HOST.COMPILE.CC) -o $@ $< $(COMPILE_DONE) # #########################################################################
--- a/make/linux/makefiles/arm.make Fri Apr 22 15:22:45 2011 +0200 +++ b/make/linux/makefiles/arm.make Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved. # ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. #
--- a/make/linux/makefiles/gcc.make Fri Apr 22 15:22:45 2011 +0200 +++ b/make/linux/makefiles/gcc.make Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -30,9 +30,13 @@ ifdef CROSS_COMPILE_ARCH CPP = $(ALT_COMPILER_PATH)/g++ CC = $(ALT_COMPILER_PATH)/gcc +HOSTCPP = g++ +HOSTCC = gcc else CPP = g++ CC = gcc +HOSTCPP = $(CPP) +HOSTCC = $(CC) endif AS = $(CC) -c
--- a/make/linux/makefiles/mapfile-vers-debug Fri Apr 22 15:22:45 2011 +0200 +++ b/make/linux/makefiles/mapfile-vers-debug Fri Apr 22 15:30:53 2011 +0200 @@ -3,7 +3,7 @@ # # -# Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/linux/makefiles/mapfile-vers-product Fri Apr 22 15:22:45 2011 +0200 +++ b/make/linux/makefiles/mapfile-vers-product Fri Apr 22 15:30:53 2011 +0200 @@ -3,7 +3,7 @@ # # -# Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/linux/makefiles/ppc.make Fri Apr 22 15:22:45 2011 +0200 +++ b/make/linux/makefiles/ppc.make Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved. # ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. #
--- a/make/linux/makefiles/rules.make Fri Apr 22 15:22:45 2011 +0200 +++ b/make/linux/makefiles/rules.make Fri Apr 22 15:30:53 2011 +0200 @@ -55,6 +55,14 @@ LINK_LIB.CC = $(CCC) $(LFLAGS) $(SHARED_FLAG) PREPROCESS.CC = $(CC_COMPILE) -E +# cross compiling the jvm with c2 requires host compilers to build +# adlc tool + +HOST.CC_COMPILE = $(HOSTCPP) $(CPPFLAGS) $(CFLAGS) +HOST.COMPILE.CC = $(HOST.CC_COMPILE) -c +HOST.LINK_NOPROF.CC = $(HOSTCPP) $(LFLAGS) $(AOUT_FLAGS) + + # Effect of REMOVE_TARGET is to delete out-of-date files during "gnumake -k". REMOVE_TARGET = rm -f $@
--- a/make/linux/makefiles/sa.make Fri Apr 22 15:22:45 2011 +0200 +++ b/make/linux/makefiles/sa.make Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -97,8 +97,8 @@ $(foreach file,$(AGENT_FILES1),$(shell echo $(file) >> $(AGENT_FILES1_LIST))) $(foreach file,$(AGENT_FILES2),$(shell echo $(file) >> $(AGENT_FILES2_LIST))) - $(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES1_LIST) - $(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES2_LIST) + $(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES1_LIST) + $(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES2_LIST) $(QUIETLY) $(REMOTE) $(COMPILE.RMIC) -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer $(QUIETLY) echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES)
--- a/make/linux/makefiles/sparcWorks.make Fri Apr 22 15:22:45 2011 +0200 +++ b/make/linux/makefiles/sparcWorks.make Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,9 @@ CC = cc AS = $(CC) -c +HOSTCPP = $(CPP) +HOSTCC = $(CC) + ARCHFLAG = $(ARCHFLAG/$(BUILDARCH)) ARCHFLAG/i486 = -m32 ARCHFLAG/amd64 = -m64
--- a/make/linux/makefiles/top.make Fri Apr 22 15:22:45 2011 +0200 +++ b/make/linux/makefiles/top.make Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/linux/makefiles/vm.make Fri Apr 22 15:22:45 2011 +0200 +++ b/make/linux/makefiles/vm.make Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -142,13 +142,15 @@ COMPILER2_PATHS += $(HS_COMMON_SRC)/share/vm/libadt COMPILER2_PATHS += $(GENERATED)/adfiles +SHARK_PATHS := $(GAMMADIR)/src/share/vm/shark + # Include dirs per type. Src_Dirs/CORE := $(CORE_PATHS) Src_Dirs/COMPILER1 := $(CORE_PATHS) $(COMPILER1_PATHS) Src_Dirs/COMPILER2 := $(CORE_PATHS) $(COMPILER2_PATHS) Src_Dirs/TIERED := $(CORE_PATHS) $(COMPILER1_PATHS) $(COMPILER2_PATHS) Src_Dirs/ZERO := $(CORE_PATHS) -Src_Dirs/SHARK := $(CORE_PATHS) +Src_Dirs/SHARK := $(CORE_PATHS) $(SHARK_PATHS) Src_Dirs := $(Src_Dirs/$(TYPE)) COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp chaitin\* c2_\* runtime_\*
--- a/make/solaris/makefiles/adlc.make Fri Apr 22 15:22:45 2011 +0200 +++ b/make/solaris/makefiles/adlc.make Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/solaris/makefiles/buildtree.make Fri Apr 22 15:22:45 2011 +0200 +++ b/make/solaris/makefiles/buildtree.make Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/solaris/makefiles/rules.make Fri Apr 22 15:22:45 2011 +0200 +++ b/make/solaris/makefiles/rules.make Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/solaris/makefiles/sa.make Fri Apr 22 15:22:45 2011 +0200 +++ b/make/solaris/makefiles/sa.make Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -88,8 +88,8 @@ $(foreach file,$(AGENT_FILES1),$(shell echo $(file) >> $(AGENT_FILES1_LIST))) $(foreach file,$(AGENT_FILES2),$(shell echo $(file) >> $(AGENT_FILES2_LIST))) - $(QUIETLY) $(COMPILE.JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES1_LIST) - $(QUIETLY) $(COMPILE.JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES2_LIST) + $(QUIETLY) $(COMPILE.JAVAC) -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES1_LIST) + $(QUIETLY) $(COMPILE.JAVAC) -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES2_LIST) $(QUIETLY) $(COMPILE.RMIC) -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer $(QUIETLY) echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES)
--- a/make/solaris/makefiles/top.make Fri Apr 22 15:22:45 2011 +0200 +++ b/make/solaris/makefiles/top.make Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/solaris/makefiles/vm.make Fri Apr 22 15:22:45 2011 +0200 +++ b/make/solaris/makefiles/vm.make Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/windows/create.bat Fri Apr 22 15:22:45 2011 +0200 +++ b/make/windows/create.bat Fri Apr 22 15:30:53 2011 +0200 @@ -95,16 +95,15 @@ echo Will generate VC9 {Visual Studio 2008} ) else ( if "%MSC_VER%" == "1600" ( -echo Detected Visual Studio 2010, but -echo will generate VC9 {Visual Studio 2008} -echo Use conversion wizard in VS 2010. +echo Will generate VC10 {Visual Studio 2010} +set ProjectFile=%HotSpotBuildSpace%\jvm.vcxproj ) else ( echo Will generate VC7 project {Visual Studio 2003 .NET} ) ) ) ) -echo %ProjectFile% +echo %ProjectFile% echo ************************************************************** REM Test all variables to see whether the directories they
--- a/make/windows/create_obj_files.sh Fri Apr 22 15:22:45 2011 +0200 +++ b/make/windows/create_obj_files.sh Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/make/windows/makefiles/compile.make Fri Apr 22 15:22:45 2011 +0200 +++ b/make/windows/makefiles/compile.make Fri Apr 22 15:30:53 2011 +0200 @@ -207,6 +207,9 @@ # Manifest Tool - used in VS2005 and later to adjust manifests stored # as resources inside build artifacts. MT=mt.exe +!if "$(BUILDARCH)" == "i486" +LINK_FLAGS = /SAFESEH $(LINK_FLAGS) +!endif !endif # Compile for space above time.
--- a/make/windows/makefiles/launcher.make Fri Apr 22 15:22:45 2011 +0200 +++ b/make/windows/makefiles/launcher.make Fri Apr 22 15:30:53 2011 +0200 @@ -1,71 +1,73 @@ -# -# Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# -# - - -LAUNCHER_FLAGS=$(CPP_FLAGS) $(ARCHFLAG) \ - /D FULL_VERSION=\"$(HOTSPOT_RELEASE_VERSION)\" \ - /D JDK_MAJOR_VERSION=\"$(JDK_MAJOR_VERSION)\" \ - /D JDK_MINOR_VERSION=\"$(JDK_MINOR_VERSION)\" \ - /D GAMMA \ - /D LAUNCHER_TYPE=\"gamma\" \ - /D _CRT_SECURE_NO_WARNINGS \ - /D _CRT_SECURE_NO_DEPRECATE \ - /D LINK_INTO_LIBJVM \ - /I $(WorkSpace)\src\os\windows\launcher \ - /I $(WorkSpace)\src\share\tools\launcher \ - /I $(WorkSpace)\src\share\vm\prims \ - /I $(WorkSpace)\src\share\vm \ - /I $(WorkSpace)\src\cpu\$(Platform_arch)\vm \ - /I $(WorkSpace)\src\os\windows\vm - -LINK_FLAGS=/manifest $(HS_INTERNAL_NAME).lib kernel32.lib user32.lib /nologo /machine:$(MACHINE) /map /debug /subsystem:console - -!if "$(COMPILER_NAME)" == "VS2005" -# This VS2005 compiler has /GS as a default and requires bufferoverflowU.lib -# on the link command line, otherwise we get missing __security_check_cookie -# externals at link time. Even with /GS-, you need bufferoverflowU.lib. -BUFFEROVERFLOWLIB = bufferoverflowU.lib -LINK_FLAGS = $(LINK_FLAGS) $(BUFFEROVERFLOWLIB) -!endif - -LAUNCHERDIR = $(WorkSpace)/src/os/windows/launcher -LAUNCHERDIR_SHARE = $(WorkSpace)/src/share/tools/launcher - -OUTDIR = launcher - -{$(LAUNCHERDIR)}.c{$(OUTDIR)}.obj: - -mkdir $(OUTDIR) 2>NUL >NUL - $(CPP) $(LAUNCHER_FLAGS) /c /Fo$@ $< - -{$(LAUNCHERDIR_SHARE)}.c{$(OUTDIR)}.obj: - -mkdir $(OUTDIR) 2>NUL >NUL - $(CPP) $(LAUNCHER_FLAGS) /c /Fo$@ $< - -$(OUTDIR)\*.obj: $(LAUNCHERDIR)\*.c $(LAUNCHERDIR)\*.h $(LAUNCHERDIR_SHARE)\*.c $(LAUNCHERDIR_SHARE)\*.h - -launcher: $(OUTDIR)\java.obj $(OUTDIR)\java_md.obj $(OUTDIR)\jli_util.obj - echo $(JAVA_HOME) > jdkpath.txt - $(LINK) $(LINK_FLAGS) /out:hotspot.exe $** - - +# +# Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# + + +LAUNCHER_FLAGS=$(CPP_FLAGS) $(ARCHFLAG) \ + /D FULL_VERSION=\"$(HOTSPOT_RELEASE_VERSION)\" \ + /D JDK_MAJOR_VERSION=\"$(JDK_MAJOR_VERSION)\" \ + /D JDK_MINOR_VERSION=\"$(JDK_MINOR_VERSION)\" \ + /D GAMMA \ + /D LAUNCHER_TYPE=\"gamma\" \ + /D _CRT_SECURE_NO_WARNINGS \ + /D _CRT_SECURE_NO_DEPRECATE \ + /D LINK_INTO_LIBJVM \ + /I $(WorkSpace)\src\os\windows\launcher \ + /I $(WorkSpace)\src\share\tools\launcher \ + /I $(WorkSpace)\src\share\vm\prims \ + /I $(WorkSpace)\src\share\vm \ + /I $(WorkSpace)\src\cpu\$(Platform_arch)\vm \ + /I $(WorkSpace)\src\os\windows\vm + +LINK_FLAGS=/manifest $(HS_INTERNAL_NAME).lib kernel32.lib user32.lib /nologo /machine:$(MACHINE) /map /debug /subsystem:console + +!if "$(COMPILER_NAME)" == "VS2005" +# This VS2005 compiler has /GS as a default and requires bufferoverflowU.lib +# on the link command line, otherwise we get missing __security_check_cookie +# externals at link time. Even with /GS-, you need bufferoverflowU.lib. +BUFFEROVERFLOWLIB = bufferoverflowU.lib +LINK_FLAGS = $(LINK_FLAGS) $(BUFFEROVERFLOWLIB) +!endif + +!if "$(COMPILER_NAME)" == "VS2010" && "$(BUILDARCH)" == "i486" +LINK_FLAGS = /SAFESEH $(LINK_FLAGS) +!endif + +LAUNCHERDIR = $(WorkSpace)/src/os/windows/launcher +LAUNCHERDIR_SHARE = $(WorkSpace)/src/share/tools/launcher + +OUTDIR = launcher + +{$(LAUNCHERDIR)}.c{$(OUTDIR)}.obj: + -mkdir $(OUTDIR) 2>NUL >NUL + $(CPP) $(LAUNCHER_FLAGS) /c /Fo$@ $< + +{$(LAUNCHERDIR_SHARE)}.c{$(OUTDIR)}.obj: + -mkdir $(OUTDIR) 2>NUL >NUL + $(CPP) $(LAUNCHER_FLAGS) /c /Fo$@ $< + +$(OUTDIR)\*.obj: $(LAUNCHERDIR)\*.c $(LAUNCHERDIR)\*.h $(LAUNCHERDIR_SHARE)\*.c $(LAUNCHERDIR_SHARE)\*.h + +launcher: $(OUTDIR)\java.obj $(OUTDIR)\java_md.obj $(OUTDIR)\jli_util.obj + echo $(JAVA_HOME) > jdkpath.txt + $(LINK) $(LINK_FLAGS) /out:hotspot.exe $**
--- a/make/windows/makefiles/projectcreator.make Fri Apr 22 15:22:45 2011 +0200 +++ b/make/windows/makefiles/projectcreator.make Fri Apr 22 15:30:53 2011 +0200 @@ -27,10 +27,6 @@ # This is used externally by both batch and IDE builds, so can't # reference any of the HOTSPOTWORKSPACE, HOTSPOTBUILDSPACE, # HOTSPOTRELEASEBINDEST, or HOTSPOTDEBUGBINDEST environment variables. -# -# NOTE: unfortunately the ProjectCreatorSources list must be kept -# synchronized between this and the Solaris version -# (make/solaris/makefiles/projectcreator.make). ProjectCreatorSources=\ $(WorkSpace)\src\share\tools\ProjectCreator\DirectoryTree.java \ @@ -42,6 +38,7 @@ $(WorkSpace)\src\share\tools\ProjectCreator\WinGammaPlatformVC7.java \ $(WorkSpace)\src\share\tools\ProjectCreator\WinGammaPlatformVC8.java \ $(WorkSpace)\src\share\tools\ProjectCreator\WinGammaPlatformVC9.java \ + $(WorkSpace)\src\share\tools\ProjectCreator\WinGammaPlatformVC10.java \ $(WorkSpace)\src\share\tools\ProjectCreator\Util.java \ $(WorkSpace)\src\share\tools\ProjectCreator\BuildConfig.java \ $(WorkSpace)\src\share\tools\ProjectCreator\ArgsParser.java
--- a/make/windows/makefiles/rules.make Fri Apr 22 15:22:45 2011 +0200 +++ b/make/windows/makefiles/rules.make Fri Apr 22 15:30:53 2011 +0200 @@ -65,8 +65,8 @@ !elseif "$(MSC_VER)" == "1600" -# for compatibility - we don't yet have a ProjectCreator for VC10 -VcVersion=VC9 +VcVersion=VC10 +ProjectFile=jvm.vcxproj !else
--- a/make/windows/makefiles/sa.make Fri Apr 22 15:22:45 2011 +0200 +++ b/make/windows/makefiles/sa.make Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -55,9 +55,9 @@ $(GENERATED)\sa-jdi.jar: $(AGENT_FILES1:/=\) $(AGENT_FILES2:/=\) @if not exist $(SA_CLASSDIR) mkdir $(SA_CLASSDIR) @echo ...Building sa-jdi.jar - @echo ...$(COMPILE_JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -d $(SA_CLASSDIR) .... - @$(COMPILE_JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES1:/=\) - @$(COMPILE_JAVAC) -source 1.4 -target 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES2:/=\) + @echo ...$(COMPILE_JAVAC) -classpath $(SA_CLASSPATH) -d $(SA_CLASSDIR) .... + @$(COMPILE_JAVAC) -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES1:/=\) + @$(COMPILE_JAVAC) -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES2:/=\) $(COMPILE_RMIC) -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer $(QUIETLY) echo $(SA_BUILD_VERSION_PROP)> $(SA_PROPERTIES) $(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql/sa.js
--- a/make/windows/makefiles/vm.make Fri Apr 22 15:22:45 2011 +0200 +++ b/make/windows/makefiles/vm.make Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/assembler_sparc.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/sparc/vm/assembler_sparc.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -3179,7 +3179,7 @@ Label& wrong_method_type) { assert_different_registers(mtype_reg, mh_reg, temp_reg); // compare method type against that of the receiver - RegisterOrConstant mhtype_offset = delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg); + RegisterOrConstant mhtype_offset = delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg); load_heap_oop(mh_reg, mhtype_offset, temp_reg); cmp(temp_reg, mtype_reg); br(Assembler::notEqual, false, Assembler::pn, wrong_method_type); @@ -3195,14 +3195,14 @@ Register temp_reg) { assert_different_registers(vmslots_reg, mh_reg, temp_reg); // load mh.type.form.vmslots - if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) { + if (java_lang_invoke_MethodHandle::vmslots_offset_in_bytes() != 0) { // hoist vmslots into every mh to avoid dependent load chain - ld( Address(mh_reg, delayed_value(java_dyn_MethodHandle::vmslots_offset_in_bytes, temp_reg)), vmslots_reg); + ld( Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::vmslots_offset_in_bytes, temp_reg)), vmslots_reg); } else { Register temp2_reg = vmslots_reg; - load_heap_oop(Address(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)), temp2_reg); - load_heap_oop(Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg)), temp2_reg); - ld( Address(temp2_reg, delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg); + load_heap_oop(Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg)), temp2_reg); + load_heap_oop(Address(temp2_reg, delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, temp_reg)), temp2_reg); + ld( Address(temp2_reg, delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg); } } @@ -3213,7 +3213,7 @@ // pick out the interpreted side of the handler // NOTE: vmentry is not an oop! - ld_ptr(mh_reg, delayed_value(java_dyn_MethodHandle::vmentry_offset_in_bytes, temp_reg), temp_reg); + ld_ptr(mh_reg, delayed_value(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes, temp_reg), temp_reg); // off we go... ld_ptr(temp_reg, MethodHandleEntry::from_interpreted_entry_offset_in_bytes(), temp_reg);
--- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -301,7 +301,8 @@ // thread. assert(_obj != noreg, "must be a valid register"); assert(_oop_index >= 0, "must have oop index"); - __ ld_ptr(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc), G3); + __ load_heap_oop(_obj, java_lang_Class::klass_offset_in_bytes(), G3); + __ ld_ptr(G3, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc), G3); __ cmp(G2_thread, G3); __ br(Assembler::notEqual, false, Assembler::pn, call_patch); __ delayed()->nop();
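The c1_CodeStubs_sparc fix above adds a missing level of indirection: _obj holds the java.lang.Class mirror, so the initializing-thread word has to be read from the klass the mirror points at, not from the mirror itself. Below is a schematic standalone model of the two loads; the structs and field names are illustrative stand-ins for the oop layouts, not HotSpot types.

    #include <cassert>

    struct Thread {};

    // Stand-ins for the klassOop and the java.lang.Class mirror that points at it.
    struct Klass  { Thread* init_thread; };
    struct Mirror { Klass*  klass; };

    // Shape of the fixed code: first load the klass out of the mirror, then
    // compare the initializing thread recorded in the klass with the current one.
    static bool initialized_by(const Mirror* obj, const Thread* current) {
      const Klass* k = obj->klass;          // the load that was missing before the fix
      return k->init_thread == current;
    }

    int main() {
      Thread t;
      Klass  k{&t};
      Mirror m{&k};
      assert(initialized_by(&m, &t));
      return 0;
    }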
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -395,9 +395,9 @@ int offset = code_offset(); - __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type); + __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type); __ delayed()->nop(); - debug_only(__ stop("should have gone to the caller");) + __ should_not_reach_here(); assert(code_offset() - offset <= exception_handler_size, "overflow"); __ end_a_stub(); @@ -2058,6 +2058,13 @@ BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL; if (basic_type == T_ARRAY) basic_type = T_OBJECT; +#ifdef _LP64 + // higher 32bits must be null + __ sra(dst_pos, 0, dst_pos); + __ sra(src_pos, 0, src_pos); + __ sra(length, 0, length); +#endif + // set up the arraycopy stub information ArrayCopyStub* stub = op->stub(); @@ -2065,20 +2072,36 @@ // the known type isn't loaded since the code sanity checks // in debug mode and the type isn't required when we know the exact type // also check that the type is an array type. - // We also, for now, always call the stub if the barrier set requires a - // write_ref_pre barrier (which the stub does, but none of the optimized - // cases currently does). - if (op->expected_type() == NULL || - Universe::heap()->barrier_set()->has_write_ref_pre_barrier()) { + if (op->expected_type() == NULL) { __ mov(src, O0); __ mov(src_pos, O1); __ mov(dst, O2); __ mov(dst_pos, O3); __ mov(length, O4); - __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy)); - - __ br_zero(Assembler::less, false, Assembler::pn, O0, *stub->entry()); - __ delayed()->nop(); + address copyfunc_addr = StubRoutines::generic_arraycopy(); + + if (copyfunc_addr == NULL) { // Use C version if stub was not generated + __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy)); + } else { +#ifndef PRODUCT + if (PrintC1Statistics) { + address counter = (address)&Runtime1::_generic_arraycopystub_cnt; + __ inc_counter(counter, G1, G3); + } +#endif + __ call_VM_leaf(tmp, copyfunc_addr); + } + + if (copyfunc_addr != NULL) { + __ xor3(O0, -1, tmp); + __ sub(length, tmp, length); + __ add(src_pos, tmp, src_pos); + __ br_zero(Assembler::less, false, Assembler::pn, O0, *stub->entry()); + __ delayed()->add(dst_pos, tmp, dst_pos); + } else { + __ br_zero(Assembler::less, false, Assembler::pn, O0, *stub->entry()); + __ delayed()->nop(); + } __ bind(*stub->continuation()); return; } @@ -2135,20 +2158,137 @@ __ delayed()->nop(); } + int shift = shift_amount(basic_type); + if (flags & LIR_OpArrayCopy::type_check) { - if (UseCompressedOops) { - // We don't need decode because we just need to compare - __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp); - __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2); - __ cmp(tmp, tmp2); - __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry()); + // We don't know the array types are compatible + if (basic_type != T_OBJECT) { + // Simple test for basic type arrays + if (UseCompressedOops) { + // We don't need decode because we just need to compare + __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp); + __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2); + __ cmp(tmp, tmp2); + __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry()); + } else { + __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp); + __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2); + __ 
cmp(tmp, tmp2); + __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry()); + } + __ delayed()->nop(); } else { - __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp); - __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2); - __ cmp(tmp, tmp2); - __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry()); + // For object arrays, if src is a sub class of dst then we can + // safely do the copy. + address copyfunc_addr = StubRoutines::checkcast_arraycopy(); + + Label cont, slow; + assert_different_registers(tmp, tmp2, G3, G1); + + __ load_klass(src, G3); + __ load_klass(dst, G1); + + __ check_klass_subtype_fast_path(G3, G1, tmp, tmp2, &cont, copyfunc_addr == NULL ? stub->entry() : &slow, NULL); + + __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); + __ delayed()->nop(); + + __ cmp(G3, 0); + if (copyfunc_addr != NULL) { // use stub if available + // src is not a sub class of dst so we have to do a + // per-element check. + __ br(Assembler::notEqual, false, Assembler::pt, cont); + __ delayed()->nop(); + + __ bind(slow); + + int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray; + if ((flags & mask) != mask) { + // Check that at least both of them object arrays. + assert(flags & mask, "one of the two should be known to be an object array"); + + if (!(flags & LIR_OpArrayCopy::src_objarray)) { + __ load_klass(src, tmp); + } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) { + __ load_klass(dst, tmp); + } + int lh_offset = klassOopDesc::header_size() * HeapWordSize + + Klass::layout_helper_offset_in_bytes(); + + __ lduw(tmp, lh_offset, tmp2); + + jint objArray_lh = Klass::array_layout_helper(T_OBJECT); + __ set(objArray_lh, tmp); + __ cmp(tmp, tmp2); + __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry()); + __ delayed()->nop(); + } + + Register src_ptr = O0; + Register dst_ptr = O1; + Register len = O2; + Register chk_off = O3; + Register super_k = O4; + + __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr); + if (shift == 0) { + __ add(src_ptr, src_pos, src_ptr); + } else { + __ sll(src_pos, shift, tmp); + __ add(src_ptr, tmp, src_ptr); + } + + __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr); + if (shift == 0) { + __ add(dst_ptr, dst_pos, dst_ptr); + } else { + __ sll(dst_pos, shift, tmp); + __ add(dst_ptr, tmp, dst_ptr); + } + __ mov(length, len); + __ load_klass(dst, tmp); + + int ek_offset = (klassOopDesc::header_size() * HeapWordSize + + objArrayKlass::element_klass_offset_in_bytes()); + __ ld_ptr(tmp, ek_offset, super_k); + + int sco_offset = (klassOopDesc::header_size() * HeapWordSize + + Klass::super_check_offset_offset_in_bytes()); + __ lduw(super_k, sco_offset, chk_off); + + __ call_VM_leaf(tmp, copyfunc_addr); + +#ifndef PRODUCT + if (PrintC1Statistics) { + Label failed; + __ br_notnull(O0, false, Assembler::pn, failed); + __ delayed()->nop(); + __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, G1, G3); + __ bind(failed); + } +#endif + + __ br_null(O0, false, Assembler::pt, *stub->continuation()); + __ delayed()->xor3(O0, -1, tmp); + +#ifndef PRODUCT + if (PrintC1Statistics) { + __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, G1, G3); + } +#endif + + __ sub(length, tmp, length); + __ add(src_pos, tmp, src_pos); + __ br(Assembler::always, false, Assembler::pt, *stub->entry()); + __ delayed()->add(dst_pos, tmp, dst_pos); + + __ bind(cont); + } else { + __ br(Assembler::equal, false, Assembler::pn, *stub->entry()); + __ 
delayed()->nop(); + __ bind(cont); + } } - __ delayed()->nop(); } #ifdef ASSERT @@ -2207,14 +2347,18 @@ } #endif - int shift = shift_amount(basic_type); +#ifndef PRODUCT + if (PrintC1Statistics) { + address counter = Runtime1::arraycopy_count_address(basic_type); + __ inc_counter(counter, G1, G3); + } +#endif Register src_ptr = O0; Register dst_ptr = O1; Register len = O2; __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr); - LP64_ONLY(__ sra(src_pos, 0, src_pos);) //higher 32bits must be null if (shift == 0) { __ add(src_ptr, src_pos, src_ptr); } else { @@ -2223,7 +2367,6 @@ } __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr); - LP64_ONLY(__ sra(dst_pos, 0, dst_pos);) //higher 32bits must be null if (shift == 0) { __ add(dst_ptr, dst_pos, dst_ptr); } else { @@ -2231,18 +2374,14 @@ __ add(dst_ptr, tmp, dst_ptr); } - if (basic_type != T_OBJECT) { - if (shift == 0) { - __ mov(length, len); - } else { - __ sll(length, shift, len); - } - __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::primitive_arraycopy)); - } else { - // oop_arraycopy takes a length in number of elements, so don't scale it. - __ mov(length, len); - __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::oop_arraycopy)); - } + bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0; + bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0; + const char *name; + address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); + + // arraycopy stubs takes a length in number of elements, so don't scale it. + __ mov(length, len); + __ call_VM_leaf(tmp, entry); __ bind(*stub->continuation()); }
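The arraycopy changes above route copies through the shared StubRoutines entries and decode the stub's return value with xor3/sub/add before falling back to the slow path. A minimal sketch of that return-value protocol follows, assuming the convention documented in the stub generators (0 means a full copy, ~K means only K elements were transferred); the struct and function names are illustrative, not HotSpot APIs.

    #include <cassert>
    #include <cstdint>

    struct CopyArgs {
      int32_t src_pos;
      int32_t dst_pos;
      int32_t length;
    };

    // Follows the same arithmetic the generated SPARC failure path uses
    // (tmp = O0 xor -1): recover K, step both positions past the copied
    // prefix, shrink the remaining length, and report whether the slow
    // path still has work to do.
    static bool needs_slow_path(int32_t stub_result, CopyArgs& a) {
      if (stub_result == 0) return false;   // everything was copied
      int32_t copied = ~stub_result;        // stub returned ~K on partial copy
      a.src_pos += copied;
      a.dst_pos += copied;
      a.length  -= copied;
      return true;                          // branch to stub->entry()
    }

    int main() {
      CopyArgs a{2, 5, 10};
      bool slow = needs_slow_path(0, a);    // full copy: nothing to adjust
      assert(!slow);
      slow = needs_slow_path(~3, a);        // only 3 elements were copied
      assert(slow);
      assert(a.src_pos == 5 && a.dst_pos == 8 && a.length == 7);
      return 0;
    }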
--- a/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -387,7 +387,7 @@ void C1_MacroAssembler::verify_not_null_oop(Register r) { Label not_null; - br_zero(Assembler::notEqual, false, Assembler::pt, r, not_null); + br_notnull(r, false, Assembler::pt, not_null); delayed()->nop(); stop("non-null oop required"); bind(not_null);
--- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -148,7 +148,7 @@ static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) { assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words), - " mismatch in calculation"); + "mismatch in calculation"); sasm->set_frame_size(frame_size_in_bytes / BytesPerWord); int frame_size_in_slots = frame_size_in_bytes / sizeof(jint); OopMap* oop_map = new OopMap(frame_size_in_slots, 0); @@ -176,9 +176,8 @@ static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true) { assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words), - " mismatch in calculation"); + "mismatch in calculation"); __ save_frame_c1(frame_size_in_bytes); - sasm->set_frame_size(frame_size_in_bytes / BytesPerWord); // Record volatile registers as callee-save values in an OopMap so their save locations will be // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for @@ -367,23 +366,7 @@ switch (id) { case forward_exception_id: { - // we're handling an exception in the context of a compiled - // frame. The registers have been saved in the standard - // places. Perform an exception lookup in the caller and - // dispatch to the handler if found. Otherwise unwind and - // dispatch to the callers exception handler. - - oop_maps = new OopMapSet(); - OopMap* oop_map = generate_oop_map(sasm, true); - - // transfer the pending exception to the exception_oop - __ ld_ptr(G2_thread, in_bytes(JavaThread::pending_exception_offset()), Oexception); - __ ld_ptr(Oexception, 0, G0); - __ st_ptr(G0, G2_thread, in_bytes(JavaThread::pending_exception_offset())); - __ add(I7, frame::pc_return_offset, Oissuing_pc); - - generate_handle_exception(sasm, oop_maps, oop_map); - __ should_not_reach_here(); + oop_maps = generate_handle_exception(id, sasm); } break; @@ -671,15 +654,14 @@ break; case handle_exception_id: - { - __ set_info("handle_exception", dont_gc_arguments); - // make a frame and preserve the caller's caller-save registers + { __ set_info("handle_exception", dont_gc_arguments); + oop_maps = generate_handle_exception(id, sasm); + } + break; - oop_maps = new OopMapSet(); - OopMap* oop_map = save_live_registers(sasm); - __ mov(Oexception->after_save(), Oexception); - __ mov(Oissuing_pc->after_save(), Oissuing_pc); - generate_handle_exception(sasm, oop_maps, oop_map); + case handle_exception_from_callee_id: + { __ set_info("handle_exception_from_callee", dont_gc_arguments); + oop_maps = generate_handle_exception(id, sasm); } break; @@ -696,7 +678,7 @@ G2_thread, Oissuing_pc->after_save()); __ verify_not_null_oop(Oexception->after_save()); - // Restore SP from L7 if the exception PC is a MethodHandle call site. + // Restore SP from L7 if the exception PC is a method handle call site. __ mov(O0, G5); // Save the target address. __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0); __ tst(L0); // Condition codes are preserved over the restore. @@ -1006,48 +988,89 @@ } -void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_maps, OopMap* oop_map, bool) { - Label no_deopt; +OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) { + __ block_comment("generate_handle_exception"); + + // Save registers, if required. 
+ OopMapSet* oop_maps = new OopMapSet(); + OopMap* oop_map = NULL; + switch (id) { + case forward_exception_id: + // We're handling an exception in the context of a compiled frame. + // The registers have been saved in the standard places. Perform + // an exception lookup in the caller and dispatch to the handler + // if found. Otherwise unwind and dispatch to the callers + // exception handler. + oop_map = generate_oop_map(sasm, true); + + // transfer the pending exception to the exception_oop + __ ld_ptr(G2_thread, in_bytes(JavaThread::pending_exception_offset()), Oexception); + __ ld_ptr(Oexception, 0, G0); + __ st_ptr(G0, G2_thread, in_bytes(JavaThread::pending_exception_offset())); + __ add(I7, frame::pc_return_offset, Oissuing_pc); + break; + case handle_exception_id: + // At this point all registers MAY be live. + oop_map = save_live_registers(sasm); + __ mov(Oexception->after_save(), Oexception); + __ mov(Oissuing_pc->after_save(), Oissuing_pc); + break; + case handle_exception_from_callee_id: + // At this point all registers except exception oop (Oexception) + // and exception pc (Oissuing_pc) are dead. + oop_map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); + sasm->set_frame_size(frame_size_in_bytes / BytesPerWord); + __ save_frame_c1(frame_size_in_bytes); + __ mov(Oexception->after_save(), Oexception); + __ mov(Oissuing_pc->after_save(), Oissuing_pc); + break; + default: ShouldNotReachHere(); + } __ verify_not_null_oop(Oexception); // save the exception and issuing pc in the thread - __ st_ptr(Oexception, G2_thread, in_bytes(JavaThread::exception_oop_offset())); + __ st_ptr(Oexception, G2_thread, in_bytes(JavaThread::exception_oop_offset())); __ st_ptr(Oissuing_pc, G2_thread, in_bytes(JavaThread::exception_pc_offset())); - // save the real return address and use the throwing pc as the return address to lookup (has bci & oop map) - __ mov(I7, L0); + // use the throwing pc as the return address to lookup (has bci & oop map) __ mov(Oissuing_pc, I7); __ sub(I7, frame::pc_return_offset, I7); int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc)); + oop_maps->add_gc_map(call_offset, oop_map); // Note: if nmethod has been deoptimized then regardless of // whether it had a handler or not we will deoptimize // by entering the deopt blob with a pending exception. -#ifdef ASSERT - Label done; - __ tst(O0); - __ br(Assembler::notZero, false, Assembler::pn, done); - __ delayed()->nop(); - __ stop("should have found address"); - __ bind(done); -#endif + // Restore the registers that were saved at the beginning, remove + // the frame and jump to the exception handler. + switch (id) { + case forward_exception_id: + case handle_exception_id: + restore_live_registers(sasm); + __ jmp(O0, 0); + __ delayed()->restore(); + break; + case handle_exception_from_callee_id: + // Restore SP from L7 if the exception PC is a method handle call site. + __ mov(O0, G5); // Save the target address. + __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0); + __ tst(L0); // Condition codes are preserved over the restore. + __ restore(); - // restore the registers that were saved at the beginning and jump to the exception handler. - restore_live_registers(sasm); + __ jmp(G5, 0); // jump to the exception handler + __ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP); // Restore SP if required. 
+ break; + default: ShouldNotReachHere(); + } - __ jmp(O0, 0); - __ delayed()->restore(); - - oop_maps->add_gc_map(call_offset, oop_map); + return oop_maps; } #undef __ -#define __ masm-> - const char *Runtime1::pd_name_for_address(address entry) { return "<unknown function>"; }
--- a/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1188,8 +1188,8 @@ __ st_ptr(O2, XXX_STATE(_stack)); // PREPUSH __ lduh(max_stack, O3); // Full size expression stack - guarantee(!EnableMethodHandles, "no support yet for java.dyn.MethodHandle"); //6815692 - //6815692//if (EnableMethodHandles) + guarantee(!EnableInvokeDynamic, "no support yet for java.lang.invoke.MethodHandle"); //6815692 + //6815692//if (EnableInvokeDynamic) //6815692// __ inc(O3, methodOopDesc::extra_stack_entries()); __ sll(O3, LogBytesPerWord, O3); __ sub(O2, O3, O3);
--- a/src/cpu/sparc/vm/dump_sparc.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/sparc/vm/dump_sparc.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -80,13 +80,19 @@ for (int j = 0; j < num_virtuals; ++j) { dummy_vtable[num_virtuals * i + j] = (void*)masm->pc(); __ save(SP, -256, SP); + int offset = (i << 8) + j; + Register src = G0; + if (!Assembler::is_simm13(offset)) { + __ sethi(offset, L0); + src = L0; + offset = offset & ((1 << 10) - 1); + } __ brx(Assembler::always, false, Assembler::pt, common_code); // Load L0 with a value indicating vtable/offset pair. // -- bits[ 7..0] (8 bits) which virtual method in table? - // -- bits[12..8] (5 bits) which virtual method table? - // -- must fit in 13-bit instruction immediate field. - __ delayed()->set((i << 8) + j, L0); + // -- bits[13..8] (6 bits) which virtual method table? + __ delayed()->or3(src, offset, L0); } }
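The dump_sparc.cpp hunk widens the vtable-index field from 5 to 6 bits and falls back to a sethi/or3 pair once the packed (table << 8) + slot value no longer fits a 13-bit signed immediate. A small worked example of that packing, using the bit layout from the comment (bits [7..0] = method slot, bits [13..8] = vtable index); is_simm13 here is a plain stand-in for Assembler::is_simm13.

    #include <cassert>
    #include <cstdint>

    // Fits a SPARC 13-bit signed immediate? (-4096..4095)
    static bool is_simm13(int32_t v) { return v >= -4096 && v <= 4095; }

    // Pack (vtable index, method slot) the way the dummy vtable stubs do.
    static int32_t pack(int32_t table, int32_t slot) { return (table << 8) + slot; }

    int main() {
      assert(is_simm13(pack(15, 255)));   // 4095: still a single set() in the delay slot
      assert(!is_simm13(pack(16, 0)));    // 4096: needs the new sethi + or3 sequence

      // The fallback splits the value: sethi materializes the high bits in L0,
      // and or3 adds back only the low 10 bits, exactly as the patch masks them.
      int32_t offset = pack(40, 7);
      int32_t low10  = offset & ((1 << 10) - 1);
      assert((offset & ~((1 << 10) - 1)) + low10 == offset);
      return 0;
    }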
--- a/src/cpu/sparc/vm/globals_sparc.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/sparc/vm/globals_sparc.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -51,6 +51,7 @@ define_pd_global(intx, OptoLoopAlignment, 16); // = 4*wordSize define_pd_global(intx, InlineFrequencyCount, 50); // we can use more inlining on the SPARC define_pd_global(intx, InlineSmallCode, 1500); + #ifdef _LP64 // Stack slots are 2X larger in LP64 than in the 32 bit VM. define_pd_global(intx, ThreadStackSize, 1024); @@ -71,4 +72,6 @@ define_pd_global(bool, UseMembar, false); +// GC Ergo Flags +define_pd_global(intx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread #endif // CPU_SPARC_VM_GLOBALS_SPARC_HPP
--- a/src/cpu/sparc/vm/interp_masm_sparc.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -743,12 +743,12 @@ if (index_size == sizeof(u2)) { get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned); } else if (index_size == sizeof(u4)) { - assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic"); + assert(EnableInvokeDynamic, "giant index used only for JSR 292"); get_4_byte_integer_at_bcp(bcp_offset, cache, tmp); assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line"); xor3(tmp, -1, tmp); // convert to plain index } else if (index_size == sizeof(u1)) { - assert(EnableMethodHandles, "tiny index used only for EnableMethodHandles"); + assert(EnableInvokeDynamic, "tiny index used only for JSR 292"); ldub(Lbcp, bcp_offset, tmp); } else { ShouldNotReachHere();
--- a/src/cpu/sparc/vm/interpreter_sparc.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/sparc/vm/interpreter_sparc.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -260,9 +260,9 @@ // Method handle invoker -// Dispatch a method of the form java.dyn.MethodHandles::invoke(...) +// Dispatch a method of the form java.lang.invoke.MethodHandles::invoke(...) address InterpreterGenerator::generate_method_handle_entry(void) { - if (!EnableMethodHandles) { + if (!EnableInvokeDynamic) { return generate_abstract_entry(); }
--- a/src/cpu/sparc/vm/jni_sparc.h Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/sparc/vm/jni_sparc.h Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/methodHandles_sparc.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -112,8 +112,8 @@ } // given the MethodType, find out where the MH argument is buried - __ load_heap_oop(Address(O0_mtype, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, O1_scratch)), O4_argslot); - __ ldsw( Address(O4_argslot, __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O4_argslot); + __ load_heap_oop(Address(O0_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, O1_scratch)), O4_argslot); + __ ldsw( Address(O4_argslot, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O4_argslot); __ add(Gargs, __ argument_offset(O4_argslot, 1), O4_argbase); // Note: argument_address uses its input as a scratch register! __ ld_ptr(Address(O4_argbase, -Interpreter::stackElementSize), G3_method_handle); @@ -141,10 +141,10 @@ // load up an adapter from the calling type (Java weaves this) Register O2_form = O2_scratch; Register O3_adapter = O3_scratch; - __ load_heap_oop(Address(O0_mtype, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, O1_scratch)), O2_form); - // load_heap_oop(Address(O2_form, __ delayed_value(java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter); + __ load_heap_oop(Address(O0_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, O1_scratch)), O2_form); + // load_heap_oop(Address(O2_form, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter); // deal with old JDK versions: - __ add( Address(O2_form, __ delayed_value(java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter); + __ add( Address(O2_form, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter); __ cmp(O3_adapter, O2_form); Label sorry_no_invoke_generic; __ brx(Assembler::lessUnsigned, false, Assembler::pn, sorry_no_invoke_generic); @@ -376,16 +376,16 @@ // which conversion op types are implemented here? int MethodHandles::adapter_conversion_ops_supported_mask() { - return ((1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY) - |(1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW) - |(1<<sun_dyn_AdapterMethodHandle::OP_CHECK_CAST) - |(1<<sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM) - |(1<<sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM) - |(1<<sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS) - |(1<<sun_dyn_AdapterMethodHandle::OP_ROT_ARGS) - |(1<<sun_dyn_AdapterMethodHandle::OP_DUP_ARGS) - |(1<<sun_dyn_AdapterMethodHandle::OP_DROP_ARGS) - //|(1<<sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG! + return ((1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS) + //|(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG! ); // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS. 
} @@ -413,21 +413,22 @@ const Register O1_actual = O1; const Register O2_required = O2; - guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets"); + guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets"); // Some handy addresses: Address G5_method_fie( G5_method, in_bytes(methodOopDesc::from_interpreted_offset())); + Address G5_method_fce( G5_method, in_bytes(methodOopDesc::from_compiled_offset())); - Address G3_mh_vmtarget( G3_method_handle, java_dyn_MethodHandle::vmtarget_offset_in_bytes()); + Address G3_mh_vmtarget( G3_method_handle, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes()); - Address G3_dmh_vmindex( G3_method_handle, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes()); + Address G3_dmh_vmindex( G3_method_handle, java_lang_invoke_DirectMethodHandle::vmindex_offset_in_bytes()); - Address G3_bmh_vmargslot( G3_method_handle, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes()); - Address G3_bmh_argument( G3_method_handle, sun_dyn_BoundMethodHandle::argument_offset_in_bytes()); + Address G3_bmh_vmargslot( G3_method_handle, java_lang_invoke_BoundMethodHandle::vmargslot_offset_in_bytes()); + Address G3_bmh_argument( G3_method_handle, java_lang_invoke_BoundMethodHandle::argument_offset_in_bytes()); - Address G3_amh_vmargslot( G3_method_handle, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes()); - Address G3_amh_argument ( G3_method_handle, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes()); - Address G3_amh_conversion(G3_method_handle, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes()); + Address G3_amh_vmargslot( G3_method_handle, java_lang_invoke_AdapterMethodHandle::vmargslot_offset_in_bytes()); + Address G3_amh_argument ( G3_method_handle, java_lang_invoke_AdapterMethodHandle::argument_offset_in_bytes()); + Address G3_amh_conversion(G3_method_handle, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes()); const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); @@ -444,17 +445,15 @@ case _raise_exception: { // Not a real MH entry, but rather shared code for raising an - // exception. Since we use a C2I adapter to set up the - // interpreter state, arguments are expected in compiler - // argument registers. + // exception. Since we use the compiled entry, arguments are + // expected in compiler argument registers. assert(raise_exception_method(), "must be set"); - address c2i_entry = raise_exception_method()->get_c2i_entry(); - assert(c2i_entry, "method must be linked"); + assert(raise_exception_method()->from_compiled_entry(), "method must be linked"); __ mov(O5_savedSP, SP); // Cut the stack back to where the caller started. Label L_no_method; - // FIXME: fill in _raise_exception_method with a suitable sun.dyn method + // FIXME: fill in _raise_exception_method with a suitable java.lang.invoke method __ set(AddressLiteral((address) &_raise_exception_method), G5_method); __ ld_ptr(Address(G5_method, 0), G5_method); __ tst(G5_method); @@ -468,10 +467,9 @@ __ delayed()->nop(); __ verify_oop(G5_method); - __ jump_to(AddressLiteral(c2i_entry), O3_scratch); + __ jump_indirect_to(G5_method_fce, O3_scratch); // jump to compiled entry __ delayed()->nop(); - // If we get here, the Java runtime did not do its job of creating the exception. // Do something that is at least causes a valid throw from the interpreter. 
__ bind(L_no_method); __ unimplemented("call throw_WrongMethodType_entry"); @@ -777,9 +775,13 @@ switch (ek) { case _adapter_opt_i2l: { - __ ldsw(arg_lsw, O2_scratch); // Load LSW - NOT_LP64(__ srlx(O2_scratch, BitsPerInt, O3_scratch)); // Move high bits to lower bits for std - __ st_long(O2_scratch, arg_msw); // Uses O2/O3 on !_LP64 +#ifdef _LP64 + __ ldsw(arg_lsw, O2_scratch); // Load LSW sign-extended +#else + __ ldsw(arg_lsw, O3_scratch); // Load LSW sign-extended + __ srlx(O3_scratch, BitsPerInt, O2_scratch); // Move MSW value to lower 32-bits for std +#endif + __ st_long(O2_scratch, arg_msw); // Uses O2/O3 on !_LP64 } break; case _adapter_opt_unboxl:
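The _adapter_opt_i2l hunk above widens an int argument slot to a long: a single sign-extending load and store on LP64, or an explicit MSW/LSW split on 32-bit. A tiny worked example of that widening, independent of the interpreter's actual slot layout (the variable names are illustrative only).

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t lsw = -5;                       // the int argument, loaded sign-extended (ldsw)
      int64_t widened = (int64_t)lsw;         // LP64: one 64-bit store covers both slots
      assert(widened == -5);

      // 32-bit path: the same value is stored as a most/least significant word pair.
      uint32_t msw = (uint32_t)((uint64_t)widened >> 32);
      uint32_t low = (uint32_t)(uint64_t)widened;
      assert(msw == 0xFFFFFFFFu);
      assert(low == 0xFFFFFFFBu);

      // Reassembling the pair gives back the original long value.
      int64_t rebuilt = (int64_t)(((uint64_t)msw << 32) | low);
      assert(rebuilt == -5);
      return 0;
    }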
--- a/src/cpu/sparc/vm/nativeInst_sparc.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/sparc/vm/nativeInst_sparc.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -52,6 +52,22 @@ ICache::invalidate_range(instaddr, 7 * BytesPerInstWord); } +void NativeInstruction::verify_data64_sethi(address instaddr, intptr_t x) { + ResourceMark rm; + unsigned char buffer[10 * BytesPerInstWord]; + CodeBuffer buf(buffer, 10 * BytesPerInstWord); + MacroAssembler masm(&buf); + + Register destreg = inv_rd(*(unsigned int *)instaddr); + // Generate the proper sequence into a temporary buffer and compare + // it with the original sequence. + masm.patchable_sethi(x, destreg); + int len = buffer - masm.pc(); + for (int i = 0; i < len; i++) { + assert(instaddr[i] == buffer[i], "instructions must match"); + } +} + void NativeInstruction::verify() { // make sure code pattern is actually an instruction address address addr = addr_at(0);
--- a/src/cpu/sparc/vm/nativeInst_sparc.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/sparc/vm/nativeInst_sparc.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -254,6 +254,7 @@ // sethi. This only does the sethi. The disp field (bottom 10 bits) // must be handled separately. static void set_data64_sethi(address instaddr, intptr_t x); + static void verify_data64_sethi(address instaddr, intptr_t x); // combine the fields of a sethi/simm13 pair (simm13 = or, add, jmpl, ld/st) static int data32(int sethi_insn, int arith_insn) {
--- a/src/cpu/sparc/vm/relocInfo_sparc.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/sparc/vm/relocInfo_sparc.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ #include "oops/oop.inline.hpp" #include "runtime/safepoint.hpp" -void Relocation::pd_set_data_value(address x, intptr_t o) { +void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) { NativeInstruction* ip = nativeInstruction_at(addr()); jint inst = ip->long_at(0); assert(inst != NativeInstruction::illegal_instruction(), "no breakpoint"); @@ -83,7 +83,11 @@ guarantee(Assembler::is_simm13(simm13), "offset can't overflow simm13"); inst &= ~Assembler::simm( -1, 13); inst |= Assembler::simm(simm13, 13); - ip->set_long_at(0, inst); + if (verify_only) { + assert(ip->long_at(0) == inst, "instructions must match"); + } else { + ip->set_long_at(0, inst); + } } break; @@ -97,19 +101,36 @@ jint np = oopDesc::encode_heap_oop((oop)x); inst &= ~Assembler::hi22(-1); inst |= Assembler::hi22((intptr_t)np); - ip->set_long_at(0, inst); + if (verify_only) { + assert(ip->long_at(0) == inst, "instructions must match"); + } else { + ip->set_long_at(0, inst); + } inst2 = ip->long_at( NativeInstruction::nop_instruction_size ); guarantee(Assembler::inv_op(inst2)==Assembler::arith_op, "arith op"); - ip->set_long_at(NativeInstruction::nop_instruction_size, ip->set_data32_simm13( inst2, (intptr_t)np)); + if (verify_only) { + assert(ip->long_at(NativeInstruction::nop_instruction_size) == NativeInstruction::set_data32_simm13( inst2, (intptr_t)np), + "instructions must match"); + } else { + ip->set_long_at(NativeInstruction::nop_instruction_size, NativeInstruction::set_data32_simm13( inst2, (intptr_t)np)); + } break; } - ip->set_data64_sethi( ip->addr_at(0), (intptr_t)x ); + if (verify_only) { + ip->verify_data64_sethi( ip->addr_at(0), (intptr_t)x ); + } else { + ip->set_data64_sethi( ip->addr_at(0), (intptr_t)x ); + } #else guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi"); inst &= ~Assembler::hi22( -1); inst |= Assembler::hi22((intptr_t)x); // (ignore offset; it doesn't play into the sethi) - ip->set_long_at(0, inst); + if (verify_only) { + assert(ip->long_at(0) == inst, "instructions must match"); + } else { + ip->set_long_at(0, inst); + } #endif } break;
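The nativeInst and relocInfo hunks above both add a verify-only mode: instead of patching, the fixed-up instruction word is recomputed and compared against what is already in the code stream. Below is a minimal standalone model of that pattern for a toy 32-bit instruction whose low 13 bits hold an immediate; the helpers are illustrative, not HotSpot's Assembler API.

    #include <cassert>
    #include <cstdint>

    // Toy instruction word: keep the non-immediate bits, replace the low-13 field.
    static uint32_t with_simm13(uint32_t inst, int32_t imm) {
      return (inst & ~0x1FFFu) | ((uint32_t)imm & 0x1FFFu);
    }

    // The pattern the patch introduces: in verify mode, recompute the expected
    // word and assert it matches the live code; otherwise actually store it.
    static void set_or_verify(uint32_t* slot, int32_t imm, bool verify_only) {
      uint32_t expected = with_simm13(*slot, imm);
      if (verify_only) {
        assert(*slot == expected && "instructions must match");
      } else {
        *slot = expected;
      }
    }

    int main() {
      uint32_t code = 0x82006000;            // arithmetic op with immediate 0
      set_or_verify(&code, 0x123, false);    // patch pass writes the new immediate
      set_or_verify(&code, 0x123, true);     // verify pass re-derives and compares
      assert(code == 0x82006123);
      return 0;
    }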
--- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1769,6 +1769,7 @@ // returns. nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm, methodHandle method, + int compile_id, int total_in_args, int comp_args_on_stack, // in VMRegStackSlots BasicType *in_sig_bt, @@ -2462,6 +2463,7 @@ __ flush(); nmethod *nm = nmethod::new_native_nmethod(method, + compile_id, masm->code(), vep_offset, frame_complete,
--- a/src/cpu/sparc/vm/sparc.ad Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/sparc/vm/sparc.ad Fri Apr 22 15:30:53 2011 +0200 @@ -1843,6 +1843,10 @@ // registers? True for Intel but false for most RISCs const bool Matcher::clone_shift_expressions = false; +// Do we need to mask the count passed to shift instructions or does +// the cpu only look at the lower 5/6 bits anyway? +const bool Matcher::need_masked_shift_count = false; + bool Matcher::narrow_oop_use_complex_address() { NOT_LP64(ShouldNotCallThis()); assert(UseCompressedOops, "only for compressed oops code");
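Note: the new Matcher::need_masked_shift_count flag records whether the back end must emit an explicit mask on shift counts. Java defines x << n as shifting by n & 31 (n & 63 for longs); on CPUs whose shift instructions already ignore the upper bits of the count, the Matcher can omit the extra and. A small, illustrative C++ rendering of the masking semantics the flag refers to:

#include <cassert>
#include <cstdint>

// Java-style shifts: the count is reduced modulo the operand width.
static int32_t jshl  (int32_t x, int32_t n) { return x << (n & 31); }
static int64_t jshl64(int64_t x, int32_t n) { return x << (n & 63); }

int main() {
  assert(jshl(1, 33)   == 2);   // shifting an int by 33 behaves like shifting by 1
  assert(jshl64(1, 65) == 2);   // shifting a long by 65 behaves like shifting by 1
  return 0;
}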
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -968,19 +968,6 @@ return start; } - static address disjoint_byte_copy_entry; - static address disjoint_short_copy_entry; - static address disjoint_int_copy_entry; - static address disjoint_long_copy_entry; - static address disjoint_oop_copy_entry; - - static address byte_copy_entry; - static address short_copy_entry; - static address int_copy_entry; - static address long_copy_entry; - static address oop_copy_entry; - - static address checkcast_copy_entry; // // Verify that a register contains clean 32-bits positive value @@ -1046,31 +1033,40 @@ // // The input registers are overwritten. // - void gen_write_ref_array_pre_barrier(Register addr, Register count) { + void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) { BarrierSet* bs = Universe::heap()->barrier_set(); - if (bs->has_write_ref_pre_barrier()) { - assert(bs->has_write_ref_array_pre_opt(), - "Else unsupported barrier set."); - - __ save_frame(0); - // Save the necessary global regs... will be used after. - if (addr->is_global()) { - __ mov(addr, L0); - } - if (count->is_global()) { - __ mov(count, L1); - } - __ mov(addr->after_save(), O0); - // Get the count into O1 - __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)); - __ delayed()->mov(count->after_save(), O1); - if (addr->is_global()) { - __ mov(L0, addr); - } - if (count->is_global()) { - __ mov(L1, count); - } - __ restore(); + switch (bs->kind()) { + case BarrierSet::G1SATBCT: + case BarrierSet::G1SATBCTLogging: + // With G1, don't generate the call if we statically know that the target in uninitialized + if (!dest_uninitialized) { + __ save_frame(0); + // Save the necessary global regs... will be used after. + if (addr->is_global()) { + __ mov(addr, L0); + } + if (count->is_global()) { + __ mov(count, L1); + } + __ mov(addr->after_save(), O0); + // Get the count into O1 + __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)); + __ delayed()->mov(count->after_save(), O1); + if (addr->is_global()) { + __ mov(L0, addr); + } + if (count->is_global()) { + __ mov(L1, count); + } + __ restore(); + } + break; + case BarrierSet::CardTableModRef: + case BarrierSet::CardTableExtension: + case BarrierSet::ModRef: + break; + default: + ShouldNotReachHere(); } } // @@ -1084,7 +1080,7 @@ // The input registers are overwritten. // void gen_write_ref_array_post_barrier(Register addr, Register count, - Register tmp) { + Register tmp) { BarrierSet* bs = Universe::heap()->barrier_set(); switch (bs->kind()) { @@ -1283,7 +1279,7 @@ // to: O1 // count: O2 treated as signed // - address generate_disjoint_byte_copy(bool aligned, const char * name) { + address generate_disjoint_byte_copy(bool aligned, address *entry, const char *name) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); @@ -1299,9 +1295,11 @@ assert_clean_int(count, O3); // Make sure 'count' is clean int. 
- if (!aligned) disjoint_byte_copy_entry = __ pc(); - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) - if (!aligned) BLOCK_COMMENT("Entry:"); + if (entry != NULL) { + *entry = __ pc(); + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) + BLOCK_COMMENT("Entry:"); + } // for short arrays, just do single element copy __ cmp(count, 23); // 16 + 7 @@ -1391,15 +1389,13 @@ // to: O1 // count: O2 treated as signed // - address generate_conjoint_byte_copy(bool aligned, const char * name) { + address generate_conjoint_byte_copy(bool aligned, address nooverlap_target, + address *entry, const char *name) { // Do reverse copy. __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); - address nooverlap_target = aligned ? - StubRoutines::arrayof_jbyte_disjoint_arraycopy() : - disjoint_byte_copy_entry; Label L_skip_alignment, L_align, L_aligned_copy; Label L_copy_byte, L_copy_byte_loop, L_exit; @@ -1412,9 +1408,11 @@ assert_clean_int(count, O3); // Make sure 'count' is clean int. - if (!aligned) byte_copy_entry = __ pc(); - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) - if (!aligned) BLOCK_COMMENT("Entry:"); + if (entry != NULL) { + *entry = __ pc(); + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) + BLOCK_COMMENT("Entry:"); + } array_overlap_test(nooverlap_target, 0); @@ -1504,7 +1502,7 @@ // to: O1 // count: O2 treated as signed // - address generate_disjoint_short_copy(bool aligned, const char * name) { + address generate_disjoint_short_copy(bool aligned, address *entry, const char * name) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); @@ -1520,9 +1518,11 @@ assert_clean_int(count, O3); // Make sure 'count' is clean int. - if (!aligned) disjoint_short_copy_entry = __ pc(); - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) - if (!aligned) BLOCK_COMMENT("Entry:"); + if (entry != NULL) { + *entry = __ pc(); + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) + BLOCK_COMMENT("Entry:"); + } // for short arrays, just do single element copy __ cmp(count, 11); // 8 + 3 (22 bytes) @@ -1842,15 +1842,13 @@ // to: O1 // count: O2 treated as signed // - address generate_conjoint_short_copy(bool aligned, const char * name) { + address generate_conjoint_short_copy(bool aligned, address nooverlap_target, + address *entry, const char *name) { // Do reverse copy. __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); - address nooverlap_target = aligned ? - StubRoutines::arrayof_jshort_disjoint_arraycopy() : - disjoint_short_copy_entry; Label L_skip_alignment, L_skip_alignment2, L_aligned_copy; Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit; @@ -1865,9 +1863,11 @@ assert_clean_int(count, O3); // Make sure 'count' is clean int. 
- if (!aligned) short_copy_entry = __ pc(); - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) - if (!aligned) BLOCK_COMMENT("Entry:"); + if (entry != NULL) { + *entry = __ pc(); + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) + BLOCK_COMMENT("Entry:"); + } array_overlap_test(nooverlap_target, 1); @@ -2072,7 +2072,7 @@ // to: O1 // count: O2 treated as signed // - address generate_disjoint_int_copy(bool aligned, const char * name) { + address generate_disjoint_int_copy(bool aligned, address *entry, const char *name) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); @@ -2080,9 +2080,11 @@ const Register count = O2; assert_clean_int(count, O3); // Make sure 'count' is clean int. - if (!aligned) disjoint_int_copy_entry = __ pc(); - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) - if (!aligned) BLOCK_COMMENT("Entry:"); + if (entry != NULL) { + *entry = __ pc(); + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) + BLOCK_COMMENT("Entry:"); + } generate_disjoint_int_copy_core(aligned); @@ -2204,20 +2206,19 @@ // to: O1 // count: O2 treated as signed // - address generate_conjoint_int_copy(bool aligned, const char * name) { + address generate_conjoint_int_copy(bool aligned, address nooverlap_target, + address *entry, const char *name) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); - address nooverlap_target = aligned ? - StubRoutines::arrayof_jint_disjoint_arraycopy() : - disjoint_int_copy_entry; - assert_clean_int(O2, O3); // Make sure 'count' is clean int. - if (!aligned) int_copy_entry = __ pc(); - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) - if (!aligned) BLOCK_COMMENT("Entry:"); + if (entry != NULL) { + *entry = __ pc(); + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) + BLOCK_COMMENT("Entry:"); + } array_overlap_test(nooverlap_target, 2); @@ -2336,16 +2337,18 @@ // to: O1 // count: O2 treated as signed // - address generate_disjoint_long_copy(bool aligned, const char * name) { + address generate_disjoint_long_copy(bool aligned, address *entry, const char *name) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); assert_clean_int(O2, O3); // Make sure 'count' is clean int. - if (!aligned) disjoint_long_copy_entry = __ pc(); - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) - if (!aligned) BLOCK_COMMENT("Entry:"); + if (entry != NULL) { + *entry = __ pc(); + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) + BLOCK_COMMENT("Entry:"); + } generate_disjoint_long_copy_core(aligned); @@ -2406,19 +2409,21 @@ // to: O1 // count: O2 treated as signed // - address generate_conjoint_long_copy(bool aligned, const char * name) { + address generate_conjoint_long_copy(bool aligned, address nooverlap_target, + address *entry, const char *name) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); - assert(!aligned, "usage"); - address nooverlap_target = disjoint_long_copy_entry; + assert(aligned, "Should always be aligned"); assert_clean_int(O2, O3); // Make sure 'count' is clean int. 
- if (!aligned) long_copy_entry = __ pc(); - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) - if (!aligned) BLOCK_COMMENT("Entry:"); + if (entry != NULL) { + *entry = __ pc(); + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) + BLOCK_COMMENT("Entry:"); + } array_overlap_test(nooverlap_target, 3); @@ -2439,7 +2444,8 @@ // to: O1 // count: O2 treated as signed // - address generate_disjoint_oop_copy(bool aligned, const char * name) { + address generate_disjoint_oop_copy(bool aligned, address *entry, const char *name, + bool dest_uninitialized = false) { const Register from = O0; // source array address const Register to = O1; // destination array address @@ -2451,14 +2457,16 @@ assert_clean_int(count, O3); // Make sure 'count' is clean int. - if (!aligned) disjoint_oop_copy_entry = __ pc(); - // caller can pass a 64-bit byte count here - if (!aligned) BLOCK_COMMENT("Entry:"); + if (entry != NULL) { + *entry = __ pc(); + // caller can pass a 64-bit byte count here + BLOCK_COMMENT("Entry:"); + } // save arguments for barrier generation __ mov(to, G1); __ mov(count, G5); - gen_write_ref_array_pre_barrier(G1, G5); + gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized); #ifdef _LP64 assert_clean_int(count, O3); // Make sure 'count' is clean int. if (UseCompressedOops) { @@ -2487,7 +2495,9 @@ // to: O1 // count: O2 treated as signed // - address generate_conjoint_oop_copy(bool aligned, const char * name) { + address generate_conjoint_oop_copy(bool aligned, address nooverlap_target, + address *entry, const char *name, + bool dest_uninitialized = false) { const Register from = O0; // source array address const Register to = O1; // destination array address @@ -2499,21 +2509,18 @@ assert_clean_int(count, O3); // Make sure 'count' is clean int. - if (!aligned) oop_copy_entry = __ pc(); - // caller can pass a 64-bit byte count here - if (!aligned) BLOCK_COMMENT("Entry:"); + if (entry != NULL) { + *entry = __ pc(); + // caller can pass a 64-bit byte count here + BLOCK_COMMENT("Entry:"); + } + + array_overlap_test(nooverlap_target, LogBytesPerHeapOop); // save arguments for barrier generation __ mov(to, G1); __ mov(count, G5); - - gen_write_ref_array_pre_barrier(G1, G5); - - address nooverlap_target = aligned ? - StubRoutines::arrayof_oop_disjoint_arraycopy() : - disjoint_oop_copy_entry; - - array_overlap_test(nooverlap_target, LogBytesPerHeapOop); + gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized); #ifdef _LP64 if (UseCompressedOops) { @@ -2582,7 +2589,7 @@ // ckval: O4 (super_klass) // ret: O0 zero for success; (-1^K) where K is partial transfer count // - address generate_checkcast_copy(const char* name) { + address generate_checkcast_copy(const char *name, address *entry, bool dest_uninitialized = false) { const Register O0_from = O0; // source array address const Register O1_to = O1; // destination array address @@ -2600,8 +2607,6 @@ StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); - gen_write_ref_array_pre_barrier(O1, O2); - #ifdef ASSERT // We sometimes save a frame (see generate_type_check below). // If this will cause trouble, let's fail now instead of later. 
@@ -2625,9 +2630,12 @@ } #endif //ASSERT - checkcast_copy_entry = __ pc(); - // caller can pass a 64-bit byte count here (from generic stub) - BLOCK_COMMENT("Entry:"); + if (entry != NULL) { + *entry = __ pc(); + // caller can pass a 64-bit byte count here (from generic stub) + BLOCK_COMMENT("Entry:"); + } + gen_write_ref_array_pre_barrier(O1_to, O2_count, dest_uninitialized); Label load_element, store_element, do_card_marks, fail, done; __ addcc(O2_count, 0, G1_remain); // initialize loop index, and test it @@ -2700,7 +2708,11 @@ // Examines the alignment of the operands and dispatches // to a long, int, short, or byte copy loop. // - address generate_unsafe_copy(const char* name) { + address generate_unsafe_copy(const char* name, + address byte_copy_entry, + address short_copy_entry, + address int_copy_entry, + address long_copy_entry) { const Register O0_from = O0; // source array address const Register O1_to = O1; // destination array address @@ -2796,8 +2808,13 @@ // O0 == 0 - success // O0 == -1 - need to call System.arraycopy // - address generate_generic_copy(const char *name) { - + address generate_generic_copy(const char *name, + address entry_jbyte_arraycopy, + address entry_jshort_arraycopy, + address entry_jint_arraycopy, + address entry_oop_arraycopy, + address entry_jlong_arraycopy, + address entry_checkcast_arraycopy) { Label L_failed, L_objArray; // Input registers @@ -2970,15 +2987,15 @@ BLOCK_COMMENT("choose copy loop based on element size"); __ cmp(G3_elsize, 0); - __ br(Assembler::equal,true,Assembler::pt,StubRoutines::_jbyte_arraycopy); + __ br(Assembler::equal, true, Assembler::pt, entry_jbyte_arraycopy); __ delayed()->signx(length, count); // length __ cmp(G3_elsize, LogBytesPerShort); - __ br(Assembler::equal,true,Assembler::pt,StubRoutines::_jshort_arraycopy); + __ br(Assembler::equal, true, Assembler::pt, entry_jshort_arraycopy); __ delayed()->signx(length, count); // length __ cmp(G3_elsize, LogBytesPerInt); - __ br(Assembler::equal,true,Assembler::pt,StubRoutines::_jint_arraycopy); + __ br(Assembler::equal, true, Assembler::pt, entry_jint_arraycopy); __ delayed()->signx(length, count); // length #ifdef ASSERT { Label L; @@ -2989,7 +3006,7 @@ __ bind(L); } #endif - __ br(Assembler::always,false,Assembler::pt,StubRoutines::_jlong_arraycopy); + __ br(Assembler::always, false, Assembler::pt, entry_jlong_arraycopy); __ delayed()->signx(length, count); // length // objArrayKlass @@ -3013,7 +3030,7 @@ __ add(src, src_pos, from); // src_addr __ add(dst, dst_pos, to); // dst_addr __ BIND(L_plain_copy); - __ br(Assembler::always, false, Assembler::pt,StubRoutines::_oop_arraycopy); + __ br(Assembler::always, false, Assembler::pt, entry_oop_arraycopy); __ delayed()->signx(length, count); // length __ BIND(L_checkcast_copy); @@ -3057,7 +3074,7 @@ __ ld_ptr(G4_dst_klass, ek_offset, O4); // dest elem klass // lduw(O4, sco_offset, O3); // sco of elem klass - __ br(Assembler::always, false, Assembler::pt, checkcast_copy_entry); + __ br(Assembler::always, false, Assembler::pt, entry_checkcast_arraycopy); __ delayed()->lduw(O4, sco_offset, O3); } @@ -3068,39 +3085,124 @@ } void generate_arraycopy_stubs() { - - // Note: the disjoint stubs must be generated first, some of - // the conjoint stubs use them. 
- StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy"); - StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy"); - StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy"); - StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy(false, "jlong_disjoint_arraycopy"); - StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy"); - StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, "arrayof_jbyte_disjoint_arraycopy"); - StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy"); - StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, "arrayof_jint_disjoint_arraycopy"); - StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, "arrayof_jlong_disjoint_arraycopy"); - StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy"); - - StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, "jbyte_arraycopy"); - StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy"); - StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, "jint_arraycopy"); - StubRoutines::_jlong_arraycopy = generate_conjoint_long_copy(false, "jlong_arraycopy"); - StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(false, "oop_arraycopy"); - StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, "arrayof_jbyte_arraycopy"); - StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy"); + address entry; + address entry_jbyte_arraycopy; + address entry_jshort_arraycopy; + address entry_jint_arraycopy; + address entry_oop_arraycopy; + address entry_jlong_arraycopy; + address entry_checkcast_arraycopy; + + //*** jbyte + // Always need aligned and unaligned versions + StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, + "jbyte_disjoint_arraycopy"); + StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, + &entry_jbyte_arraycopy, + "jbyte_arraycopy"); + StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry, + "arrayof_jbyte_disjoint_arraycopy"); + StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, entry, NULL, + "arrayof_jbyte_arraycopy"); + + //*** jshort + // Always need aligned and unaligned versions + StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, + "jshort_disjoint_arraycopy"); + StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, + &entry_jshort_arraycopy, + "jshort_arraycopy"); + StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry, + "arrayof_jshort_disjoint_arraycopy"); + StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, entry, NULL, + "arrayof_jshort_arraycopy"); + + //*** jint + // Aligned versions + StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, &entry, + "arrayof_jint_disjoint_arraycopy"); + StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy, + "arrayof_jint_arraycopy"); #ifdef _LP64 - // since sizeof(jint) < sizeof(HeapWord), there's a different flavor: - 
StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, "arrayof_jint_arraycopy"); - #else - StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; + // In 64 bit we need both aligned and unaligned versions of jint arraycopy. + // entry_jint_arraycopy always points to the unaligned version (notice that we overwrite it). + StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, &entry, + "jint_disjoint_arraycopy"); + StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, entry, + &entry_jint_arraycopy, + "jint_arraycopy"); +#else + // In 32 bit jints are always HeapWordSize aligned, so always use the aligned version + // (in fact in 32bit we always have a pre-loop part even in the aligned version, + // because it uses 64-bit loads/stores, so the aligned flag is actually ignored). + StubRoutines::_jint_disjoint_arraycopy = StubRoutines::_arrayof_jint_disjoint_arraycopy; + StubRoutines::_jint_arraycopy = StubRoutines::_arrayof_jint_arraycopy; #endif - StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; - StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; - - StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy"); - StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy"); - StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy"); + + + //*** jlong + // It is always aligned + StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, &entry, + "arrayof_jlong_disjoint_arraycopy"); + StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy(true, entry, &entry_jlong_arraycopy, + "arrayof_jlong_arraycopy"); + StubRoutines::_jlong_disjoint_arraycopy = StubRoutines::_arrayof_jlong_disjoint_arraycopy; + StubRoutines::_jlong_arraycopy = StubRoutines::_arrayof_jlong_arraycopy; + + + //*** oops + // Aligned versions + StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy(true, &entry, + "arrayof_oop_disjoint_arraycopy"); + StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy(true, entry, &entry_oop_arraycopy, + "arrayof_oop_arraycopy"); + // Aligned versions without pre-barriers + StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, &entry, + "arrayof_oop_disjoint_arraycopy_uninit", + /*dest_uninitialized*/true); + StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, entry, NULL, + "arrayof_oop_arraycopy_uninit", + /*dest_uninitialized*/true); +#ifdef _LP64 + if (UseCompressedOops) { + // With compressed oops we need unaligned versions, notice that we overwrite entry_oop_arraycopy. 
+ StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(false, &entry, + "oop_disjoint_arraycopy"); + StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(false, entry, &entry_oop_arraycopy, + "oop_arraycopy"); + // Unaligned versions without pre-barriers + StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(false, &entry, + "oop_disjoint_arraycopy_uninit", + /*dest_uninitialized*/true); + StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(false, entry, NULL, + "oop_arraycopy_uninit", + /*dest_uninitialized*/true); + } else +#endif + { + // oop arraycopy is always aligned on 32bit and 64bit without compressed oops + StubRoutines::_oop_disjoint_arraycopy = StubRoutines::_arrayof_oop_disjoint_arraycopy; + StubRoutines::_oop_arraycopy = StubRoutines::_arrayof_oop_arraycopy; + StubRoutines::_oop_disjoint_arraycopy_uninit = StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit; + StubRoutines::_oop_arraycopy_uninit = StubRoutines::_arrayof_oop_arraycopy_uninit; + } + + StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); + StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, + /*dest_uninitialized*/true); + + StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", + entry_jbyte_arraycopy, + entry_jshort_arraycopy, + entry_jint_arraycopy, + entry_jlong_arraycopy); + StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", + entry_jbyte_arraycopy, + entry_jshort_arraycopy, + entry_jint_arraycopy, + entry_oop_arraycopy, + entry_jlong_arraycopy, + entry_checkcast_arraycopy); StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); @@ -3224,21 +3326,6 @@ }; // end class declaration - -address StubGenerator::disjoint_byte_copy_entry = NULL; -address StubGenerator::disjoint_short_copy_entry = NULL; -address StubGenerator::disjoint_int_copy_entry = NULL; -address StubGenerator::disjoint_long_copy_entry = NULL; -address StubGenerator::disjoint_oop_copy_entry = NULL; - -address StubGenerator::byte_copy_entry = NULL; -address StubGenerator::short_copy_entry = NULL; -address StubGenerator::int_copy_entry = NULL; -address StubGenerator::long_copy_entry = NULL; -address StubGenerator::oop_copy_entry = NULL; - -address StubGenerator::checkcast_copy_entry = NULL; - void StubGenerator_generate(CodeBuffer* code, bool all) { StubGenerator g(code, all); }
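Note: besides replacing the StubGenerator's static ..._copy_entry fields with address* out-parameters that generate_arraycopy_stubs() wires together explicitly, this hunk gives the pre-barrier helper a dest_uninitialized flag and a switch over the barrier set kind. With G1's SATB barriers the pre-barrier records the old oops about to be overwritten, so it can be skipped when the destination is known to be freshly allocated; card-table barriers never need it. A schematic of that decision with hypothetical names (the case labels match the switch in the diff):

#include <cassert>

enum BarrierKind { G1SATBCT, G1SATBCTLogging, CardTableModRef, CardTableExtension, ModRef };

// Hypothetical predicate mirroring gen_write_ref_array_pre_barrier's switch:
// only the G1 SATB barriers need a pre-barrier, and even they can skip it when
// the destination array holds no previously published oops.
static bool needs_pre_barrier(BarrierKind kind, bool dest_uninitialized) {
  switch (kind) {
    case G1SATBCT:
    case G1SATBCTLogging:
      return !dest_uninitialized;
    case CardTableModRef:
    case CardTableExtension:
    case ModRef:
      return false;
  }
  return false;  // not reached
}

int main() {
  assert( needs_pre_barrier(G1SATBCTLogging, /*dest_uninitialized=*/false));
  assert(!needs_pre_barrier(G1SATBCTLogging, /*dest_uninitialized=*/true));
  assert(!needs_pre_barrier(CardTableModRef,  /*dest_uninitialized=*/false));
  return 0;
}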
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/sparc/vm/templateTable_sparc.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -334,8 +334,8 @@ void TemplateTable::fast_aldc(bool wide) { transition(vtos, atos); - if (!EnableMethodHandles) { - // We should not encounter this bytecode if !EnableMethodHandles. + if (!EnableInvokeDynamic) { + // We should not encounter this bytecode if !EnableInvokeDynamic. // The verifier will stop it. However, if we get past the verifier, // this will stop the thread in a reasonable way, without crashing the JVM. __ call_VM(noreg, CAST_FROM_FN_PTR(address, @@ -3303,7 +3303,7 @@ __ sll(Rret, LogBytesPerWord, Rret); __ ld_ptr(Rtemp, Rret, Rret); // get return address - __ load_heap_oop(G5_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle); + __ load_heap_oop(G5_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle); __ null_check(G3_method_handle); // Adjust Rret first so Llast_SP can be same as Rret
--- a/src/cpu/x86/vm/assembler_x86.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/assembler_x86.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1601,6 +1601,17 @@ emit_byte(0xC0 | encode); } +void Assembler::movdl(XMMRegister dst, Address src) { + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + InstructionMark im(this); + emit_byte(0x66); + prefix(src, dst); + emit_byte(0x0F); + emit_byte(0x6E); + emit_operand(dst, src); +} + + void Assembler::movdqa(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this); @@ -2306,7 +2317,7 @@ } void Assembler::prefetchr(Address src) { - NOT_LP64(assert(VM_Version::supports_3dnow(), "must support")); + NOT_LP64(assert(VM_Version::supports_3dnow_prefetch(), "must support")); InstructionMark im(this); prefetch_prefix(src); emit_byte(0x0D); @@ -2338,7 +2349,7 @@ } void Assembler::prefetchw(Address src) { - NOT_LP64(assert(VM_Version::supports_3dnow(), "must support")); + NOT_LP64(assert(VM_Version::supports_3dnow_prefetch(), "must support")); InstructionMark im(this); prefetch_prefix(src); emit_byte(0x0D); @@ -2412,7 +2423,10 @@ } void Assembler::psrlq(XMMRegister dst, int shift) { - // HMM Table D-1 says sse2 or mmx + // Shift 64 bit value logically right by specified number of bits. + // HMM Table D-1 says sse2 or mmx. + // Do not confuse it with psrldq SSE2 instruction which + // shifts 128 bit value in xmm register by number of bytes. NOT_LP64(assert(VM_Version::supports_sse(), "")); int encode = prefixq_and_encode(xmm2->encoding(), dst->encoding()); @@ -2423,6 +2437,18 @@ emit_byte(shift); } +void Assembler::psrldq(XMMRegister dst, int shift) { + // Shift 128 bit value in xmm register by number of bytes. + NOT_LP64(assert(VM_Version::supports_sse2(), "")); + + int encode = prefixq_and_encode(xmm3->encoding(), dst->encoding()); + emit_byte(0x66); + emit_byte(0x0F); + emit_byte(0x73); + emit_byte(0xC0 | encode); + emit_byte(shift); +} + void Assembler::ptest(XMMRegister dst, Address src) { assert(VM_Version::supports_sse4_1(), ""); @@ -3484,7 +3510,6 @@ // anywhere in the codeCache then we are always reachable. // This would have to change if we ever save/restore shared code // to be more pessimistic. - disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int)); if (!is_simm32(disp)) return false; disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int)); @@ -3508,6 +3533,14 @@ return is_simm32(disp); } +// Check if the polling page is not reachable from the code cache using rip-relative +// addressing. 
+bool Assembler::is_polling_page_far() { + intptr_t addr = (intptr_t)os::get_polling_page(); + return !is_simm32(addr - (intptr_t)CodeCache::low_bound()) || + !is_simm32(addr - (intptr_t)CodeCache::high_bound()); +} + void Assembler::emit_data64(jlong data, relocInfo::relocType rtype, int format) { @@ -6860,6 +6893,11 @@ } } +void MacroAssembler::testl(Register dst, AddressLiteral src) { + assert(reachable(src), "Address should be reachable"); + testl(dst, as_Address(src)); +} + ////////////////////////////////////////////////////////////////////////////////// #ifndef SERIALGC @@ -7095,17 +7133,6 @@ LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); } -void MacroAssembler::test32(Register src1, AddressLiteral src2) { - // src2 must be rval - - if (reachable(src2)) { - testl(src1, as_Address(src2)); - } else { - lea(rscratch1, src2); - testl(src1, Address(rscratch1, 0)); - } -} - // C++ bool manipulation void MacroAssembler::testbool(Register dst) { if(sizeof(bool) == 1) @@ -7742,6 +7769,28 @@ } } +void MacroAssembler::cmov32(Condition cc, Register dst, Address src) { + if (VM_Version::supports_cmov()) { + cmovl(cc, dst, src); + } else { + Label L; + jccb(negate_condition(cc), L); + movl(dst, src); + bind(L); + } +} + +void MacroAssembler::cmov32(Condition cc, Register dst, Register src) { + if (VM_Version::supports_cmov()) { + cmovl(cc, dst, src); + } else { + Label L; + jccb(negate_condition(cc), L); + movl(dst, src); + bind(L); + } +} + void MacroAssembler::verify_oop(Register reg, const char* s) { if (!VerifyOops) return; @@ -7805,7 +7854,7 @@ void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg, Register temp_reg, Label& wrong_method_type) { - Address type_addr(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)); + Address type_addr(mh_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg)); // compare method type against that of the receiver if (UseCompressedOops) { load_heap_oop(temp_reg, type_addr); @@ -7825,14 +7874,14 @@ Register temp_reg) { assert_different_registers(vmslots_reg, mh_reg, temp_reg); // load mh.type.form.vmslots - if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) { + if (java_lang_invoke_MethodHandle::vmslots_offset_in_bytes() != 0) { // hoist vmslots into every mh to avoid dependent load chain - movl(vmslots_reg, Address(mh_reg, delayed_value(java_dyn_MethodHandle::vmslots_offset_in_bytes, temp_reg))); + movl(vmslots_reg, Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::vmslots_offset_in_bytes, temp_reg))); } else { Register temp2_reg = vmslots_reg; - load_heap_oop(temp2_reg, Address(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg))); - load_heap_oop(temp2_reg, Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg))); - movl(vmslots_reg, Address(temp2_reg, delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, temp_reg))); + load_heap_oop(temp2_reg, Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg))); + load_heap_oop(temp2_reg, Address(temp2_reg, delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, temp_reg))); + movl(vmslots_reg, Address(temp2_reg, delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, temp_reg))); } } @@ -7847,7 +7896,7 @@ // pick out the interpreted side of the handler // NOTE: vmentry is not an oop! 
- movptr(temp_reg, Address(mh_reg, delayed_value(java_dyn_MethodHandle::vmentry_offset_in_bytes, temp_reg))); + movptr(temp_reg, Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes, temp_reg))); // off we go... jmp(Address(temp_reg, MethodHandleEntry::from_interpreted_entry_offset_in_bytes())); @@ -7892,12 +7941,12 @@ #endif push(rax); // save rax, // addr may contain rsp so we will have to adjust it based on the push - // we just did + // we just did (and on 64 bit we do two pushes) // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which // stores rax into addr which is backwards of what was intended. if (addr.uses(rsp)) { lea(rax, addr); - pushptr(Address(rax, BytesPerWord)); + pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord)); } else { pushptr(addr); } @@ -8347,6 +8396,17 @@ movptr(dst, src); } +// Doesn't do verfication, generates fixed size code +void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) { +#ifdef _LP64 + if (UseCompressedOops) { + movl(dst, src); + decode_heap_oop_not_null(dst); + } else +#endif + movptr(dst, src); +} + void MacroAssembler::store_heap_oop(Address dst, Register src) { #ifdef _LP64 if (UseCompressedOops) { @@ -8567,101 +8627,418 @@ } #endif // _LP64 -// IndexOf substring. -void MacroAssembler::string_indexof(Register str1, Register str2, - Register cnt1, Register cnt2, Register result, - XMMRegister vec, Register tmp) { +// IndexOf for constant substrings with size >= 8 chars +// which don't need to be loaded through stack. +void MacroAssembler::string_indexofC8(Register str1, Register str2, + Register cnt1, Register cnt2, + int int_cnt2, Register result, + XMMRegister vec, Register tmp) { assert(UseSSE42Intrinsics, "SSE4.2 is required"); - Label RELOAD_SUBSTR, PREP_FOR_SCAN, SCAN_TO_SUBSTR, - SCAN_SUBSTR, RET_NOT_FOUND, CLEANUP; - - push(str1); // string addr - push(str2); // substr addr - push(cnt2); // substr count - jmpb(PREP_FOR_SCAN); - - // Substr count saved at sp - // Substr saved at sp+1*wordSize - // String saved at sp+2*wordSize - - // Reload substr for rescan - bind(RELOAD_SUBSTR); - movl(cnt2, Address(rsp, 0)); - movptr(str2, Address(rsp, wordSize)); - // We came here after the beginninig of the substring was - // matched but the rest of it was not so we need to search - // again. Start from the next element after the previous match. 
- subptr(str1, result); // Restore counter - shrl(str1, 1); - addl(cnt1, str1); - decrementl(cnt1); - lea(str1, Address(result, 2)); // Reload string - - // Load substr - bind(PREP_FOR_SCAN); - movdqu(vec, Address(str2, 0)); - addl(cnt1, 8); // prime the loop - subptr(str1, 16); - - // Scan string for substr in 16-byte vectors - bind(SCAN_TO_SUBSTR); - subl(cnt1, 8); - addptr(str1, 16); - - // pcmpestri + // This method uses pcmpestri inxtruction with bound registers // inputs: // xmm - substring // rax - substring length (elements count) - // mem - scaned string + // mem - scanned string + // rdx - string length (elements count) + // 0xd - mode: 1100 (substring search) + 01 (unsigned shorts) + // outputs: + // rcx - matched index in string + assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri"); + + Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR, + RET_FOUND, RET_NOT_FOUND, EXIT, FOUND_SUBSTR, + MATCH_SUBSTR_HEAD, RELOAD_STR, FOUND_CANDIDATE; + + // Note, inline_string_indexOf() generates checks: + // if (substr.count > string.count) return -1; + // if (substr.count == 0) return 0; + assert(int_cnt2 >= 8, "this code isused only for cnt2 >= 8 chars"); + + // Load substring. + movdqu(vec, Address(str2, 0)); + movl(cnt2, int_cnt2); + movptr(result, str1); // string addr + + if (int_cnt2 > 8) { + jmpb(SCAN_TO_SUBSTR); + + // Reload substr for rescan, this code + // is executed only for large substrings (> 8 chars) + bind(RELOAD_SUBSTR); + movdqu(vec, Address(str2, 0)); + negptr(cnt2); // Jumped here with negative cnt2, convert to positive + + bind(RELOAD_STR); + // We came here after the beginning of the substring was + // matched but the rest of it was not so we need to search + // again. Start from the next element after the previous match. + + // cnt2 is number of substring reminding elements and + // cnt1 is number of string reminding elements when cmp failed. + // Restored cnt1 = cnt1 - cnt2 + int_cnt2 + subl(cnt1, cnt2); + addl(cnt1, int_cnt2); + movl(cnt2, int_cnt2); // Now restore cnt2 + + decrementl(cnt1); // Shift to next element + cmpl(cnt1, cnt2); + jccb(Assembler::negative, RET_NOT_FOUND); // Left less then substring + + addptr(result, 2); + + } // (int_cnt2 > 8) + + // Scan string for start of substr in 16-byte vectors + bind(SCAN_TO_SUBSTR); + pcmpestri(vec, Address(result, 0), 0x0d); + jccb(Assembler::below, FOUND_CANDIDATE); // CF == 1 + subl(cnt1, 8); + jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string + cmpl(cnt1, cnt2); + jccb(Assembler::negative, RET_NOT_FOUND); // Left less then substring + addptr(result, 16); + jmpb(SCAN_TO_SUBSTR); + + // Found a potential substr + bind(FOUND_CANDIDATE); + // Matched whole vector if first element matched (tmp(rcx) == 0). + if (int_cnt2 == 8) { + jccb(Assembler::overflow, RET_FOUND); // OF == 1 + } else { // int_cnt2 > 8 + jccb(Assembler::overflow, FOUND_SUBSTR); + } + // After pcmpestri tmp(rcx) contains matched element index + // Compute start addr of substr + lea(result, Address(result, tmp, Address::times_2)); + + // Make sure string is still long enough + subl(cnt1, tmp); + cmpl(cnt1, cnt2); + if (int_cnt2 == 8) { + jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR); + } else { // int_cnt2 > 8 + jccb(Assembler::greaterEqual, MATCH_SUBSTR_HEAD); + } + // Left less then substring. + + bind(RET_NOT_FOUND); + movl(result, -1); + jmpb(EXIT); + + if (int_cnt2 > 8) { + // This code is optimized for the case when whole substring + // is matched if its head is matched. 
+ bind(MATCH_SUBSTR_HEAD); + pcmpestri(vec, Address(result, 0), 0x0d); + // Reload only string if does not match + jccb(Assembler::noOverflow, RELOAD_STR); // OF == 0 + + Label CONT_SCAN_SUBSTR; + // Compare the rest of substring (> 8 chars). + bind(FOUND_SUBSTR); + // First 8 chars are already matched. + negptr(cnt2); + addptr(cnt2, 8); + + bind(SCAN_SUBSTR); + subl(cnt1, 8); + cmpl(cnt2, -8); // Do not read beyond substring + jccb(Assembler::lessEqual, CONT_SCAN_SUBSTR); + // Back-up strings to avoid reading beyond substring: + // cnt1 = cnt1 - cnt2 + 8 + addl(cnt1, cnt2); // cnt2 is negative + addl(cnt1, 8); + movl(cnt2, 8); negptr(cnt2); + bind(CONT_SCAN_SUBSTR); + if (int_cnt2 < (int)G) { + movdqu(vec, Address(str2, cnt2, Address::times_2, int_cnt2*2)); + pcmpestri(vec, Address(result, cnt2, Address::times_2, int_cnt2*2), 0x0d); + } else { + // calculate index in register to avoid integer overflow (int_cnt2*2) + movl(tmp, int_cnt2); + addptr(tmp, cnt2); + movdqu(vec, Address(str2, tmp, Address::times_2, 0)); + pcmpestri(vec, Address(result, tmp, Address::times_2, 0), 0x0d); + } + // Need to reload strings pointers if not matched whole vector + jccb(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0 + addptr(cnt2, 8); + jccb(Assembler::negative, SCAN_SUBSTR); + // Fall through if found full substring + + } // (int_cnt2 > 8) + + bind(RET_FOUND); + // Found result if we matched full small substring. + // Compute substr offset + subptr(result, str1); + shrl(result, 1); // index + bind(EXIT); + +} // string_indexofC8 + +// Small strings are loaded through stack if they cross page boundary. +void MacroAssembler::string_indexof(Register str1, Register str2, + Register cnt1, Register cnt2, + int int_cnt2, Register result, + XMMRegister vec, Register tmp) { + assert(UseSSE42Intrinsics, "SSE4.2 is required"); + // + // int_cnt2 is length of small (< 8 chars) constant substring + // or (-1) for non constant substring in which case its length + // is in cnt2 register. + // + // Note, inline_string_indexOf() generates checks: + // if (substr.count > string.count) return -1; + // if (substr.count == 0) return 0; + // + assert(int_cnt2 == -1 || (0 < int_cnt2 && int_cnt2 < 8), "should be != 0"); + + // This method uses pcmpestri inxtruction with bound registers + // inputs: + // xmm - substring + // rax - substring length (elements count) + // mem - scanned string // rdx - string length (elements count) // 0xd - mode: 1100 (substring search) + 01 (unsigned shorts) // outputs: // rcx - matched index in string assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri"); - pcmpestri(vec, Address(str1, 0), 0x0d); - jcc(Assembler::above, SCAN_TO_SUBSTR); // CF == 0 && ZF == 0 - jccb(Assembler::aboveEqual, RET_NOT_FOUND); // CF == 0 - - // Fallthrough: found a potential substr + Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR, ADJUST_STR, + RET_FOUND, RET_NOT_FOUND, CLEANUP, FOUND_SUBSTR, + FOUND_CANDIDATE; + + { //======================================================== + // We don't know where these strings are located + // and we can't read beyond them. Load them through stack. 
+ Label BIG_STRINGS, CHECK_STR, COPY_SUBSTR, COPY_STR; + + movptr(tmp, rsp); // save old SP + + if (int_cnt2 > 0) { // small (< 8 chars) constant substring + if (int_cnt2 == 1) { // One char + load_unsigned_short(result, Address(str2, 0)); + movdl(vec, result); // move 32 bits + } else if (int_cnt2 == 2) { // Two chars + movdl(vec, Address(str2, 0)); // move 32 bits + } else if (int_cnt2 == 4) { // Four chars + movq(vec, Address(str2, 0)); // move 64 bits + } else { // cnt2 = { 3, 5, 6, 7 } + // Array header size is 12 bytes in 32-bit VM + // + 6 bytes for 3 chars == 18 bytes, + // enough space to load vec and shift. + assert(HeapWordSize*typeArrayKlass::header_size() >= 12,"sanity"); + movdqu(vec, Address(str2, (int_cnt2*2)-16)); + psrldq(vec, 16-(int_cnt2*2)); + } + } else { // not constant substring + cmpl(cnt2, 8); + jccb(Assembler::aboveEqual, BIG_STRINGS); // Both strings are big enough + + // We can read beyond string if srt+16 does not cross page boundary + // since heaps are aligned and mapped by pages. + assert(os::vm_page_size() < (int)G, "default page should be small"); + movl(result, str2); // We need only low 32 bits + andl(result, (os::vm_page_size()-1)); + cmpl(result, (os::vm_page_size()-16)); + jccb(Assembler::belowEqual, CHECK_STR); + + // Move small strings to stack to allow load 16 bytes into vec. + subptr(rsp, 16); + int stk_offset = wordSize-2; + push(cnt2); + + bind(COPY_SUBSTR); + load_unsigned_short(result, Address(str2, cnt2, Address::times_2, -2)); + movw(Address(rsp, cnt2, Address::times_2, stk_offset), result); + decrement(cnt2); + jccb(Assembler::notZero, COPY_SUBSTR); + + pop(cnt2); + movptr(str2, rsp); // New substring address + } // non constant + + bind(CHECK_STR); + cmpl(cnt1, 8); + jccb(Assembler::aboveEqual, BIG_STRINGS); + + // Check cross page boundary. + movl(result, str1); // We need only low 32 bits + andl(result, (os::vm_page_size()-1)); + cmpl(result, (os::vm_page_size()-16)); + jccb(Assembler::belowEqual, BIG_STRINGS); + + subptr(rsp, 16); + int stk_offset = -2; + if (int_cnt2 < 0) { // not constant + push(cnt2); + stk_offset += wordSize; + } + movl(cnt2, cnt1); + + bind(COPY_STR); + load_unsigned_short(result, Address(str1, cnt2, Address::times_2, -2)); + movw(Address(rsp, cnt2, Address::times_2, stk_offset), result); + decrement(cnt2); + jccb(Assembler::notZero, COPY_STR); + + if (int_cnt2 < 0) { // not constant + pop(cnt2); + } + movptr(str1, rsp); // New string address + + bind(BIG_STRINGS); + // Load substring. + if (int_cnt2 < 0) { // -1 + movdqu(vec, Address(str2, 0)); + push(cnt2); // substr count + push(str2); // substr addr + push(str1); // string addr + } else { + // Small (< 8 chars) constant substrings are loaded already. + movl(cnt2, int_cnt2); + } + push(tmp); // original SP + + } // Finished loading + + //======================================================== + // Start search + // + + movptr(result, str1); // string addr + + if (int_cnt2 < 0) { // Only for non constant substring + jmpb(SCAN_TO_SUBSTR); + + // SP saved at sp+0 + // String saved at sp+1*wordSize + // Substr saved at sp+2*wordSize + // Substr count saved at sp+3*wordSize + + // Reload substr for rescan, this code + // is executed only for large substrings (> 8 chars) + bind(RELOAD_SUBSTR); + movptr(str2, Address(rsp, 2*wordSize)); + movl(cnt2, Address(rsp, 3*wordSize)); + movdqu(vec, Address(str2, 0)); + // We came here after the beginning of the substring was + // matched but the rest of it was not so we need to search + // again. 
Start from the next element after the previous match. + subptr(str1, result); // Restore counter + shrl(str1, 1); + addl(cnt1, str1); + decrementl(cnt1); // Shift to next element + cmpl(cnt1, cnt2); + jccb(Assembler::negative, RET_NOT_FOUND); // Left less then substring + + addptr(result, 2); + } // non constant + + // Scan string for start of substr in 16-byte vectors + bind(SCAN_TO_SUBSTR); + assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri"); + pcmpestri(vec, Address(result, 0), 0x0d); + jccb(Assembler::below, FOUND_CANDIDATE); // CF == 1 + subl(cnt1, 8); + jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string + cmpl(cnt1, cnt2); + jccb(Assembler::negative, RET_NOT_FOUND); // Left less then substring + addptr(result, 16); + + bind(ADJUST_STR); + cmpl(cnt1, 8); // Do not read beyond string + jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR); + // Back-up string to avoid reading beyond string. + lea(result, Address(result, cnt1, Address::times_2, -16)); + movl(cnt1, 8); + jmpb(SCAN_TO_SUBSTR); + + // Found a potential substr + bind(FOUND_CANDIDATE); + // After pcmpestri tmp(rcx) contains matched element index // Make sure string is still long enough subl(cnt1, tmp); cmpl(cnt1, cnt2); - jccb(Assembler::negative, RET_NOT_FOUND); - // Compute start addr of substr - lea(str1, Address(str1, tmp, Address::times_2)); - movptr(result, str1); // save - - // Compare potential substr - addl(cnt1, 8); // prime the loop - addl(cnt2, 8); - subptr(str1, 16); - subptr(str2, 16); - - // Scan 16-byte vectors of string and substr - bind(SCAN_SUBSTR); - subl(cnt1, 8); - subl(cnt2, 8); - addptr(str1, 16); - addptr(str2, 16); - movdqu(vec, Address(str2, 0)); - pcmpestri(vec, Address(str1, 0), 0x0d); - jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0 - jcc(Assembler::positive, SCAN_SUBSTR); // SF == 0 - - // Compute substr offset - subptr(result, Address(rsp, 2*wordSize)); - shrl(result, 1); // index - jmpb(CLEANUP); + jccb(Assembler::greaterEqual, FOUND_SUBSTR); + // Left less then substring. bind(RET_NOT_FOUND); movl(result, -1); + jmpb(CLEANUP); + + bind(FOUND_SUBSTR); + // Compute start addr of substr + lea(result, Address(result, tmp, Address::times_2)); + + if (int_cnt2 > 0) { // Constant substring + // Repeat search for small substring (< 8 chars) + // from new point without reloading substring. + // Have to check that we don't read beyond string. + cmpl(tmp, 8-int_cnt2); + jccb(Assembler::greater, ADJUST_STR); + // Fall through if matched whole substring. + } else { // non constant + assert(int_cnt2 == -1, "should be != 0"); + + addl(tmp, cnt2); + // Found result if we matched whole substring. + cmpl(tmp, 8); + jccb(Assembler::lessEqual, RET_FOUND); + + // Repeat search for small substring (<= 8 chars) + // from new point 'str1' without reloading substring. + cmpl(cnt2, 8); + // Have to check that we don't read beyond string. + jccb(Assembler::lessEqual, ADJUST_STR); + + Label CHECK_NEXT, CONT_SCAN_SUBSTR, RET_FOUND_LONG; + // Compare the rest of substring (> 8 chars). + movptr(str1, result); + + cmpl(tmp, cnt2); + // First 8 chars are already matched. 
+ jccb(Assembler::equal, CHECK_NEXT); + + bind(SCAN_SUBSTR); + pcmpestri(vec, Address(str1, 0), 0x0d); + // Need to reload strings pointers if not matched whole vector + jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0 + + bind(CHECK_NEXT); + subl(cnt2, 8); + jccb(Assembler::lessEqual, RET_FOUND_LONG); // Found full substring + addptr(str1, 16); + addptr(str2, 16); + subl(cnt1, 8); + cmpl(cnt2, 8); // Do not read beyond substring + jccb(Assembler::greaterEqual, CONT_SCAN_SUBSTR); + // Back-up strings to avoid reading beyond substring. + lea(str2, Address(str2, cnt2, Address::times_2, -16)); + lea(str1, Address(str1, cnt2, Address::times_2, -16)); + subl(cnt1, cnt2); + movl(cnt2, 8); + addl(cnt1, 8); + bind(CONT_SCAN_SUBSTR); + movdqu(vec, Address(str2, 0)); + jmpb(SCAN_SUBSTR); + + bind(RET_FOUND_LONG); + movptr(str1, Address(rsp, wordSize)); + } // non constant + + bind(RET_FOUND); + // Compute substr offset + subptr(result, str1); + shrl(result, 1); // index bind(CLEANUP); - addptr(rsp, 3*wordSize); -} + pop(rsp); // restore SP + +} // string_indexof // Compare strings. void MacroAssembler::string_compare(Register str1, Register str2, @@ -8675,14 +9052,7 @@ movl(result, cnt1); subl(cnt1, cnt2); push(cnt1); - if (VM_Version::supports_cmov()) { - cmovl(Assembler::lessEqual, cnt2, result); - } else { - Label GT_LABEL; - jccb(Assembler::greater, GT_LABEL); - movl(cnt2, result); - bind(GT_LABEL); - } + cmov32(Assembler::lessEqual, cnt2, result); // Is the minimum length zero? testl(cnt2, cnt2);
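Note: the rewritten string_indexof/string_indexofC8 code is built around pcmpestri in "equal ordered, unsigned word" mode (the 0x0d immediate in the listing): the instruction reports where the substring's leading characters start matching inside a 16-byte window of the string. A standalone illustration of that mode using the corresponding compiler intrinsic (compile with SSE4.2 enabled, e.g. -msse4.2); this only shows the single-window comparison, not the stub's page-boundary handling, re-scanning, or tail logic:

#include <immintrin.h>
#include <cstdint>
#include <cstdio>

// Where does 'needle' (up to 8 UTF-16 code units) start matching inside one
// 16-byte window of 'hay'? Returns 8 if no ordered match begins in the window.
// The caller must guarantee 16 readable bytes behind both pointers.
static int match_in_window(const uint16_t* hay, int hay_len,
                           const uint16_t* needle, int needle_len) {
  __m128i sub = _mm_loadu_si128(reinterpret_cast<const __m128i*>(needle));
  __m128i str = _mm_loadu_si128(reinterpret_cast<const __m128i*>(hay));
  // 0x0d == _SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ORDERED, the mode used above;
  // the returned index is in 16-bit elements, like the stub's rcx result.
  return _mm_cmpestri(sub, needle_len, str, hay_len,
                      _SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ORDERED);
}

int main() {
  const uint16_t hay[8]    = { 'a', 'b', 'c', 'a', 'b', 'd', 0, 0 };
  const uint16_t needle[8] = { 'a', 'b', 'd', 0, 0, 0, 0, 0 };
  std::printf("match starts at index %d\n", match_in_window(hay, 6, needle, 3));  // prints 3
  return 0;
}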
--- a/src/cpu/x86/vm/assembler_x86.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/assembler_x86.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -385,10 +385,18 @@ }; class ExternalAddress: public AddressLiteral { - - public: - - ExternalAddress(address target) : AddressLiteral(target, relocInfo::external_word_type){} + private: + static relocInfo::relocType reloc_for_target(address target) { + // Sometimes ExternalAddress is used for values which aren't + // exactly addresses, like the card table base. + // external_word_type can't be used for values in the first page + // so just skip the reloc in that case. + return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none; + } + + public: + + ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(target)) {} }; @@ -580,7 +588,6 @@ void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0); void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0); - bool reachable(AddressLiteral adr) NOT_LP64({ return true;}); // These are all easily abused and hence protected @@ -683,6 +690,8 @@ static bool is_simm32(int32_t x) { return true; } #endif // _LP64 + static bool is_polling_page_far() NOT_LP64({ return false;}); + // Generic instructions // Does 32bit or 64bit as needed for the platform. In some sense these // belong in macro assembler but there is no need for both varieties to exist @@ -1121,6 +1130,7 @@ void movdl(XMMRegister dst, Register src); void movdl(Register dst, XMMRegister src); + void movdl(XMMRegister dst, Address src); // Move Double Quadword void movdq(XMMRegister dst, Register src); @@ -1288,9 +1298,12 @@ void pshuflw(XMMRegister dst, XMMRegister src, int mode); void pshuflw(XMMRegister dst, Address src, int mode); - // Shift Right Logical Quadword Immediate + // Shift Right by bits Logical Quadword Immediate void psrlq(XMMRegister dst, int shift); + // Shift Right by bytes Logical DoubleQuadword Immediate + void psrldq(XMMRegister dst, int shift); + // Logical Compare Double Quadword void ptest(XMMRegister dst, XMMRegister src); void ptest(XMMRegister dst, Address src); @@ -1696,6 +1709,7 @@ void store_klass(Register dst, Register src); void load_heap_oop(Register dst, Address src); + void load_heap_oop_not_null(Register dst, Address src); void store_heap_oop(Address dst, Register src); // Used for storing NULL. All other oop constants should be @@ -2090,7 +2104,10 @@ void leal32(Register dst, Address src) { leal(dst, src); } - void test32(Register src1, AddressLiteral src2); + // Import other testl() methods from the parent class or else + // they will be hidden by the following overriding declaration. 
+ using Assembler::testl; + void testl(Register dst, AddressLiteral src); void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } @@ -2236,10 +2253,13 @@ // Data - void cmov(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmovl(cc, dst, src)); } - - void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmovl(cc, dst, src)); } - void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmovl(cc, dst, src)); } + void cmov32( Condition cc, Register dst, Address src); + void cmov32( Condition cc, Register dst, Register src); + + void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); } + + void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); } + void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); } void movoop(Register dst, jobject obj); void movoop(Address dst, jobject obj); @@ -2290,10 +2310,22 @@ void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); } // IndexOf strings. + // Small strings are loaded through stack if they cross page boundary. void string_indexof(Register str1, Register str2, - Register cnt1, Register cnt2, Register result, + Register cnt1, Register cnt2, + int int_cnt2, Register result, XMMRegister vec, Register tmp); + // IndexOf for constant substrings with size >= 8 elements + // which don't need to be loaded through stack. + void string_indexofC8(Register str1, Register str2, + Register cnt1, Register cnt2, + int int_cnt2, Register result, + XMMRegister vec, Register tmp); + + // Smallest code: we don't need to load through stack, + // check string tail. + // Compare strings. void string_compare(Register str1, Register str2, Register cnt1, Register cnt2, Register result,
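Note: the replacement of test32() with a testl(Register, AddressLiteral) overload in MacroAssembler relies on the using-declaration added above: without it, declaring any testl in the derived class would hide every inherited testl overload. A self-contained illustration of that C++ name-hiding rule (the classes here are hypothetical, not the real assembler hierarchy):

#include <cstdio>

struct Base {
  void testl(int)           { std::printf("Base::testl(int)\n"); }
  void testl(int, long)     { std::printf("Base::testl(int, long)\n"); }
};

struct Derived : Base {
  using Base::testl;                    // re-expose the inherited overloads
  void testl(const char*)   { std::printf("Derived::testl(literal)\n"); }
};

int main() {
  Derived d;
  d.testl(0);        // still finds Base::testl(int) thanks to the using-declaration
  d.testl(0, 1L);    // and Base::testl(int, long)
  d.testl("addr");   // the new overload added in the derived class
  return 0;
}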
--- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -313,10 +313,15 @@ } assert(_obj != noreg, "must be a valid register"); Register tmp = rax; - if (_obj == tmp) tmp = rbx; + Register tmp2 = rbx; __ push(tmp); + __ push(tmp2); + // Load without verification to keep code size small. We need it because + // begin_initialized_entry_offset has to fit in a byte. Also, we know it's not null. + __ load_heap_oop_not_null(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes())); __ get_thread(tmp); - __ cmpptr(tmp, Address(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc))); + __ cmpptr(tmp, Address(tmp2, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc))); + __ pop(tmp2); __ pop(tmp); __ jcc(Assembler::notEqual, call_patch);
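Note: the patching stub now loads the klass from the java.lang.Class mirror (via the new load_heap_oop_not_null, which emits fixed-size code so the patch-site offset still fits in a byte) and compares the class's initializing thread against the current thread. The underlying rule is that a class under initialization may be used by the thread running its initializer, while any other thread must take the patch/slow path. A rough sketch of that decision with hypothetical names:

#include <cassert>

// Hypothetical model of the check emitted above: the stub falls through only
// when the class is being initialized by the current thread; any other thread
// branches to call_patch (the slow path).
struct KlassState { const void* init_thread; };

static bool must_take_slow_path(const KlassState& k, const void* current_thread) {
  return k.init_thread != current_thread;    // mirrors jcc(notEqual, call_patch)
}

int main() {
  int me_marker = 0, other_marker = 0;
  KlassState k = { &me_marker };
  assert(!must_take_slow_path(k, &me_marker));     // initializing thread may proceed
  assert( must_take_slow_path(k, &other_marker));  // everyone else goes to the patch call
  return 0;
}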
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "asm/assembler.hpp" #include "c1/c1_Compilation.hpp" #include "c1/c1_LIRAssembler.hpp" #include "c1/c1_MacroAssembler.hpp" @@ -456,10 +457,8 @@ __ verify_not_null_oop(rax); // search an exception handler (rax: exception oop, rdx: throwing pc) - __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id))); - - __ stop("should not reach here"); - + __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id))); + __ should_not_reach_here(); assert(code_offset() - offset <= exception_handler_size, "overflow"); __ end_a_stub(); @@ -571,24 +570,13 @@ __ lea (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); // compute minimum length (in rax) and difference of lengths (on top of stack) - if (VM_Version::supports_cmov()) { - __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes())); - __ movl (rax, Address(rax, java_lang_String::count_offset_in_bytes())); - __ mov (rcx, rbx); - __ subptr (rbx, rax); // subtract lengths - __ push (rbx); // result - __ cmov (Assembler::lessEqual, rax, rcx); - } else { - Label L; - __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes())); - __ movl (rcx, Address(rax, java_lang_String::count_offset_in_bytes())); - __ mov (rax, rbx); - __ subptr (rbx, rcx); - __ push (rbx); - __ jcc (Assembler::lessEqual, L); - __ mov (rax, rcx); - __ bind (L); - } + __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes())); + __ movl (rax, Address(rax, java_lang_String::count_offset_in_bytes())); + __ mov (rcx, rbx); + __ subptr(rbx, rax); // subtract lengths + __ push (rbx); // result + __ cmov (Assembler::lessEqual, rax, rcx); + // is minimum length 0? 
Label noLoop, haveResult; __ testptr (rax, rax); @@ -650,12 +638,13 @@ AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()), relocInfo::poll_return_type); - // NOTE: the requires that the polling page be reachable else the reloc - // goes to the movq that loads the address and not the faulting instruction - // which breaks the signal handler code - - __ test32(rax, polling_page); - + if (Assembler::is_polling_page_far()) { + __ lea(rscratch1, polling_page); + __ relocate(relocInfo::poll_return_type); + __ testl(rax, Address(rscratch1, 0)); + } else { + __ testl(rax, polling_page); + } __ ret(0); } @@ -663,20 +652,17 @@ int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) { AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()), relocInfo::poll_type); - - if (info != NULL) { + guarantee(info != NULL, "Shouldn't be NULL"); + int offset = __ offset(); + if (Assembler::is_polling_page_far()) { + __ lea(rscratch1, polling_page); + offset = __ offset(); add_debug_info_for_branch(info); + __ testl(rax, Address(rscratch1, 0)); } else { - ShouldNotReachHere(); + add_debug_info_for_branch(info); + __ testl(rax, polling_page); } - - int offset = __ offset(); - - // NOTE: the requires that the polling page be reachable else the reloc - // goes to the movq that loads the address and not the faulting instruction - // which breaks the signal handler code - - __ test32(rax, polling_page); return offset; } @@ -1415,7 +1401,7 @@ default: ShouldNotReachHere(); break; } - } else if (VM_Version::supports_3dnow()) { + } else if (VM_Version::supports_3dnow_prefetch()) { __ prefetchr(from_addr); } } @@ -1438,7 +1424,7 @@ default: ShouldNotReachHere(); break; } - } else if (VM_Version::supports_3dnow()) { + } else if (VM_Version::supports_3dnow_prefetch()) { __ prefetchw(from_addr); } } @@ -3116,7 +3102,7 @@ BasicType basic_type = default_type != NULL ? 
default_type->element_type()->basic_type() : T_ILLEGAL; if (basic_type == T_ARRAY) basic_type = T_OBJECT; - // if we don't know anything or it's an object array, just go through the generic arraycopy + // if we don't know anything, just go through the generic arraycopy if (default_type == NULL) { Label done; // save outgoing arguments on stack in case call to System.arraycopy is needed @@ -3137,7 +3123,9 @@ store_parameter(src, 4); NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");) - address entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy); + address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy); + + address copyfunc_addr = StubRoutines::generic_arraycopy(); // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint #ifdef _LP64 @@ -3155,11 +3143,29 @@ // Allocate abi space for args but be sure to keep stack aligned __ subptr(rsp, 6*wordSize); store_parameter(j_rarg4, 4); - __ call(RuntimeAddress(entry)); + if (copyfunc_addr == NULL) { // Use C version if stub was not generated + __ call(RuntimeAddress(C_entry)); + } else { +#ifndef PRODUCT + if (PrintC1Statistics) { + __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); + } +#endif + __ call(RuntimeAddress(copyfunc_addr)); + } __ addptr(rsp, 6*wordSize); #else __ mov(c_rarg4, j_rarg4); - __ call(RuntimeAddress(entry)); + if (copyfunc_addr == NULL) { // Use C version if stub was not generated + __ call(RuntimeAddress(C_entry)); + } else { +#ifndef PRODUCT + if (PrintC1Statistics) { + __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); + } +#endif + __ call(RuntimeAddress(copyfunc_addr)); + } #endif // _WIN64 #else __ push(length); @@ -3167,13 +3173,28 @@ __ push(dst); __ push(src_pos); __ push(src); - __ call_VM_leaf(entry, 5); // removes pushed parameter from the stack + + if (copyfunc_addr == NULL) { // Use C version if stub was not generated + __ call_VM_leaf(C_entry, 5); // removes pushed parameter from the stack + } else { +#ifndef PRODUCT + if (PrintC1Statistics) { + __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); + } +#endif + __ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameter from the stack + } #endif // _LP64 __ cmpl(rax, 0); __ jcc(Assembler::equal, *stub->continuation()); + if (copyfunc_addr != NULL) { + __ mov(tmp, rax); + __ xorl(tmp, -1); + } + // Reload values from the stack so they are where the stub // expects them. 
__ movptr (dst, Address(rsp, 0*BytesPerWord)); @@ -3181,6 +3202,12 @@ __ movptr (length, Address(rsp, 2*BytesPerWord)); __ movptr (src_pos, Address(rsp, 3*BytesPerWord)); __ movptr (src, Address(rsp, 4*BytesPerWord)); + + if (copyfunc_addr != NULL) { + __ subl(length, tmp); + __ addl(src_pos, tmp); + __ addl(dst_pos, tmp); + } __ jmp(*stub->entry()); __ bind(*stub->continuation()); @@ -3240,10 +3267,6 @@ __ testl(dst_pos, dst_pos); __ jcc(Assembler::less, *stub->entry()); } - if (flags & LIR_OpArrayCopy::length_positive_check) { - __ testl(length, length); - __ jcc(Assembler::less, *stub->entry()); - } if (flags & LIR_OpArrayCopy::src_range_check) { __ lea(tmp, Address(src_pos, length, Address::times_1, 0)); @@ -3256,15 +3279,190 @@ __ jcc(Assembler::above, *stub->entry()); } + if (flags & LIR_OpArrayCopy::length_positive_check) { + __ testl(length, length); + __ jcc(Assembler::less, *stub->entry()); + __ jcc(Assembler::zero, *stub->continuation()); + } + +#ifdef _LP64 + __ movl2ptr(src_pos, src_pos); //higher 32bits must be null + __ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null +#endif + if (flags & LIR_OpArrayCopy::type_check) { - if (UseCompressedOops) { - __ movl(tmp, src_klass_addr); - __ cmpl(tmp, dst_klass_addr); + // We don't know the array types are compatible + if (basic_type != T_OBJECT) { + // Simple test for basic type arrays + if (UseCompressedOops) { + __ movl(tmp, src_klass_addr); + __ cmpl(tmp, dst_klass_addr); + } else { + __ movptr(tmp, src_klass_addr); + __ cmpptr(tmp, dst_klass_addr); + } + __ jcc(Assembler::notEqual, *stub->entry()); } else { - __ movptr(tmp, src_klass_addr); - __ cmpptr(tmp, dst_klass_addr); + // For object arrays, if src is a sub class of dst then we can + // safely do the copy. + Label cont, slow; + + __ push(src); + __ push(dst); + + __ load_klass(src, src); + __ load_klass(dst, dst); + + __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL); + + __ push(src); + __ push(dst); + __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id))); + __ pop(dst); + __ pop(src); + + __ cmpl(src, 0); + __ jcc(Assembler::notEqual, cont); + + __ bind(slow); + __ pop(dst); + __ pop(src); + + address copyfunc_addr = StubRoutines::checkcast_arraycopy(); + if (copyfunc_addr != NULL) { // use stub if available + // src is not a sub class of dst so we have to do a + // per-element check. + + int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray; + if ((flags & mask) != mask) { + // Check that at least both of them object arrays. 
+ assert(flags & mask, "one of the two should be known to be an object array"); + + if (!(flags & LIR_OpArrayCopy::src_objarray)) { + __ load_klass(tmp, src); + } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) { + __ load_klass(tmp, dst); + } + int lh_offset = klassOopDesc::header_size() * HeapWordSize + + Klass::layout_helper_offset_in_bytes(); + Address klass_lh_addr(tmp, lh_offset); + jint objArray_lh = Klass::array_layout_helper(T_OBJECT); + __ cmpl(klass_lh_addr, objArray_lh); + __ jcc(Assembler::notEqual, *stub->entry()); + } + +#ifndef _LP64 + // save caller save registers + store_parameter(rax, 2); + store_parameter(rcx, 1); + store_parameter(rdx, 0); + + __ movptr(tmp, dst_klass_addr); + __ movptr(tmp, Address(tmp, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc))); + __ push(tmp); + __ movl(tmp, Address(tmp, Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc))); + __ push(tmp); + __ push(length); + __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); + __ push(tmp); + __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); + __ push(tmp); + + __ call_VM_leaf(copyfunc_addr, 5); +#else + __ movl2ptr(length, length); //higher 32bits must be null + + // save caller save registers: copy them to callee save registers + __ mov(rbx, rdx); + __ mov(r13, r8); + __ mov(r14, r9); +#ifndef _WIN64 + store_parameter(rsi, 1); + store_parameter(rcx, 0); + // on WIN64 other incoming parameters are in rdi and rsi saved + // across the call +#endif + + __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); + assert_different_registers(c_rarg0, dst, dst_pos, length); + __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); + assert_different_registers(c_rarg1, dst, length); + + __ mov(c_rarg2, length); + assert_different_registers(c_rarg2, dst); + +#ifdef _WIN64 + // Allocate abi space for args but be sure to keep stack aligned + __ subptr(rsp, 6*wordSize); + __ load_klass(c_rarg3, dst); + __ movptr(c_rarg3, Address(c_rarg3, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc))); + store_parameter(c_rarg3, 4); + __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc))); + __ call(RuntimeAddress(copyfunc_addr)); + __ addptr(rsp, 6*wordSize); +#else + __ load_klass(c_rarg4, dst); + __ movptr(c_rarg4, Address(c_rarg4, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc))); + __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc))); + __ call(RuntimeAddress(copyfunc_addr)); +#endif + +#endif + +#ifndef PRODUCT + if (PrintC1Statistics) { + Label failed; + __ testl(rax, rax); + __ jcc(Assembler::notZero, failed); + __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt)); + __ bind(failed); + } +#endif + + __ testl(rax, rax); + __ jcc(Assembler::zero, *stub->continuation()); + +#ifndef PRODUCT + if (PrintC1Statistics) { + __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt)); + } +#endif + + __ mov(tmp, rax); + + __ xorl(tmp, -1); + +#ifndef _LP64 + // restore caller save registers + assert_different_registers(tmp, rdx, rcx, rax); // result of stub will be lost + __ movptr(rdx, Address(rsp, 0*BytesPerWord)); + __ movptr(rcx, Address(rsp, 1*BytesPerWord)); + __ movptr(rax, Address(rsp, 2*BytesPerWord)); +#else + // restore caller save registers + __ mov(rdx, rbx); + __ 
mov(r8, r13); + __ mov(r9, r14); +#ifndef _WIN64 + assert_different_registers(tmp, rdx, r8, r9, rcx, rsi); // result of stub will be lost + __ movptr(rcx, Address(rsp, 0*BytesPerWord)); + __ movptr(rsi, Address(rsp, 1*BytesPerWord)); +#else + assert_different_registers(tmp, rdx, r8, r9); // result of stub will be lost +#endif +#endif + + __ subl(length, tmp); + __ addl(src_pos, tmp); + __ addl(dst_pos, tmp); + } + + __ jmp(*stub->entry()); + + __ bind(cont); + __ pop(dst); + __ pop(src); } - __ jcc(Assembler::notEqual, *stub->entry()); } #ifdef ASSERT @@ -3305,16 +3503,16 @@ } #endif - if (shift_amount > 0 && basic_type != T_OBJECT) { - __ shlptr(length, shift_amount); +#ifndef PRODUCT + if (PrintC1Statistics) { + __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type))); } +#endif #ifdef _LP64 assert_different_registers(c_rarg0, dst, dst_pos, length); - __ movl2ptr(src_pos, src_pos); //higher 32bits must be null __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); assert_different_registers(c_rarg1, length); - __ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type))); __ mov(c_rarg2, length); @@ -3325,11 +3523,12 @@ store_parameter(tmp, 1); store_parameter(length, 2); #endif // _LP64 - if (basic_type == T_OBJECT) { - __ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::oop_arraycopy), 0); - } else { - __ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::primitive_arraycopy), 0); - } + + bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0; + bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0; + const char *name; + address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); + __ call_VM_leaf(entry, 0); __ bind(*stub->continuation()); }
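The generic and checkcast arraycopy stubs called above return 0 on complete success or ~K when only K elements were copied, which is why the code does mov(tmp, rax); xorl(tmp, -1) and then advances src_pos/dst_pos and shrinks length before jumping to the slow-path stub. A small standalone sketch of that return-value convention, where copy_some is a hypothetical stand-in for the stub:

    #include <cstdio>

    // Hypothetical stand-in for the arraycopy stub: copies up to 'limit' elements
    // and returns 0 on full success, or ~K when only K elements were copied.
    static int copy_some(int* dst, const int* src, int len, int limit) {
      int k = len < limit ? len : limit;
      for (int i = 0; i < k; i++) dst[i] = src[i];
      return (k == len) ? 0 : ~k;
    }

    int main() {
      int src[8] = {1, 2, 3, 4, 5, 6, 7, 8}, dst[8] = {0};
      int src_pos = 0, dst_pos = 0, length = 8;

      int rax = copy_some(dst, src, length, /*limit*/ 5);  // partial copy
      if (rax != 0) {
        int k = ~rax;          // same as xorl(tmp, -1): recover the element count K
        length  -= k;          // remaining work for the slow path
        src_pos += k;
        dst_pos += k;
        std::printf("stub copied %d elements; %d left at src_pos=%d dst_pos=%d\n",
                    k, length, src_pos, dst_pos);
      } else {
        std::puts("stub copied everything");
      }
      return 0;
    }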
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "asm/assembler.hpp" #include "c1/c1_Defs.hpp" #include "c1/c1_MacroAssembler.hpp" #include "c1/c1_Runtime1.hpp" @@ -248,11 +249,14 @@ #ifdef _LP64 align_dummy_0, align_dummy_1, #endif // _LP64 - dummy1, SLOT2(dummy1H) // 0, 4 - dummy2, SLOT2(dummy2H) // 8, 12 - // Two temps to be used as needed by users of save/restore callee registers - temp_2_off, SLOT2(temp_2H_off) // 16, 20 - temp_1_off, SLOT2(temp_1H_off) // 24, 28 +#ifdef _WIN64 + // Windows always allocates space for it's argument registers (see + // frame::arg_reg_save_area_bytes). + arg_reg_save_1, arg_reg_save_1H, // 0, 4 + arg_reg_save_2, arg_reg_save_2H, // 8, 12 + arg_reg_save_3, arg_reg_save_3H, // 16, 20 + arg_reg_save_4, arg_reg_save_4H, // 24, 28 +#endif // _WIN64 xmm_regs_as_doubles_off, // 32 float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots, // 160 fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots, // 224 @@ -282,24 +286,7 @@ rax_off, SLOT2(raxH_off) // 480, 484 saved_rbp_off, SLOT2(saved_rbpH_off) // 488, 492 return_off, SLOT2(returnH_off) // 496, 500 - reg_save_frame_size, // As noted: neglects any parameters to runtime // 504 - -#ifdef _WIN64 - c_rarg0_off = rcx_off, -#else - c_rarg0_off = rdi_off, -#endif // WIN64 - - // equates - - // illegal instruction handler - continue_dest_off = temp_1_off, - - // deoptimization equates - fp0_off = float_regs_as_doubles_off, // slot for java float/double return value - xmm0_off = xmm_regs_as_doubles_off, // slot for java float/double return value - deopt_type = temp_2_off, // slot for type of deopt in progress - ret_type = temp_1_off // slot for return type + reg_save_frame_size // As noted: neglects any parameters to runtime // 504 }; @@ -405,11 +392,6 @@ bool save_fpu_registers = true) { __ block_comment("save_live_registers"); - // 64bit passes the args in regs to the c++ runtime - int frame_size_in_slots = reg_save_frame_size NOT_LP64(+ num_rt_args); // args + thread - // frame_size = round_to(frame_size, 4); - sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word ); - __ pusha(); // integer registers // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset"); @@ -662,19 +644,58 @@ } -void Runtime1::generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_maps, OopMap* oop_map, bool save_fpu_registers) { +OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) { + __ block_comment("generate_handle_exception"); + // incoming parameters const Register exception_oop = rax; - const Register exception_pc = rdx; + const Register exception_pc = rdx; // other registers used in this stub - const Register real_return_addr = rbx; const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); - __ block_comment("generate_handle_exception"); + // Save registers, if required. + OopMapSet* oop_maps = new OopMapSet(); + OopMap* oop_map = NULL; + switch (id) { + case forward_exception_id: + // We're handling an exception in the context of a compiled frame. + // The registers have been saved in the standard places. Perform + // an exception lookup in the caller and dispatch to the handler + // if found. Otherwise unwind and dispatch to the callers + // exception handler. 
+ oop_map = generate_oop_map(sasm, 1 /*thread*/); + + // load and clear pending exception oop into RAX + __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset())); + __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD); + + // load issuing PC (the return address for this stub) into rdx + __ movptr(exception_pc, Address(rbp, 1*BytesPerWord)); + + // make sure that the vm_results are cleared (may be unnecessary) + __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD); + __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD); + break; + case handle_exception_nofpu_id: + case handle_exception_id: + // At this point all registers MAY be live. + oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id); + break; + case handle_exception_from_callee_id: { + // At this point all registers except exception oop (RAX) and + // exception pc (RDX) are dead. + const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord); + oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0); + sasm->set_frame_size(frame_size); + WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes)); + break; + } + default: ShouldNotReachHere(); + } #ifdef TIERED // C2 can leave the fpu stack dirty - if (UseSSE < 2 ) { + if (UseSSE < 2) { __ empty_FPU_stack(); } #endif // TIERED @@ -706,11 +727,7 @@ // save exception oop and issuing pc into JavaThread // (exception handler will load it from here) __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop); - __ movptr(Address(thread, JavaThread::exception_pc_offset()), exception_pc); - - // save real return address (pc that called this stub) - __ movptr(real_return_addr, Address(rbp, 1*BytesPerWord)); - __ movptr(Address(rsp, temp_1_off * VMRegImpl::stack_slot_size), real_return_addr); + __ movptr(Address(thread, JavaThread::exception_pc_offset()), exception_pc); // patch throwing pc into return address (has bci & oop map) __ movptr(Address(rbp, 1*BytesPerWord), exception_pc); @@ -720,33 +737,41 @@ int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc)); oop_maps->add_gc_map(call_offset, oop_map); - // rax,: handler address + // rax: handler address // will be the deopt blob if nmethod was deoptimized while we looked up // handler regardless of whether handler existed in the nmethod. // only rax, is valid at this time, all other registers have been destroyed by the runtime call __ invalidate_registers(false, true, true, true, true, true); -#ifdef ASSERT - // Do we have an exception handler in the nmethod? - Label done; - __ testptr(rax, rax); - __ jcc(Assembler::notZero, done); - __ stop("no handler found"); - __ bind(done); -#endif - - // exception handler found - // patch the return address -> the stub will directly return to the exception handler + // patch the return address, this stub will directly return to the exception handler __ movptr(Address(rbp, 1*BytesPerWord), rax); - // restore registers - restore_live_registers(sasm, save_fpu_registers); + switch (id) { + case forward_exception_id: + case handle_exception_nofpu_id: + case handle_exception_id: + // Restore the registers that were saved at the beginning. + restore_live_registers(sasm, id == handle_exception_nofpu_id); + break; + case handle_exception_from_callee_id: + // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP + // since we do a leave anyway. 
- // return to exception handler - __ leave(); - __ ret(0); + // Pop the return address since we are possibly changing SP (restoring from BP). + __ leave(); + __ pop(rcx); + // Restore SP from BP if the exception PC is a method handle call site. + NOT_LP64(__ get_thread(thread);) + __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0); + __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save); + __ jmp(rcx); // jump to exception handler + break; + default: ShouldNotReachHere(); + } + + return oop_maps; } void Runtime1::c1x_generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_maps, OopMap* oop_map) { @@ -879,7 +904,7 @@ // the pop is also necessary to simulate the effect of a ret(0) __ pop(exception_pc); - // Restore SP from BP if the exception PC is a MethodHandle call site. + // Restore SP from BP if the exception PC is a method handle call site. NOT_LP64(__ get_thread(thread);) __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0); __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save); @@ -1022,7 +1047,6 @@ __ ret(0); return oop_maps; - } JRT_ENTRY(void, c1x_create_null_exception(JavaThread* thread)) @@ -1046,35 +1070,9 @@ switch (id) { case forward_exception_id: { - // we're handling an exception in the context of a compiled - // frame. The registers have been saved in the standard - // places. Perform an exception lookup in the caller and - // dispatch to the handler if found. Otherwise unwind and - // dispatch to the callers exception handler. - - const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); - const Register exception_oop = rax; - const Register exception_pc = rdx; - - // load pending exception oop into rax, - __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset())); - // clear pending exception - __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD); - - // load issuing PC (the return address for this stub) into rdx - __ movptr(exception_pc, Address(rbp, 1*BytesPerWord)); - - // make sure that the vm_results are cleared (may be unnecessary) - __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD); - __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD); - - // verify that that there is really a valid exception in rax, - __ verify_not_null_oop(exception_oop); - - oop_maps = new OopMapSet(); - OopMap* oop_map = generate_oop_map(sasm, 1); - generate_handle_exception(sasm, oop_maps, oop_map); - __ stop("should not reach here"); + oop_maps = generate_handle_exception(id, sasm); + __ leave(); + __ ret(0); } break; @@ -1409,13 +1407,15 @@ break; case handle_exception_nofpu_id: - save_fpu_registers = false; - // fall through case handle_exception_id: { StubFrame f(sasm, "handle_exception", dont_gc_arguments); - oop_maps = new OopMapSet(); - OopMap* oop_map = save_live_registers(sasm, 1, save_fpu_registers); - generate_handle_exception(sasm, oop_maps, oop_map, save_fpu_registers); + oop_maps = generate_handle_exception(id, sasm); + } + break; + + case handle_exception_from_callee_id: + { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments); + oop_maps = generate_handle_exception(id, sasm); } break;
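For handle_exception_from_callee_id the frame holds only the saved rbp and the return address, plus the thread argument slot on 32-bit and the Win64 register-argument shadow area (arg_reg_save_area_bytes = 32, see the frame_x86.hpp hunk below). A quick reproduction of that word count under those assumptions (illustrative arithmetic, not VM code):

    #include <cstdio>

    // Word count of the handle_exception_from_callee frame, as computed above:
    // saved rbp + return address, plus the thread argument slot on 32-bit,
    // plus the Win64 register-argument shadow area (32 bytes).
    static int frame_words(bool lp64, bool win64) {
      int bytes_per_word = lp64 ? 8 : 4;
      return 2 + (lp64 ? 0 : 1) + (win64 ? 32 / bytes_per_word : 0);
    }

    int main() {
      std::printf("linux x86_64: %d words\n", frame_words(true,  false));  // 2
      std::printf("win64:        %d words\n", frame_words(true,  true));   // 6
      std::printf("x86_32:       %d words\n", frame_words(false, false));  // 3
      return 0;
    }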
--- a/src/cpu/x86/vm/frame_x86.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/frame_x86.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -125,7 +125,7 @@ // Entry frames #ifdef AMD64 #ifdef _WIN64 - entry_frame_after_call_words = 8, + entry_frame_after_call_words = 28, entry_frame_call_wrapper_offset = 2, arg_reg_save_area_bytes = 32, // Register argument save area
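The jump from 8 to 28 words lines up with the Win64 call-stub change later in this changeset: xmm6 through xmm15 are now saved across the call, and each XMM register occupies two stack slots. A quick check of that arithmetic, assuming the 20 extra words are exactly those ten XMM saves:

    #include <cstdio>

    int main() {
      const int old_words     = 8;                 // previous entry_frame_after_call_words
      const int xmm_first     = 6, xmm_last = 15;  // xmm6..xmm15 saved by the Win64 call stub
      const int slots_per_xmm = 2;                 // each xmm register takes 2 slots

      int new_words = old_words + (xmm_last - xmm_first + 1) * slots_per_xmm;
      std::printf("entry_frame_after_call_words = %d\n", new_words);  // prints 28
      return 0;
    }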
--- a/src/cpu/x86/vm/globals_x86.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/globals_x86.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -72,4 +72,6 @@ define_pd_global(bool, UseMembar, false); +// GC Ergo Flags +define_pd_global(intx, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread #endif // CPU_X86_VM_GLOBALS_X86_HPP
--- a/src/cpu/x86/vm/interp_masm_x86_32.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -215,7 +215,7 @@ if (index_size == sizeof(u2)) { load_unsigned_short(reg, Address(rsi, bcp_offset)); } else if (index_size == sizeof(u4)) { - assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic"); + assert(EnableInvokeDynamic, "giant index used only for JSR 292"); movl(reg, Address(rsi, bcp_offset)); // Check if the secondary index definition is still ~x, otherwise // we have to change the following assembler code to calculate the @@ -223,7 +223,7 @@ assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line"); notl(reg); // convert to plain index } else if (index_size == sizeof(u1)) { - assert(EnableMethodHandles, "tiny index used only for EnableMethodHandles"); + assert(EnableInvokeDynamic, "tiny index used only for JSR 292"); load_unsigned_byte(reg, Address(rsi, bcp_offset)); } else { ShouldNotReachHere();
--- a/src/cpu/x86/vm/interp_masm_x86_64.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -213,7 +213,7 @@ if (index_size == sizeof(u2)) { load_unsigned_short(index, Address(r13, bcp_offset)); } else if (index_size == sizeof(u4)) { - assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic"); + assert(EnableInvokeDynamic, "giant index used only for JSR 292"); movl(index, Address(r13, bcp_offset)); // Check if the secondary index definition is still ~x, otherwise // we have to change the following assembler code to calculate the @@ -221,7 +221,7 @@ assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line"); notl(index); // convert to plain index } else if (index_size == sizeof(u1)) { - assert(EnableMethodHandles, "tiny index used only for EnableMethodHandles"); + assert(EnableInvokeDynamic, "tiny index used only for JSR 292"); load_unsigned_byte(index, Address(r13, bcp_offset)); } else { ShouldNotReachHere();
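Both interpreter hunks keep the invariant that a four-byte ("giant") index is stored as its bitwise complement, so the notl(reg)/notl(index) that follows turns it back into a plain index. A standalone round-trip check mirroring the decode_secondary_index(~123) == 123 assertion:

    #include <cassert>
    #include <cstdio>
    #include <cstdint>

    // The secondary (invokedynamic) index is stored as ~index in the bytecode
    // stream; decoding is just another bitwise NOT, as notl() does above.
    static int32_t decode_secondary_index(int32_t encoded) { return ~encoded; }

    int main() {
      int32_t plain   = 123;
      int32_t encoded = ~plain;                          // what the rewriter stores
      assert(decode_secondary_index(encoded) == plain);  // the invariant asserted above
      assert(decode_secondary_index(~123) == 123);
      std::printf("~%d = %d, decoded back to %d\n",
                  plain, encoded, decode_secondary_index(encoded));
      return 0;
    }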
--- a/src/cpu/x86/vm/interpreter_x86_32.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/interpreter_x86_32.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -231,9 +231,9 @@ // Method handle invoker -// Dispatch a method of the form java.dyn.MethodHandles::invoke(...) +// Dispatch a method of the form java.lang.invoke.MethodHandles::invoke(...) address InterpreterGenerator::generate_method_handle_entry(void) { - if (!EnableMethodHandles) { + if (!EnableInvokeDynamic) { return generate_abstract_entry(); }
--- a/src/cpu/x86/vm/interpreter_x86_64.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/interpreter_x86_64.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -318,9 +318,9 @@ // Method handle invoker -// Dispatch a method of the form java.dyn.MethodHandles::invoke(...) +// Dispatch a method of the form java.lang.invoke.MethodHandles::invoke(...) address InterpreterGenerator::generate_method_handle_entry(void) { - if (!EnableMethodHandles) { + if (!EnableInvokeDynamic) { return generate_abstract_entry(); }
--- a/src/cpu/x86/vm/jni_x86.h Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/jni_x86.h Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/x86/vm/methodHandles_x86.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/methodHandles_x86.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -125,9 +125,9 @@ } // given the MethodType, find out where the MH argument is buried - __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rdi_temp))); + __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp))); Register rdx_vmslots = rdx_temp; - __ movl(rdx_vmslots, Address(rdx_temp, __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, rdi_temp))); + __ movl(rdx_vmslots, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, rdi_temp))); __ movptr(rcx_recv, __ argument_address(rdx_vmslots)); trace_method_handle(_masm, "invokeExact"); @@ -154,11 +154,11 @@ rcx_argslot, rbx_temp, rdx_temp); // load up an adapter from the calling type (Java weaves this) - __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rdi_temp))); + __ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp))); Register rdx_adapter = rdx_temp; - // __ load_heap_oop(rdx_adapter, Address(rdx_temp, java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes())); + // __ load_heap_oop(rdx_adapter, Address(rdx_temp, java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes())); // deal with old JDK versions: - __ lea(rdi_temp, Address(rdx_temp, __ delayed_value(java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes, rdi_temp))); + __ lea(rdi_temp, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, rdi_temp))); __ cmpptr(rdi_temp, rdx_temp); Label sorry_no_invoke_generic; __ jcc(Assembler::below, sorry_no_invoke_generic); @@ -371,16 +371,16 @@ // which conversion op types are implemented here? int MethodHandles::adapter_conversion_ops_supported_mask() { - return ((1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY) - |(1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW) - |(1<<sun_dyn_AdapterMethodHandle::OP_CHECK_CAST) - |(1<<sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM) - |(1<<sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM) - |(1<<sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS) - |(1<<sun_dyn_AdapterMethodHandle::OP_ROT_ARGS) - |(1<<sun_dyn_AdapterMethodHandle::OP_DUP_ARGS) - |(1<<sun_dyn_AdapterMethodHandle::OP_DROP_ARGS) - //|(1<<sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG! + return ((1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS) + //|(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG! ); // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS. 
} @@ -415,20 +415,21 @@ const Register rarg2_required = LP64_ONLY(j_rarg2) NOT_LP64(rdi); assert_different_registers(rarg0_code, rarg1_actual, rarg2_required, saved_last_sp); - guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets"); + guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets"); // some handy addresses Address rbx_method_fie( rbx, methodOopDesc::from_interpreted_offset() ); + Address rbx_method_fce( rbx, methodOopDesc::from_compiled_offset() ); - Address rcx_mh_vmtarget( rcx_recv, java_dyn_MethodHandle::vmtarget_offset_in_bytes() ); - Address rcx_dmh_vmindex( rcx_recv, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes() ); + Address rcx_mh_vmtarget( rcx_recv, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes() ); + Address rcx_dmh_vmindex( rcx_recv, java_lang_invoke_DirectMethodHandle::vmindex_offset_in_bytes() ); - Address rcx_bmh_vmargslot( rcx_recv, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes() ); - Address rcx_bmh_argument( rcx_recv, sun_dyn_BoundMethodHandle::argument_offset_in_bytes() ); + Address rcx_bmh_vmargslot( rcx_recv, java_lang_invoke_BoundMethodHandle::vmargslot_offset_in_bytes() ); + Address rcx_bmh_argument( rcx_recv, java_lang_invoke_BoundMethodHandle::argument_offset_in_bytes() ); - Address rcx_amh_vmargslot( rcx_recv, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes() ); - Address rcx_amh_argument( rcx_recv, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes() ); - Address rcx_amh_conversion( rcx_recv, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes() ); + Address rcx_amh_vmargslot( rcx_recv, java_lang_invoke_AdapterMethodHandle::vmargslot_offset_in_bytes() ); + Address rcx_amh_argument( rcx_recv, java_lang_invoke_AdapterMethodHandle::argument_offset_in_bytes() ); + Address rcx_amh_conversion( rcx_recv, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes() ); Address vmarg; // __ argument_address(vmargslot) const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); @@ -448,12 +449,10 @@ case _raise_exception: { // Not a real MH entry, but rather shared code for raising an - // exception. Since we use a C2I adapter to set up the - // interpreter state, arguments are expected in compiler - // argument registers. + // exception. Since we use the compiled entry, arguments are + // expected in compiler argument registers. assert(raise_exception_method(), "must be set"); - address c2i_entry = raise_exception_method()->get_c2i_entry(); - assert(c2i_entry, "method must be linked"); + assert(raise_exception_method()->from_compiled_entry(), "method must be linked"); const Register rdi_pc = rax; __ pop(rdi_pc); // caller PC @@ -461,7 +460,7 @@ Register rbx_method = rbx_temp; Label L_no_method; - // FIXME: fill in _raise_exception_method with a suitable sun.dyn method + // FIXME: fill in _raise_exception_method with a suitable java.lang.invoke method __ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method)); __ testptr(rbx_method, rbx_method); __ jccb(Assembler::zero, L_no_method); @@ -472,13 +471,10 @@ __ jccb(Assembler::zero, L_no_method); __ verify_oop(rbx_method); - // 32-bit: push remaining arguments as if coming from the compiler. 
NOT_LP64(__ push(rarg2_required)); + __ push(rdi_pc); // restore caller PC + __ jmp(rbx_method_fce); // jump to compiled entry - __ push(rdi_pc); // restore caller PC - __ jump(ExternalAddress(c2i_entry)); // do C2I transition - - // If we get here, the Java runtime did not do its job of creating the exception. // Do something that is at least causes a valid throw from the interpreter. __ bind(L_no_method); __ push(rarg2_required);
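adapter_conversion_ops_supported_mask, earlier in this file's hunk, simply ORs one bit per supported OP_* constant, with OP_SPREAD_ARGS still left out because of the noted crash. A tiny standalone sketch of building and querying such a mask; the op numbering here is invented for illustration and is not the real java.lang.invoke constant set:

    #include <cstdio>

    // Hypothetical op numbering, purely for illustration.
    enum AdapterOp { OP_RETYPE_ONLY = 0, OP_CHECK_CAST = 1, OP_SWAP_ARGS = 2, OP_SPREAD_ARGS = 3 };

    static int supported_mask() {
      return (1 << OP_RETYPE_ONLY)
           | (1 << OP_CHECK_CAST)
           | (1 << OP_SWAP_ARGS);
           // OP_SPREAD_ARGS intentionally omitted, as in the mask above
    }

    static bool is_supported(AdapterOp op) { return (supported_mask() >> op) & 1; }

    int main() {
      std::printf("mask = 0x%x\n", (unsigned)supported_mask());
      std::printf("OP_CHECK_CAST supported:  %d\n", is_supported(OP_CHECK_CAST));
      std::printf("OP_SPREAD_ARGS supported: %d\n", is_supported(OP_SPREAD_ARGS));
      return 0;
    }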
--- a/src/cpu/x86/vm/nativeInst_x86.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/nativeInst_x86.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -519,7 +519,11 @@ class NativeTstRegMem: public NativeInstruction { public: enum Intel_specific_constants { - instruction_code_memXregl = 0x85 + instruction_rex_prefix_mask = 0xF0, + instruction_rex_prefix = Assembler::REX, + instruction_code_memXregl = 0x85, + modrm_mask = 0x38, // select reg from the ModRM byte + modrm_reg = 0x00 // rax }; }; @@ -533,12 +537,25 @@ (ubyte_at(0) & 0xF0) == 0x70; /* short jump */ } inline bool NativeInstruction::is_safepoint_poll() { #ifdef AMD64 - if ( ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl && - ubyte_at(1) == 0x05 ) { // 00 rax 101 - address fault = addr_at(6) + int_at(2); - return os::is_poll_address(fault); + if (Assembler::is_polling_page_far()) { + // two cases, depending on the choice of the base register in the address. + if (((ubyte_at(0) & NativeTstRegMem::instruction_rex_prefix_mask) == NativeTstRegMem::instruction_rex_prefix && + ubyte_at(1) == NativeTstRegMem::instruction_code_memXregl && + (ubyte_at(2) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg) || + ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl && + (ubyte_at(1) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg) { + return true; + } else { + return false; + } } else { - return false; + if (ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl && + ubyte_at(1) == 0x05) { // 00 rax 101 + address fault = addr_at(6) + int_at(2); + return os::is_poll_address(fault); + } else { + return false; + } } #else return ( ubyte_at(0) == NativeMovRegMem::instruction_code_mem2reg ||
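With a far polling page the poll is emitted as testl rax, [reg], so is_safepoint_poll now matches it byte by byte: an optional REX prefix (any 0x4_ byte, mask 0xF0), the 0x85 test opcode, and a ModRM byte whose reg field (mask 0x38) selects rax. A standalone sketch of that byte-pattern check, modelling only the far-page case added above:

    #include <cstdio>
    #include <cstdint>

    // Recognize "testl %eax, (reg)" with or without a REX prefix, following the
    // masks introduced above: any 0x4_ REX byte, opcode 0x85, and a ModRM reg
    // field (bits 3..5) of 000, i.e. rax.
    static bool looks_like_far_poll(const uint8_t* insn) {
      const uint8_t rex_mask = 0xF0, rex = 0x40, opcode = 0x85, modrm_mask = 0x38, modrm_reg = 0x00;
      if ((insn[0] & rex_mask) == rex)
        return insn[1] == opcode && (insn[2] & modrm_mask) == modrm_reg;
      return insn[0] == opcode && (insn[1] & modrm_mask) == modrm_reg;
    }

    int main() {
      uint8_t with_rex[]    = { 0x41, 0x85, 0x00 };  // test %eax, (%r8)
      uint8_t without_rex[] = { 0x85, 0x03 };        // test %eax, (%rbx)
      uint8_t not_a_poll[]  = { 0x85, 0x18 };        // reg field selects %ebx, not %eax
      std::printf("%d %d %d\n", looks_like_far_poll(with_rex),
                  looks_like_far_poll(without_rex), looks_like_far_poll(not_a_poll));
      return 0;
    }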
--- a/src/cpu/x86/vm/relocInfo_x86.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/relocInfo_x86.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,7 @@ #include "runtime/safepoint.hpp" -void Relocation::pd_set_data_value(address x, intptr_t o) { +void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) { #ifdef AMD64 x += o; typedef Assembler::WhichOperand WhichOperand; @@ -40,19 +40,35 @@ which == Assembler::narrow_oop_operand || which == Assembler::imm_operand, "format unpacks ok"); if (which == Assembler::imm_operand) { - *pd_address_in_code() = x; + if (verify_only) { + assert(*pd_address_in_code() == x, "instructions must match"); + } else { + *pd_address_in_code() = x; + } } else if (which == Assembler::narrow_oop_operand) { address disp = Assembler::locate_operand(addr(), which); - *(int32_t*) disp = oopDesc::encode_heap_oop((oop)x); + if (verify_only) { + assert(*(uint32_t*) disp == oopDesc::encode_heap_oop((oop)x), "instructions must match"); + } else { + *(int32_t*) disp = oopDesc::encode_heap_oop((oop)x); + } } else { // Note: Use runtime_call_type relocations for call32_operand. address ip = addr(); address disp = Assembler::locate_operand(ip, which); address next_ip = Assembler::locate_next_instruction(ip); - *(int32_t*) disp = x - next_ip; + if (verify_only) { + assert(*(int32_t*) disp == (x - next_ip), "instructions must match"); + } else { + *(int32_t*) disp = x - next_ip; + } } #else - *pd_address_in_code() = x + o; + if (verify_only) { + assert(*pd_address_in_code() == (x + o), "instructions must match"); + } else { + *pd_address_in_code() = x + o; + } #endif // AMD64 } @@ -182,41 +198,44 @@ void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) { #ifdef _LP64 - typedef Assembler::WhichOperand WhichOperand; - WhichOperand which = (WhichOperand) format(); - // This format is imm but it is really disp32 - which = Assembler::disp32_operand; - address orig_addr = old_addr_for(addr(), src, dest); - NativeInstruction* oni = nativeInstruction_at(orig_addr); - int32_t* orig_disp = (int32_t*) Assembler::locate_operand(orig_addr, which); - // This poll_addr is incorrect by the size of the instruction it is irrelevant - intptr_t poll_addr = (intptr_t)oni + *orig_disp; + if (!Assembler::is_polling_page_far()) { + typedef Assembler::WhichOperand WhichOperand; + WhichOperand which = (WhichOperand) format(); + // This format is imm but it is really disp32 + which = Assembler::disp32_operand; + address orig_addr = old_addr_for(addr(), src, dest); + NativeInstruction* oni = nativeInstruction_at(orig_addr); + int32_t* orig_disp = (int32_t*) Assembler::locate_operand(orig_addr, which); + // This poll_addr is incorrect by the size of the instruction it is irrelevant + intptr_t poll_addr = (intptr_t)oni + *orig_disp; - NativeInstruction* ni = nativeInstruction_at(addr()); - intptr_t new_disp = poll_addr - (intptr_t) ni; + NativeInstruction* ni = nativeInstruction_at(addr()); + intptr_t new_disp = poll_addr - (intptr_t) ni; - int32_t* disp = (int32_t*) Assembler::locate_operand(addr(), which); - * disp = (int32_t)new_disp; - + int32_t* disp = (int32_t*) Assembler::locate_operand(addr(), which); + * disp = (int32_t)new_disp; 
+ } #endif // _LP64 } void poll_return_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) { #ifdef _LP64 - typedef Assembler::WhichOperand WhichOperand; - WhichOperand which = (WhichOperand) format(); - // This format is imm but it is really disp32 - which = Assembler::disp32_operand; - address orig_addr = old_addr_for(addr(), src, dest); - NativeInstruction* oni = nativeInstruction_at(orig_addr); - int32_t* orig_disp = (int32_t*) Assembler::locate_operand(orig_addr, which); - // This poll_addr is incorrect by the size of the instruction it is irrelevant - intptr_t poll_addr = (intptr_t)oni + *orig_disp; + if (!Assembler::is_polling_page_far()) { + typedef Assembler::WhichOperand WhichOperand; + WhichOperand which = (WhichOperand) format(); + // This format is imm but it is really disp32 + which = Assembler::disp32_operand; + address orig_addr = old_addr_for(addr(), src, dest); + NativeInstruction* oni = nativeInstruction_at(orig_addr); + int32_t* orig_disp = (int32_t*) Assembler::locate_operand(orig_addr, which); + // This poll_addr is incorrect by the size of the instruction it is irrelevant + intptr_t poll_addr = (intptr_t)oni + *orig_disp; - NativeInstruction* ni = nativeInstruction_at(addr()); - intptr_t new_disp = poll_addr - (intptr_t) ni; + NativeInstruction* ni = nativeInstruction_at(addr()); + intptr_t new_disp = poll_addr - (intptr_t) ni; - int32_t* disp = (int32_t*) Assembler::locate_operand(addr(), which); - * disp = (int32_t)new_disp; + int32_t* disp = (int32_t*) Assembler::locate_operand(addr(), which); + * disp = (int32_t)new_disp; + } #endif // _LP64 }
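pd_set_data_value now doubles as a verifier: with verify_only set it asserts that the operand already holds the expected value instead of writing it. A minimal sketch of that verify-or-patch pattern; patch_or_verify below is a hypothetical helper, not a HotSpot API:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Either patch a 32-bit operand in place or, in verify mode, check that it
    // already matches -- mirroring the verify_only branches added above.
    static void patch_or_verify(int32_t* operand, int32_t expected, bool verify_only) {
      if (verify_only) {
        assert(*operand == expected && "instructions must match");
      } else {
        *operand = expected;
      }
    }

    int main() {
      int32_t slot = 0;
      patch_or_verify(&slot, 42, /*verify_only*/ false);  // writes 42
      patch_or_verify(&slot, 42, /*verify_only*/ true);   // passes: value already matches
      std::printf("slot = %d\n", slot);
      return 0;
    }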
--- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1111,6 +1111,7 @@ // returns. nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, methodHandle method, + int compile_id, int total_in_args, int comp_args_on_stack, BasicType *in_sig_bt, @@ -1854,6 +1855,7 @@ __ flush(); nmethod *nm = nmethod::new_native_nmethod(method, + compile_id, masm->code(), vep_offset, frame_complete,
--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1174,6 +1174,7 @@ // returns. nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, methodHandle method, + int compile_id, int total_in_args, int comp_args_on_stack, BasicType *in_sig_bt, @@ -1881,6 +1882,7 @@ __ flush(); nmethod *nm = nmethod::new_native_nmethod(method, + compile_id, masm->code(), vep_offset, frame_complete,
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -439,10 +439,6 @@ // Verify that there is really a valid exception in RAX. __ verify_oop(exception_oop); - // Restore SP from BP if the exception PC is a MethodHandle call site. - __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0); - __ cmovptr(Assembler::notEqual, rsp, rbp); - // continue at exception handler (return address removed) // rax: exception // rbx: exception handler @@ -733,18 +729,19 @@ // Input: // start - starting address // count - element count - void gen_write_ref_array_pre_barrier(Register start, Register count) { + void gen_write_ref_array_pre_barrier(Register start, Register count, bool uninitialized_target) { assert_different_registers(start, count); BarrierSet* bs = Universe::heap()->barrier_set(); switch (bs->kind()) { case BarrierSet::G1SATBCT: case BarrierSet::G1SATBCTLogging: - { - __ pusha(); // push registers - __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), - start, count); - __ popa(); - } + // With G1, don't generate the call if we statically know that the target in uninitialized + if (!uninitialized_target) { + __ pusha(); // push registers + __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), + start, count); + __ popa(); + } break; case BarrierSet::CardTableModRef: case BarrierSet::CardTableExtension: @@ -923,7 +920,8 @@ address generate_disjoint_copy(BasicType t, bool aligned, Address::ScaleFactor sf, - address* entry, const char *name) { + address* entry, const char *name, + bool dest_uninitialized = false) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); @@ -945,16 +943,19 @@ __ movptr(from , Address(rsp, 12+ 4)); __ movptr(to , Address(rsp, 12+ 8)); __ movl(count, Address(rsp, 12+ 12)); + + if (entry != NULL) { + *entry = __ pc(); // Entry point from conjoint arraycopy stub. + BLOCK_COMMENT("Entry:"); + } + if (t == T_OBJECT) { __ testl(count, count); __ jcc(Assembler::zero, L_0_count); - gen_write_ref_array_pre_barrier(to, count); + gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); __ mov(saved_to, to); // save 'to' } - *entry = __ pc(); // Entry point from conjoint arraycopy stub. - BLOCK_COMMENT("Entry:"); - __ subptr(to, from); // to --> to_from __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp @@ -1085,7 +1086,8 @@ address generate_conjoint_copy(BasicType t, bool aligned, Address::ScaleFactor sf, address nooverlap_target, - address* entry, const char *name) { + address* entry, const char *name, + bool dest_uninitialized = false) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); @@ -1108,23 +1110,17 @@ __ movptr(src , Address(rsp, 12+ 4)); // from __ movptr(dst , Address(rsp, 12+ 8)); // to __ movl2ptr(count, Address(rsp, 12+12)); // count - if (t == T_OBJECT) { - gen_write_ref_array_pre_barrier(dst, count); - } if (entry != NULL) { *entry = __ pc(); // Entry point from generic arraycopy stub. BLOCK_COMMENT("Entry:"); } - if (t == T_OBJECT) { - __ testl(count, count); - __ jcc(Assembler::zero, L_0_count); - } + // nooverlap_target expects arguments in rsi and rdi. __ mov(from, src); __ mov(to , dst); - // arrays overlap test + // arrays overlap test: dispatch to disjoint stub if necessary. 
RuntimeAddress nooverlap(nooverlap_target); __ cmpptr(dst, src); __ lea(end, Address(src, count, sf, 0)); // src + count * elem_size @@ -1132,6 +1128,12 @@ __ cmpptr(dst, end); __ jump_cc(Assembler::aboveEqual, nooverlap); + if (t == T_OBJECT) { + __ testl(count, count); + __ jcc(Assembler::zero, L_0_count); + gen_write_ref_array_pre_barrier(dst, count, dest_uninitialized); + } + // copy from high to low __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp @@ -1416,7 +1418,7 @@ // rax, == 0 - success // rax, == -1^K - failure, where K is partial transfer count // - address generate_checkcast_copy(const char *name, address* entry) { + address generate_checkcast_copy(const char *name, address* entry, bool dest_uninitialized = false) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); @@ -1451,8 +1453,10 @@ __ movptr(to, to_arg); __ movl2ptr(length, length_arg); - *entry = __ pc(); // Entry point from generic arraycopy stub. - BLOCK_COMMENT("Entry:"); + if (entry != NULL) { + *entry = __ pc(); // Entry point from generic arraycopy stub. + BLOCK_COMMENT("Entry:"); + } //--------------------------------------------------------------- // Assembler stub will be used for this call to arraycopy @@ -1475,7 +1479,7 @@ Address elem_klass_addr(elem, oopDesc::klass_offset_in_bytes()); // Copy from low to high addresses, indexed from the end of each array. - gen_write_ref_array_pre_barrier(to, count); + gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); __ lea(end_from, end_from_addr); __ lea(end_to, end_to_addr); assert(length == count, ""); // else fix next line: @@ -2038,6 +2042,15 @@ generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry, &entry_oop_arraycopy, "oop_arraycopy"); + StubRoutines::_oop_disjoint_arraycopy_uninit = + generate_disjoint_copy(T_OBJECT, true, Address::times_ptr, &entry, + "oop_disjoint_arraycopy_uninit", + /*dest_uninitialized*/true); + StubRoutines::_oop_arraycopy_uninit = + generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry, + NULL, "oop_arraycopy_uninit", + /*dest_uninitialized*/true); + StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy(&entry, "jlong_disjoint_arraycopy"); StubRoutines::_jlong_arraycopy = @@ -2051,20 +2064,20 @@ StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); - StubRoutines::_arrayof_jint_disjoint_arraycopy = - StubRoutines::_jint_disjoint_arraycopy; - StubRoutines::_arrayof_oop_disjoint_arraycopy = - StubRoutines::_oop_disjoint_arraycopy; - StubRoutines::_arrayof_jlong_disjoint_arraycopy = - StubRoutines::_jlong_disjoint_arraycopy; + StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; + StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; + StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; + StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; - StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; - StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; - StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; + StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; + StubRoutines::_arrayof_oop_arraycopy = 
StubRoutines::_oop_arraycopy; + StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; + StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; StubRoutines::_checkcast_arraycopy = - generate_checkcast_copy("checkcast_arraycopy", - &entry_checkcast_arraycopy); + generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); + StubRoutines::_checkcast_arraycopy_uninit = + generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, /*dest_uninitialized*/true); StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy",
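The conjoint stubs now run the array-overlap test before emitting the G1 pre-barrier: when the destination does not reach back into the source range the stub branches to the disjoint (forward-copying) entry, otherwise it copies from high to low addresses. The standalone sketch below makes the same decision for a plain int array; real stubs work on raw addresses scaled by element size:

    #include <cstdio>

    // Forward copy: safe when the destination does not overlap behind the source.
    static void disjoint_copy(int* dst, const int* src, int count) {
      for (int i = 0; i < count; i++) dst[i] = src[i];
    }

    // Conjoint copy: dispatch to the forward version when there is no harmful
    // overlap (dst at/below src, or dst at/after src+count), else copy backwards.
    static void conjoint_copy(int* dst, const int* src, int count) {
      if (dst <= src || dst >= src + count) {
        disjoint_copy(dst, src, count);        // the "jump_cc(aboveEqual, nooverlap)" case
      } else {
        for (int i = count - 1; i >= 0; i--) dst[i] = src[i];
      }
    }

    int main() {
      int a[6] = {1, 2, 3, 4, 5, 6};
      conjoint_copy(a + 1, a, 5);              // overlapping: must copy high-to-low
      for (int v : a) std::printf("%d ", v);   // prints 1 1 2 3 4 5
      std::puts("");
      return 0;
    }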
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -144,8 +144,11 @@ // [ return_from_Java ] <--- rsp // [ argument word n ] // ... - // -8 [ argument word 1 ] - // -7 [ saved r15 ] <--- rsp_after_call + // -28 [ argument word 1 ] + // -27 [ saved xmm15 ] <--- rsp_after_call + // [ saved xmm7-xmm14 ] + // -9 [ saved xmm6 ] (each xmm register takes 2 slots) + // -7 [ saved r15 ] // -6 [ saved r14 ] // -5 [ saved r13 ] // -4 [ saved r12 ] @@ -169,8 +172,11 @@ // Call stub stack layout word offsets from rbp enum call_stub_layout { #ifdef _WIN64 - rsp_after_call_off = -7, - r15_off = rsp_after_call_off, + xmm_save_first = 6, // save from xmm6 + xmm_save_last = 15, // to xmm15 + xmm_save_base = -9, + rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -27 + r15_off = -7, r14_off = -6, r13_off = -5, r12_off = -4, @@ -208,6 +214,13 @@ #endif }; +#ifdef _WIN64 + Address xmm_save(int reg) { + assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range"); + return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize); + } +#endif + address generate_call_stub(address& return_address) { assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 && (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off, @@ -256,8 +269,11 @@ __ movptr(r13_save, r13); __ movptr(r14_save, r14); __ movptr(r15_save, r15); - #ifdef _WIN64 + for (int i = 6; i <= 15; i++) { + __ movdqu(xmm_save(i), as_XMMRegister(i)); + } + const Address rdi_save(rbp, rdi_off * wordSize); const Address rsi_save(rbp, rsi_off * wordSize); @@ -360,6 +376,11 @@ #endif // restore regs belonging to calling function +#ifdef _WIN64 + for (int i = 15; i >= 6; i--) { + __ movdqu(as_XMMRegister(i), xmm_save(i)); + } +#endif __ movptr(r15, r15_save); __ movptr(r14, r14_save); __ movptr(r13, r13_save); @@ -1057,20 +1078,6 @@ return start; } - static address disjoint_byte_copy_entry; - static address disjoint_short_copy_entry; - static address disjoint_int_copy_entry; - static address disjoint_long_copy_entry; - static address disjoint_oop_copy_entry; - - static address byte_copy_entry; - static address short_copy_entry; - static address int_copy_entry; - static address long_copy_entry; - static address oop_copy_entry; - - static address checkcast_copy_entry; - // // Verify that a register contains clean 32-bits positive value // (high 32-bits are 0) so it could be used in 64-bits shifts. @@ -1173,34 +1180,35 @@ // Generate code for an array write pre barrier // // addr - starting address - // count - element count + // count - element count + // tmp - scratch register // // Destroy no registers! // - void gen_write_ref_array_pre_barrier(Register addr, Register count) { + void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) { BarrierSet* bs = Universe::heap()->barrier_set(); switch (bs->kind()) { case BarrierSet::G1SATBCT: case BarrierSet::G1SATBCTLogging: - { - __ pusha(); // push registers - if (count == c_rarg0) { - if (addr == c_rarg1) { - // exactly backwards!! 
- __ xchgptr(c_rarg1, c_rarg0); - } else { - __ movptr(c_rarg1, count); - __ movptr(c_rarg0, addr); - } - - } else { - __ movptr(c_rarg0, addr); - __ movptr(c_rarg1, count); - } - __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2); - __ popa(); + // With G1, don't generate the call if we statically know that the target in uninitialized + if (!dest_uninitialized) { + __ pusha(); // push registers + if (count == c_rarg0) { + if (addr == c_rarg1) { + // exactly backwards!! + __ xchgptr(c_rarg1, c_rarg0); + } else { + __ movptr(c_rarg1, count); + __ movptr(c_rarg0, addr); + } + } else { + __ movptr(c_rarg0, addr); + __ movptr(c_rarg1, count); + } + __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2); + __ popa(); } - break; + break; case BarrierSet::CardTableModRef: case BarrierSet::CardTableExtension: case BarrierSet::ModRef: @@ -1379,7 +1387,7 @@ // disjoint_byte_copy_entry is set to the no-overlap entry point // used by generate_conjoint_byte_copy(). // - address generate_disjoint_byte_copy(bool aligned, const char *name) { + address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); @@ -1399,9 +1407,11 @@ __ enter(); // required for proper stackwalking of RuntimeStub frame assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. - disjoint_byte_copy_entry = __ pc(); - BLOCK_COMMENT("Entry:"); - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) + if (entry != NULL) { + *entry = __ pc(); + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) + BLOCK_COMMENT("Entry:"); + } setup_arg_regs(); // from => rdi, to => rsi, count => rdx // r9 and r10 may be used to save non-volatile registers @@ -1479,7 +1489,8 @@ // dwords or qwords that span cache line boundaries will still be loaded // and stored atomically. // - address generate_conjoint_byte_copy(bool aligned, const char *name) { + address generate_conjoint_byte_copy(bool aligned, address nooverlap_target, + address* entry, const char *name) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); @@ -1494,11 +1505,13 @@ __ enter(); // required for proper stackwalking of RuntimeStub frame assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. - byte_copy_entry = __ pc(); - BLOCK_COMMENT("Entry:"); - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) - - array_overlap_test(disjoint_byte_copy_entry, Address::times_1); + if (entry != NULL) { + *entry = __ pc(); + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) + BLOCK_COMMENT("Entry:"); + } + + array_overlap_test(nooverlap_target, Address::times_1); setup_arg_regs(); // from => rdi, to => rsi, count => rdx // r9 and r10 may be used to save non-volatile registers @@ -1574,7 +1587,7 @@ // disjoint_short_copy_entry is set to the no-overlap entry point // used by generate_conjoint_short_copy(). // - address generate_disjoint_short_copy(bool aligned, const char *name) { + address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); @@ -1593,9 +1606,11 @@ __ enter(); // required for proper stackwalking of RuntimeStub frame assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 
- disjoint_short_copy_entry = __ pc(); - BLOCK_COMMENT("Entry:"); - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) + if (entry != NULL) { + *entry = __ pc(); + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) + BLOCK_COMMENT("Entry:"); + } setup_arg_regs(); // from => rdi, to => rsi, count => rdx // r9 and r10 may be used to save non-volatile registers @@ -1686,7 +1701,8 @@ // or qwords that span cache line boundaries will still be loaded // and stored atomically. // - address generate_conjoint_short_copy(bool aligned, const char *name) { + address generate_conjoint_short_copy(bool aligned, address nooverlap_target, + address *entry, const char *name) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); @@ -1701,11 +1717,13 @@ __ enter(); // required for proper stackwalking of RuntimeStub frame assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. - short_copy_entry = __ pc(); - BLOCK_COMMENT("Entry:"); - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) - - array_overlap_test(disjoint_short_copy_entry, Address::times_2); + if (entry != NULL) { + *entry = __ pc(); + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) + BLOCK_COMMENT("Entry:"); + } + + array_overlap_test(nooverlap_target, Address::times_2); setup_arg_regs(); // from => rdi, to => rsi, count => rdx // r9 and r10 may be used to save non-volatile registers @@ -1773,7 +1791,8 @@ // disjoint_int_copy_entry is set to the no-overlap entry point // used by generate_conjoint_int_oop_copy(). // - address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) { + address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry, + const char *name, bool dest_uninitialized = false) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); @@ -1793,21 +1812,17 @@ __ enter(); // required for proper stackwalking of RuntimeStub frame assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. - (is_oop ? disjoint_oop_copy_entry : disjoint_int_copy_entry) = __ pc(); - - if (is_oop) { - // no registers are destroyed by this call - gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2); + if (entry != NULL) { + *entry = __ pc(); + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) + BLOCK_COMMENT("Entry:"); } - BLOCK_COMMENT("Entry:"); - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) - setup_arg_regs(); // from => rdi, to => rsi, count => rdx // r9 and r10 may be used to save non-volatile registers - if (is_oop) { __ movq(saved_to, to); + gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); } // 'from', 'to' and 'count' are now valid @@ -1867,7 +1882,9 @@ // the hardware handle it. The two dwords within qwords that span // cache line boundaries will still be loaded and stored atomicly. // - address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) { + address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target, + address *entry, const char *name, + bool dest_uninitialized = false) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); @@ -1882,20 +1899,21 @@ __ enter(); // required for proper stackwalking of RuntimeStub frame assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 
+ if (entry != NULL) { + *entry = __ pc(); + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) + BLOCK_COMMENT("Entry:"); + } + + array_overlap_test(nooverlap_target, Address::times_4); + setup_arg_regs(); // from => rdi, to => rsi, count => rdx + // r9 and r10 may be used to save non-volatile registers + if (is_oop) { // no registers are destroyed by this call - gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2); + gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); } - (is_oop ? oop_copy_entry : int_copy_entry) = __ pc(); - BLOCK_COMMENT("Entry:"); - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) - - array_overlap_test(is_oop ? disjoint_oop_copy_entry : disjoint_int_copy_entry, - Address::times_4); - setup_arg_regs(); // from => rdi, to => rsi, count => rdx - // r9 and r10 may be used to save non-volatile registers - assert_clean_int(count, rax); // Make sure 'count' is clean int. // 'from', 'to' and 'count' are now valid __ movptr(dword_count, count); @@ -1959,7 +1977,8 @@ // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the // no-overlap entry point used by generate_conjoint_long_oop_copy(). // - address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, const char *name) { + address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry, + const char *name, bool dest_uninitialized = false) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); @@ -1978,20 +1997,19 @@ // Save no-overlap entry point for generate_conjoint_long_oop_copy() assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. - if (is_oop) { - disjoint_oop_copy_entry = __ pc(); - // no registers are destroyed by this call - gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2); - } else { - disjoint_long_copy_entry = __ pc(); + if (entry != NULL) { + *entry = __ pc(); + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) + BLOCK_COMMENT("Entry:"); } - BLOCK_COMMENT("Entry:"); - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) setup_arg_regs(); // from => rdi, to => rsi, count => rdx // r9 and r10 may be used to save non-volatile registers - // 'from', 'to' and 'qword_count' are now valid + if (is_oop) { + // no registers are destroyed by this call + gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized); + } // Copy from low to high addresses. Use 'to' as scratch. __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); @@ -2045,7 +2063,9 @@ // c_rarg1 - destination array address // c_rarg2 - element count, treated as ssize_t, can be zero // - address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, const char *name) { + address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, + address nooverlap_target, address *entry, + const char *name, bool dest_uninitialized = false) { __ align(CodeEntryAlignment); StubCodeMark mark(this, "StubRoutines", name); address start = __ pc(); @@ -2059,31 +2079,21 @@ __ enter(); // required for proper stackwalking of RuntimeStub frame assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 
- address disjoint_copy_entry = NULL; - if (is_oop) { - assert(!UseCompressedOops, "shouldn't be called for compressed oops"); - disjoint_copy_entry = disjoint_oop_copy_entry; - oop_copy_entry = __ pc(); - array_overlap_test(disjoint_oop_copy_entry, Address::times_8); - } else { - disjoint_copy_entry = disjoint_long_copy_entry; - long_copy_entry = __ pc(); - array_overlap_test(disjoint_long_copy_entry, Address::times_8); + if (entry != NULL) { + *entry = __ pc(); + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) + BLOCK_COMMENT("Entry:"); } - BLOCK_COMMENT("Entry:"); - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) - - array_overlap_test(disjoint_copy_entry, Address::times_8); + + array_overlap_test(nooverlap_target, Address::times_8); setup_arg_regs(); // from => rdi, to => rsi, count => rdx // r9 and r10 may be used to save non-volatile registers - // 'from', 'to' and 'qword_count' are now valid - if (is_oop) { // Save to and count for store barrier __ movptr(saved_count, qword_count); // No registers are destroyed by this call - gen_write_ref_array_pre_barrier(to, saved_count); + gen_write_ref_array_pre_barrier(to, saved_count, dest_uninitialized); } __ jmp(L_copy_32_bytes); @@ -2162,7 +2172,8 @@ // rax == 0 - success // rax == -1^K - failure, where K is partial transfer count // - address generate_checkcast_copy(const char *name) { + address generate_checkcast_copy(const char *name, address *entry, + bool dest_uninitialized = false) { Label L_load_element, L_store_element, L_do_card_marks, L_done; @@ -2216,8 +2227,10 @@ #endif // Caller of this entry point must set up the argument registers. - checkcast_copy_entry = __ pc(); - BLOCK_COMMENT("Entry:"); + if (entry != NULL) { + *entry = __ pc(); + BLOCK_COMMENT("Entry:"); + } // allocate spill slots for r13, r14 enum { @@ -2254,7 +2267,7 @@ Address from_element_addr(end_from, count, TIMES_OOP, 0); Address to_element_addr(end_to, count, TIMES_OOP, 0); - gen_write_ref_array_pre_barrier(to, count); + gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); // Copy from low to high addresses, indexed from the end of each array. __ lea(end_from, end_from_addr); @@ -2334,7 +2347,9 @@ // Examines the alignment of the operands and dispatches // to a long, int, short, or byte copy loop. // - address generate_unsafe_copy(const char *name) { + address generate_unsafe_copy(const char *name, + address byte_copy_entry, address short_copy_entry, + address int_copy_entry, address long_copy_entry) { Label L_long_aligned, L_int_aligned, L_short_aligned; @@ -2432,7 +2447,10 @@ // rax == 0 - success // rax == -1^K - failure, where K is partial transfer count // - address generate_generic_copy(const char *name) { + address generate_generic_copy(const char *name, + address byte_copy_entry, address short_copy_entry, + address int_copy_entry, address oop_copy_entry, + address long_copy_entry, address checkcast_copy_entry) { Label L_failed, L_failed_0, L_objArray; Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs; @@ -2725,33 +2743,75 @@ } void generate_arraycopy_stubs() { - // Call the conjoint generation methods immediately after - // the disjoint ones so that short branches from the former - // to the latter can be generated. 
- StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy"); - StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, "jbyte_arraycopy"); - - StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy"); - StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy"); - - StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, "jint_disjoint_arraycopy"); - StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, "jint_arraycopy"); - - StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, "jlong_disjoint_arraycopy"); - StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, "jlong_arraycopy"); + address entry; + address entry_jbyte_arraycopy; + address entry_jshort_arraycopy; + address entry_jint_arraycopy; + address entry_oop_arraycopy; + address entry_jlong_arraycopy; + address entry_checkcast_arraycopy; + + StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, + "jbyte_disjoint_arraycopy"); + StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy, + "jbyte_arraycopy"); + + StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, + "jshort_disjoint_arraycopy"); + StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy, + "jshort_arraycopy"); + + StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry, + "jint_disjoint_arraycopy"); + StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry, + &entry_jint_arraycopy, "jint_arraycopy"); + + StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry, + "jlong_disjoint_arraycopy"); + StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry, + &entry_jlong_arraycopy, "jlong_arraycopy"); if (UseCompressedOops) { - StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, "oop_disjoint_arraycopy"); - StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, "oop_arraycopy"); + StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry, + "oop_disjoint_arraycopy"); + StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry, + &entry_oop_arraycopy, "oop_arraycopy"); + StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry, + "oop_disjoint_arraycopy_uninit", + /*dest_uninitialized*/true); + StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry, + NULL, "oop_arraycopy_uninit", + /*dest_uninitialized*/true); } else { - StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, "oop_disjoint_arraycopy"); - StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, "oop_arraycopy"); + StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry, + "oop_disjoint_arraycopy"); + StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry, + &entry_oop_arraycopy, "oop_arraycopy"); + StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry, + "oop_disjoint_arraycopy_uninit", + /*dest_uninitialized*/true); + StubRoutines::_oop_arraycopy_uninit 
= generate_conjoint_long_oop_copy(false, true, entry, + NULL, "oop_arraycopy_uninit", + /*dest_uninitialized*/true); } - StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy"); - StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy"); - StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy"); + StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); + StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, + /*dest_uninitialized*/true); + + StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", + entry_jbyte_arraycopy, + entry_jshort_arraycopy, + entry_jint_arraycopy, + entry_jlong_arraycopy); + StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", + entry_jbyte_arraycopy, + entry_jshort_arraycopy, + entry_jint_arraycopy, + entry_oop_arraycopy, + entry_jlong_arraycopy, + entry_checkcast_arraycopy); StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); @@ -2776,6 +2836,9 @@ StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; + + StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; + StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; } void generate_math_stubs() { @@ -3069,20 +3132,6 @@ } }; // end class declaration -address StubGenerator::disjoint_byte_copy_entry = NULL; -address StubGenerator::disjoint_short_copy_entry = NULL; -address StubGenerator::disjoint_int_copy_entry = NULL; -address StubGenerator::disjoint_long_copy_entry = NULL; -address StubGenerator::disjoint_oop_copy_entry = NULL; - -address StubGenerator::byte_copy_entry = NULL; -address StubGenerator::short_copy_entry = NULL; -address StubGenerator::int_copy_entry = NULL; -address StubGenerator::long_copy_entry = NULL; -address StubGenerator::oop_copy_entry = NULL; - -address StubGenerator::checkcast_copy_entry = NULL; - void StubGenerator_generate(CodeBuffer* code, bool all) { StubGenerator g(code, all); }
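Note on the stubGenerator_x86_64.cpp hunks above: the Win64 part of generate_call_stub now preserves xmm6-xmm15, which are callee-saved under the Windows x64 calling convention, and the arraycopy stubs stop publishing their entry points through static fields, threading them instead through the new address* out-parameters. Each XMM register occupies two 8-byte slots, so the revised layout comment follows directly from the call_stub_layout enum. A minimal standalone sketch (plain C++, not HotSpot code) that reproduces only that offset arithmetic and checks the numbers quoted in the layout comment:

    #include <cassert>
    #include <cstdio>

    // Mirrors the call_stub_layout arithmetic from the hunk above (word offsets from rbp).
    enum {
      xmm_save_first = 6,                                                     // save from xmm6
      xmm_save_last  = 15,                                                    // to xmm15
      xmm_save_base  = -9,                                                    // slot pair for xmm6
      rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first) // -27
    };

    // Word offset of the slot pair used for a given xmm register (two slots per register).
    static int xmm_save_off(int reg) {
      assert(reg >= xmm_save_first && reg <= xmm_save_last);
      return xmm_save_base - (reg - xmm_save_first) * 2;
    }

    int main() {
      assert(xmm_save_off(6)  == -9);    // matches "-9 [ saved xmm6 ]"
      assert(xmm_save_off(15) == -27);   // matches "-27 [ saved xmm15 ] <--- rsp_after_call"
      assert(rsp_after_call_off == -27);
      printf("xmm6 slot %d, xmm15 slot %d, rsp_after_call_off %d\n",
             xmm_save_off(6), xmm_save_off(15), rsp_after_call_off);
      return 0;
    }

The stub saves and restores with movdqu, so the 16-byte accesses do not rely on these rbp-relative slots being 16-byte aligned.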
--- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1527,7 +1527,7 @@ if (interpreter_frame != NULL) { #ifdef ASSERT - if (!EnableMethodHandles) + if (!EnableInvokeDynamic) // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences? // Probably, since deoptimization doesn't work yet. assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
--- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1541,7 +1541,7 @@ tempcount* Interpreter::stackElementWords + popframe_extra_args; if (interpreter_frame != NULL) { #ifdef ASSERT - if (!EnableMethodHandles) + if (!EnableInvokeDynamic) // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences? // Probably, since deoptimization doesn't work yet. assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
--- a/src/cpu/x86/vm/templateTable_x86_32.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/templateTable_x86_32.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "asm/assembler.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" #include "interpreter/templateTable.hpp" @@ -391,8 +392,8 @@ void TemplateTable::fast_aldc(bool wide) { transition(vtos, atos); - if (!EnableMethodHandles) { - // We should not encounter this bytecode if !EnableMethodHandles. + if (!EnableInvokeDynamic) { + // We should not encounter this bytecode if !EnableInvokeDynamic. // The verifier will stop it. However, if we get past the verifier, // this will stop the thread in a reasonable way, without crashing the JVM. __ call_VM(noreg, CAST_FROM_FN_PTR(address, @@ -1939,18 +1940,10 @@ __ movl(temp, Address(array, h, Address::times_8, 0*wordSize)); __ bswapl(temp); __ cmpl(key, temp); - if (VM_Version::supports_cmov()) { - __ cmovl(Assembler::less , j, h); // j = h if (key < array[h].fast_match()) - __ cmovl(Assembler::greaterEqual, i, h); // i = h if (key >= array[h].fast_match()) - } else { - Label set_i, end_of_if; - __ jccb(Assembler::greaterEqual, set_i); // { - __ mov(j, h); // j = h; - __ jmp(end_of_if); // } - __ bind(set_i); // else { - __ mov(i, h); // i = h; - __ bind(end_of_if); // } - } + // j = h if (key < array[h].fast_match()) + __ cmov32(Assembler::less , j, h); + // i = h if (key >= array[h].fast_match()) + __ cmov32(Assembler::greaterEqual, i, h); // while (i+1 < j) __ bind(entry); __ leal(h, Address(i, 1)); // i+1 @@ -3110,7 +3103,7 @@ __ profile_call(rsi); } - __ movptr(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx))); + __ movptr(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rcx))); __ null_check(rcx_method_handle); __ prepare_to_jump_from_interpreted(); __ jump_to_method_handle_entry(rcx_method_handle, rdx); @@ -3478,22 +3471,14 @@ // find a free slot in the monitor block (result in rdx) { Label entry, loop, exit; - __ movptr(rcx, monitor_block_top); // points to current entry, starting with top-most entry - __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block + __ movptr(rcx, monitor_block_top); // points to current entry, starting with top-most entry + + __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block __ jmpb(entry); __ bind(loop); __ cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); // check if current entry is used - -// TODO - need new func here - kbt - if (VM_Version::supports_cmov()) { - __ cmov(Assembler::equal, rdx, rcx); // if not used then remember entry in rdx - } else { - Label L; - __ jccb(Assembler::notEqual, L); - __ mov(rdx, rcx); // if not used then remember entry in rdx - __ bind(L); - } + __ cmovptr(Assembler::equal, rdx, rcx); // if not used then remember entry in rdx __ cmpptr(rax, Address(rcx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object __ jccb(Assembler::equal, exit); // if same object then stop searching __ addptr(rcx, entry_size); // otherwise advance to next entry
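Besides the java_lang_invoke_CallSite rename, the notable change in the templateTable_x86_32.cpp hunks above is that the open-coded "cmov or branch" sequences are folded into the cmov32/cmovptr macro-assembler helpers, which hide the supports_cmov() fallback. The two conditional moves in the binary-search code implement one branchless step of a search over the sorted lookupswitch table; a scalar model of the loop they belong to (a reference only, assuming the match values are already byte-swapped as in the emitted code):

    #include <cstdio>

    // Scalar model of the branchless binary-search step emitted for lookupswitch.
    // match[] is the sorted match table; returns the candidate index i, and the
    // caller still compares match[i] against key before taking the target.
    static int binary_search_model(const int* match, int n, int key) {
      int i = 0, j = n;
      while (i + 1 < j) {
        int h = (i + j) >> 1;              // i <= h < j
        // These two selects are exactly the cmov32 pair in the hunk above:
        j = (key <  match[h]) ? h : j;     // j = h if (key < array[h].fast_match())
        i = (key >= match[h]) ? h : i;     // i = h if (key >= array[h].fast_match())
      }
      return i;
    }

    int main() {
      const int match[] = { -5, 0, 7, 42, 100 };
      printf("candidate index for key 42: %d\n", binary_search_model(match, 5, 42)); // 3
      return 0;
    }

After the loop, a mismatch between match[i] and key falls back to the default target, just as the emitted code does.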
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/templateTable_x86_64.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -405,8 +405,8 @@ void TemplateTable::fast_aldc(bool wide) { transition(vtos, atos); - if (!EnableMethodHandles) { - // We should not encounter this bytecode if !EnableMethodHandles. + if (!EnableInvokeDynamic) { + // We should not encounter this bytecode if !EnableInvokeDynamic. // The verifier will stop it. However, if we get past the verifier, // this will stop the thread in a reasonable way, without crashing the JVM. __ call_VM(noreg, CAST_FROM_FN_PTR(address, @@ -3145,7 +3145,7 @@ __ profile_call(r13); } - __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx))); + __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rcx))); __ null_check(rcx_method_handle); __ prepare_to_jump_from_interpreted(); __ jump_to_method_handle_entry(rcx_method_handle, rdx);
--- a/src/cpu/x86/vm/vm_version_x86.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/vm_version_x86.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All Rights Reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -348,7 +348,7 @@ } char buf[256]; - jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", + jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", cores_per_cpu(), threads_per_core(), cpu_family(), _model, _stepping, (supports_cmov() ? ", cmov" : ""), @@ -363,8 +363,7 @@ (supports_sse4_2() ? ", sse4.2" : ""), (supports_popcnt() ? ", popcnt" : ""), (supports_mmx_ext() ? ", mmxext" : ""), - (supports_3dnow() ? ", 3dnow" : ""), - (supports_3dnow2() ? ", 3dnowext" : ""), + (supports_3dnow_prefetch() ? ", 3dnowpref" : ""), (supports_lzcnt() ? ", lzcnt": ""), (supports_sse4a() ? ", sse4a": ""), (supports_ht() ? ", ht": "")); @@ -429,6 +428,11 @@ UseXmmI2D = false; } } + if( FLAG_IS_DEFAULT(UseSSE42Intrinsics) ) { + if( supports_sse4_2() && UseSSE >= 4 ) { + UseSSE42Intrinsics = true; + } + } // Use count leading zeros count instruction if available. if (supports_lzcnt()) { @@ -436,6 +440,13 @@ UseCountLeadingZerosInstruction = true; } } + + // On family 21 processors default is no sw prefetch + if ( cpu_family() == 21 ) { + if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) { + AllocatePrefetchStyle = 0; + } + } } if( is_intel() ) { // Intel cpus specific settings @@ -510,13 +521,13 @@ // set valid Prefetch instruction if( ReadPrefetchInstr < 0 ) ReadPrefetchInstr = 0; if( ReadPrefetchInstr > 3 ) ReadPrefetchInstr = 3; - if( ReadPrefetchInstr == 3 && !supports_3dnow() ) ReadPrefetchInstr = 0; - if( !supports_sse() && supports_3dnow() ) ReadPrefetchInstr = 3; + if( ReadPrefetchInstr == 3 && !supports_3dnow_prefetch() ) ReadPrefetchInstr = 0; + if( !supports_sse() && supports_3dnow_prefetch() ) ReadPrefetchInstr = 3; if( AllocatePrefetchInstr < 0 ) AllocatePrefetchInstr = 0; if( AllocatePrefetchInstr > 3 ) AllocatePrefetchInstr = 3; - if( AllocatePrefetchInstr == 3 && !supports_3dnow() ) AllocatePrefetchInstr=0; - if( !supports_sse() && supports_3dnow() ) AllocatePrefetchInstr = 3; + if( AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch() ) AllocatePrefetchInstr=0; + if( !supports_sse() && supports_3dnow_prefetch() ) AllocatePrefetchInstr = 3; // Allocation prefetch settings intx cache_line_size = L1_data_cache_line_size(); @@ -564,10 +575,10 @@ logical_processors_per_package()); tty->print_cr("UseSSE=%d",UseSSE); tty->print("Allocation: "); - if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow()) { + if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow_prefetch()) { tty->print_cr("no prefetching"); } else { - if (UseSSE == 0 && supports_3dnow()) { + if (UseSSE == 0 && supports_3dnow_prefetch()) { tty->print("PREFETCHW"); } else if (UseSSE >= 1) { if (AllocatePrefetchInstr == 0) {
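Two new CPU-dependent defaults appear in the vm_version_x86.cpp hunks above: UseSSE42Intrinsics is switched on when SSE4.2 is present and UseSSE >= 4 (this is what later enables the string_indexof_con patterns in the .ad files), and AMD family 21 parts default to AllocatePrefetchStyle = 0. Both are guarded by FLAG_IS_DEFAULT, so an explicit command-line setting still wins. A tiny sketch of that defaulting idiom outside HotSpot (BoolFlag and the function names here are hypothetical stand-ins, not the real flag machinery):

    #include <cstdio>

    // Hypothetical stand-in for a HotSpot-style flag: value plus "was it set by the user?".
    struct BoolFlag { bool value; bool set_on_command_line; };

    // Mirror of the "if (FLAG_IS_DEFAULT(X)) X = ..." idiom used in the hunk above:
    // hardware-derived defaults never override an explicit user choice.
    static void apply_cpu_default(BoolFlag& flag, bool cpu_supports_it) {
      if (!flag.set_on_command_line && cpu_supports_it)
        flag.value = true;
    }

    int main() {
      BoolFlag by_default = { false, /*set_on_command_line=*/false };
      apply_cpu_default(by_default, /*cpu_supports_it=*/true);
      printf("enabled by CPU default: %d\n", by_default.value);        // 1

      BoolFlag user_disabled = { false, /*set_on_command_line=*/true }; // e.g. -XX:-UseSSE42Intrinsics
      apply_cpu_default(user_disabled, true);
      printf("user override preserved: %d\n", user_disabled.value);     // 0
      return 0;
    }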
--- a/src/cpu/x86/vm/vm_version_x86.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/vm_version_x86.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -188,7 +188,8 @@ CPU_FXSR = (1 << 2), CPU_HT = (1 << 3), CPU_MMX = (1 << 4), - CPU_3DNOW = (1 << 5), // 3DNow comes from cpuid 0x80000001 (EDX) + CPU_3DNOW_PREFETCH = (1 << 5), // Processor supports 3dnow prefetch and prefetchw instructions + // may not necessarily support other 3dnow instructions CPU_SSE = (1 << 6), CPU_SSE2 = (1 << 7), CPU_SSE3 = (1 << 8), // SSE3 comes from cpuid 1 (ECX) @@ -328,8 +329,9 @@ // AMD features. if (is_amd()) { - if (_cpuid_info.ext_cpuid1_edx.bits.tdnow != 0) - result |= CPU_3DNOW; + if ((_cpuid_info.ext_cpuid1_edx.bits.tdnow != 0) || + (_cpuid_info.ext_cpuid1_ecx.bits.prefetchw != 0)) + result |= CPU_3DNOW_PREFETCH; if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt != 0) result |= CPU_LZCNT; if (_cpuid_info.ext_cpuid1_ecx.bits.sse4a != 0) @@ -446,9 +448,8 @@ // // AMD features // - static bool supports_3dnow() { return (_cpuFeatures & CPU_3DNOW) != 0; } + static bool supports_3dnow_prefetch() { return (_cpuFeatures & CPU_3DNOW_PREFETCH) != 0; } static bool supports_mmx_ext() { return is_amd() && _cpuid_info.ext_cpuid1_edx.bits.mmx_amd != 0; } - static bool supports_3dnow2() { return is_amd() && _cpuid_info.ext_cpuid1_edx.bits.tdnow2 != 0; } static bool supports_lzcnt() { return (_cpuFeatures & CPU_LZCNT) != 0; } static bool supports_sse4a() { return (_cpuFeatures & CPU_SSE4A) != 0; }
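In the vm_version_x86.hpp hunk above, CPU_3DNOW becomes CPU_3DNOW_PREFETCH: the VM only needs to know whether PREFETCH/PREFETCHW are usable, which the detection code accepts either via the legacy 3DNow! bit or via the dedicated prefetch bit (and, in this version, only on AMD CPUs). A hedged sketch of the same query done with GCC/Clang's <cpuid.h>; the exact bit positions used below (3DNow! in EDX bit 31 and PREFETCHW in ECX bit 8 of leaf 0x80000001) are my reading of the vendor manuals, not something stated in this changeset:

    #include <cpuid.h>
    #include <cstdio>

    // Returns true if PREFETCHW can be used, mirroring the CPU_3DNOW_PREFETCH idea above:
    // either the legacy 3DNow! feature bit or the dedicated prefetch bit is enough.
    static bool supports_3dnow_prefetch_sketch() {
      unsigned eax, ebx, ecx, edx;
      if (!__get_cpuid(0x80000001u, &eax, &ebx, &ecx, &edx))
        return false;                          // extended leaf not available
      const bool tdnow     = (edx >> 31) & 1;  // 3DNow! (assumed bit position)
      const bool prefetchw = (ecx >> 8)  & 1;  // 3DNowPrefetch / PREFETCHW (assumed bit position)
      return tdnow || prefetchw;
    }

    int main() {
      printf("prefetchw usable: %s\n", supports_3dnow_prefetch_sketch() ? "yes" : "no");
      return 0;
    }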
--- a/src/cpu/x86/vm/x86_32.ad Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/x86_32.ad Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ // -// Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -1393,6 +1393,10 @@ // registers? True for Intel but false for most RISCs const bool Matcher::clone_shift_expressions = true; +// Do we need to mask the count passed to shift instructions or does +// the cpu only look at the lower 5/6 bits anyway? +const bool Matcher::need_masked_shift_count = false; + bool Matcher::narrow_oop_use_complex_address() { ShouldNotCallThis(); return true; @@ -3419,7 +3423,7 @@ masm.movptr(boxReg, tmpReg); // consider: LEA box, [tmp-2] // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes - if ((EmitSync & 2048) && VM_Version::supports_3dnow() && os::is_MP()) { + if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) { // prefetchw [eax + Offset(_owner)-2] masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2)); } @@ -3463,7 +3467,7 @@ masm.movptr(boxReg, tmpReg) ; // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes - if ((EmitSync & 2048) && VM_Version::supports_3dnow() && os::is_MP()) { + if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) { // prefetchw [eax + Offset(_owner)-2] masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2)); } @@ -3610,7 +3614,7 @@ // See also http://gee.cs.oswego.edu/dl/jmm/cookbook.html. masm.get_thread (boxReg) ; - if ((EmitSync & 4096) && VM_Version::supports_3dnow() && os::is_MP()) { + if ((EmitSync & 4096) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) { // prefetchw [ebx + Offset(_owner)-2] masm.prefetchw(Address(rbx, ObjectMonitor::owner_offset_in_bytes()-2)); } @@ -7329,7 +7333,7 @@ // Must be safe to execute with invalid address (cannot fault). instruct prefetchr0( memory mem ) %{ - predicate(UseSSE==0 && !VM_Version::supports_3dnow()); + predicate(UseSSE==0 && !VM_Version::supports_3dnow_prefetch()); match(PrefetchRead mem); ins_cost(0); size(0); @@ -7339,7 +7343,7 @@ %} instruct prefetchr( memory mem ) %{ - predicate(UseSSE==0 && VM_Version::supports_3dnow() || ReadPrefetchInstr==3); + predicate(UseSSE==0 && VM_Version::supports_3dnow_prefetch() || ReadPrefetchInstr==3); match(PrefetchRead mem); ins_cost(100); @@ -7383,7 +7387,7 @@ %} instruct prefetchw0( memory mem ) %{ - predicate(UseSSE==0 && !VM_Version::supports_3dnow()); + predicate(UseSSE==0 && !VM_Version::supports_3dnow_prefetch()); match(PrefetchWrite mem); ins_cost(0); size(0); @@ -7393,7 +7397,7 @@ %} instruct prefetchw( memory mem ) %{ - predicate(UseSSE==0 && VM_Version::supports_3dnow() || AllocatePrefetchInstr==3); + predicate(UseSSE==0 && VM_Version::supports_3dnow_prefetch() || AllocatePrefetchInstr==3); match( PrefetchWrite mem ); ins_cost(100); @@ -12658,17 +12662,46 @@ ins_pipe( pipe_slow ); %} +// fast search of substring with known size. 
+instruct string_indexof_con(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, immI int_cnt2, + eBXRegI result, regXD vec, eAXRegI cnt2, eCXRegI tmp, eFlagsReg cr) %{ + predicate(UseSSE42Intrinsics); + match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2))); + effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr); + + format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $vec, $cnt1, $cnt2, $tmp" %} + ins_encode %{ + int icnt2 = (int)$int_cnt2$$constant; + if (icnt2 >= 8) { + // IndexOf for constant substrings with size >= 8 elements + // which don't need to be loaded through stack. + __ string_indexofC8($str1$$Register, $str2$$Register, + $cnt1$$Register, $cnt2$$Register, + icnt2, $result$$Register, + $vec$$XMMRegister, $tmp$$Register); + } else { + // Small strings are loaded through stack if they cross page boundary. + __ string_indexof($str1$$Register, $str2$$Register, + $cnt1$$Register, $cnt2$$Register, + icnt2, $result$$Register, + $vec$$XMMRegister, $tmp$$Register); + } + %} + ins_pipe( pipe_slow ); +%} + instruct string_indexof(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, eAXRegI cnt2, - eBXRegI result, regXD tmp1, eCXRegI tmp2, eFlagsReg cr) %{ + eBXRegI result, regXD vec, eCXRegI tmp, eFlagsReg cr) %{ predicate(UseSSE42Intrinsics); match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2))); - effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp2, KILL cr); - - format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp2, $tmp1" %} + effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr); + + format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result // KILL all" %} ins_encode %{ __ string_indexof($str1$$Register, $str2$$Register, - $cnt1$$Register, $cnt2$$Register, $result$$Register, - $tmp1$$XMMRegister, $tmp2$$Register); + $cnt1$$Register, $cnt2$$Register, + (-1), $result$$Register, + $vec$$XMMRegister, $tmp$$Register); %} ins_pipe( pipe_slow ); %}
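The new string_indexof_con pattern added to x86_32.ad above handles StrIndexOf when the needle length is a compile-time constant: constants of 8 or more chars go straight to string_indexofC8, while shorter constant needles keep the path that stages small strings through the stack when they might cross a page boundary. As a reference for what the intrinsic is expected to compute (not for how the SSE4.2 string instructions compute it), a plain scalar indexOf over UTF-16 char arrays:

    #include <cstdint>
    #include <cstdio>

    // Scalar reference for the StrIndexOf result: index of the first occurrence of
    // needle in hay, or -1 if absent. This is only a behavioral model of the stub.
    static int index_of_ref(const uint16_t* hay, int hay_len,
                            const uint16_t* needle, int needle_len) {
      if (needle_len == 0) return 0;
      for (int i = 0; i + needle_len <= hay_len; i++) {
        int j = 0;
        while (j < needle_len && hay[i + j] == needle[j]) j++;
        if (j == needle_len) return i;
      }
      return -1;
    }

    int main() {
      const uint16_t hay[]    = { 'a', 'b', 'c', 'a', 'b', 'd' };
      const uint16_t needle[] = { 'a', 'b', 'd' };
      printf("index: %d\n", index_of_ref(hay, 6, needle, 3));  // prints 3
      return 0;
    }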
--- a/src/cpu/x86/vm/x86_64.ad Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/x86/vm/x86_64.ad Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ // -// Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -574,12 +574,11 @@ // In os_cpu .ad file // int MachCallRuntimeNode::ret_addr_offset() -// Indicate if the safepoint node needs the polling page as an input. -// Since amd64 does not have absolute addressing but RIP-relative -// addressing and the polling page is within 2G, it doesn't. +// Indicate if the safepoint node needs the polling page as an input, +// it does if the polling page is more than disp32 away. bool SafePointNode::needs_polling_address_input() { - return false; + return Assembler::is_polling_page_far(); } // @@ -992,15 +991,21 @@ framesize -= 2*wordSize; if (framesize) { - st->print_cr("addq\trsp, %d\t# Destroy frame", framesize); + st->print_cr("addq rsp, %d\t# Destroy frame", framesize); st->print("\t"); } - st->print_cr("popq\trbp"); + st->print_cr("popq rbp"); if (do_polling() && C->is_method_compilation()) { - st->print_cr("\ttestl\trax, [rip + #offset_to_poll_page]\t" - "# Safepoint: poll for GC"); st->print("\t"); + if (Assembler::is_polling_page_far()) { + st->print_cr("movq rscratch1, #polling_page_address\n\t" + "testl rax, [rscratch1]\t" + "# Safepoint: poll for GC"); + } else { + st->print_cr("testl rax, [rip + #offset_to_poll_page]\t" + "# Safepoint: poll for GC"); + } } } #endif @@ -1033,45 +1038,22 @@ emit_opcode(cbuf, 0x58 | RBP_enc); if (do_polling() && C->is_method_compilation()) { - // testl %rax, off(%rip) // Opcode + ModRM + Disp32 == 6 bytes - // XXX reg_mem doesn't support RIP-relative addressing yet - cbuf.set_insts_mark(); - cbuf.relocate(cbuf.insts_mark(), relocInfo::poll_return_type, 0); // XXX - emit_opcode(cbuf, 0x85); // testl - emit_rm(cbuf, 0x0, RAX_enc, 0x5); // 00 rax 101 == 0x5 - // cbuf.insts_mark() is beginning of instruction - emit_d32_reloc(cbuf, os::get_polling_page()); -// relocInfo::poll_return_type, + MacroAssembler _masm(&cbuf); + AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type); + if (Assembler::is_polling_page_far()) { + __ lea(rscratch1, polling_page); + __ relocate(relocInfo::poll_return_type); + __ testl(rax, Address(rscratch1, 0)); + } else { + __ testl(rax, polling_page); + } } } uint MachEpilogNode::size(PhaseRegAlloc* ra_) const { - Compile* C = ra_->C; - int framesize = C->frame_slots() << LogBytesPerInt; - assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned"); - // Remove word for return adr already pushed - // and RBP - framesize -= 2*wordSize; - - uint size = 0; - - if (do_polling() && C->is_method_compilation()) { - size += 6; - } - - // count popq rbp - size++; - - if (framesize) { - if (framesize < 0x80) { - size += 4; - } else if (framesize) { - size += 7; - } - } - - return size; + return MachNode::size(ra_); // too many variables; just compute it + // the hard way } int MachEpilogNode::reloc() const @@ -2000,6 +1982,10 @@ // into registers? True for Intel but false for most RISCs const bool Matcher::clone_shift_expressions = true; +// Do we need to mask the count passed to shift instructions or does +// the cpu only look at the lower 5/6 bits anyway? 
+const bool Matcher::need_masked_shift_count = false; + bool Matcher::narrow_oop_use_complex_address() { assert(UseCompressedOops, "only for compressed oops code"); return (LogMinObjAlignmentInBytes <= 3); @@ -3406,8 +3392,8 @@ } if (EmitSync & 1) { // Without cast to int32_t a movptr will destroy r10 which is typically obj - masm.movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ; - masm.cmpptr(rsp, (int32_t)NULL_WORD) ; + masm.movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ; + masm.cmpptr(rsp, (int32_t)NULL_WORD) ; } else if (EmitSync & 2) { Label DONE_LABEL; @@ -3435,10 +3421,10 @@ } else { Label DONE_LABEL, IsInflated, Egress; - masm.movptr(tmpReg, Address(objReg, 0)) ; + masm.movptr(tmpReg, Address(objReg, 0)) ; masm.testl (tmpReg, 0x02) ; // inflated vs stack-locked|neutral|biased - masm.jcc (Assembler::notZero, IsInflated) ; - + masm.jcc (Assembler::notZero, IsInflated) ; + // it's stack-locked, biased or neutral // TODO: optimize markword triage order to reduce the number of // conditional branches in the most common cases. @@ -3452,9 +3438,9 @@ } // was q will it destroy high? - masm.orl (tmpReg, 1) ; - masm.movptr(Address(boxReg, 0), tmpReg) ; - if (os::is_MP()) { masm.lock(); } + masm.orl (tmpReg, 1) ; + masm.movptr(Address(boxReg, 0), tmpReg) ; + if (os::is_MP()) { masm.lock(); } masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg if (_counters != NULL) { masm.cond_inc32(Assembler::equal, @@ -3481,16 +3467,16 @@ // fetched _owner. If the CAS is successful we may // avoid an RTO->RTS upgrade on the $line. // Without cast to int32_t a movptr will destroy r10 which is typically obj - masm.movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ; - - masm.mov (boxReg, tmpReg) ; - masm.movptr (tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; - masm.testptr(tmpReg, tmpReg) ; - masm.jcc (Assembler::notZero, DONE_LABEL) ; + masm.movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ; + + masm.mov (boxReg, tmpReg) ; + masm.movptr (tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; + masm.testptr(tmpReg, tmpReg) ; + masm.jcc (Assembler::notZero, DONE_LABEL) ; // It's inflated and appears unlocked - if (os::is_MP()) { masm.lock(); } - masm.cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; + if (os::is_MP()) { masm.lock(); } + masm.cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; // Intentional fall-through into DONE_LABEL ... 
masm.bind (DONE_LABEL) ; @@ -3509,8 +3495,8 @@ Register tmpReg = as_Register($tmp$$reg); MacroAssembler masm(&cbuf); - if (EmitSync & 4) { - masm.cmpptr(rsp, 0) ; + if (EmitSync & 4) { + masm.cmpptr(rsp, 0) ; } else if (EmitSync & 8) { Label DONE_LABEL; @@ -3537,25 +3523,25 @@ if (UseBiasedLocking && !UseOptoBiasInlining) { masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL); } - - masm.movptr(tmpReg, Address(objReg, 0)) ; - masm.cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD) ; - masm.jcc (Assembler::zero, DONE_LABEL) ; - masm.testl (tmpReg, 0x02) ; - masm.jcc (Assembler::zero, Stacked) ; - + + masm.movptr(tmpReg, Address(objReg, 0)) ; + masm.cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD) ; + masm.jcc (Assembler::zero, DONE_LABEL) ; + masm.testl (tmpReg, 0x02) ; + masm.jcc (Assembler::zero, Stacked) ; + // It's inflated - masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; - masm.xorptr(boxReg, r15_thread) ; - masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ; - masm.jcc (Assembler::notZero, DONE_LABEL) ; - masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ; - masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ; - masm.jcc (Assembler::notZero, CheckSucc) ; - masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ; - masm.jmp (DONE_LABEL) ; - - if ((EmitSync & 65536) == 0) { + masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; + masm.xorptr(boxReg, r15_thread) ; + masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ; + masm.jcc (Assembler::notZero, DONE_LABEL) ; + masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ; + masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ; + masm.jcc (Assembler::notZero, CheckSucc) ; + masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ; + masm.jmp (DONE_LABEL) ; + + if ((EmitSync & 65536) == 0) { Label LSuccess, LGoSlowPath ; masm.bind (CheckSucc) ; masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ; @@ -3587,9 +3573,9 @@ masm.jmp (DONE_LABEL) ; } - masm.bind (Stacked) ; + masm.bind (Stacked) ; masm.movptr(tmpReg, Address (boxReg, 0)) ; // re-fetch - if (os::is_MP()) { masm.lock(); } + if (os::is_MP()) { masm.lock(); } masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box if (EmitSync & 65536) { @@ -3910,22 +3896,6 @@ // done: %} - - // Safepoint Poll. This polls the safepoint page, and causes an - // exception if it is not readable. Unfortunately, it kills - // RFLAGS in the process. 
- enc_class enc_safepoint_poll - %{ - // testl %rax, off(%rip) // Opcode + ModRM + Disp32 == 6 bytes - // XXX reg_mem doesn't support RIP-relative addressing yet - cbuf.set_insts_mark(); - cbuf.relocate(cbuf.insts_mark(), relocInfo::poll_type, 0); // XXX - emit_opcode(cbuf, 0x85); // testl - emit_rm(cbuf, 0x0, RAX_enc, 0x5); // 00 rax 101 == 0x5 - // cbuf.insts_mark() is beginning of instruction - emit_d32_reloc(cbuf, os::get_polling_page()); -// relocInfo::poll_type, - %} %} @@ -4229,6 +4199,15 @@ interface(CONST_INTER); %} +operand immP_poll() %{ + predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page()); + match(ConP); + + // formats are generated automatically for constants and base registers + format %{ %} + interface(CONST_INTER); +%} + // Pointer Immediate operand immN() %{ match(ConN); @@ -4836,7 +4815,7 @@ %} // Double register operands -operand regD() +operand regD() %{ constraint(ALLOC_IN_RC(double_reg)); match(RegD); @@ -6564,6 +6543,16 @@ ins_pipe(ialu_reg); %} +instruct loadConP_poll(rRegP dst, immP_poll src) %{ + match(Set dst src); + format %{ "movq $dst, $src\t!ptr" %} + ins_encode %{ + AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_type); + __ lea($dst$$Register, polling_page); + %} + ins_pipe(ialu_reg_fat); +%} + instruct loadConP31(rRegP dst, immP31 src, rFlagsReg cr) %{ match(Set dst src); @@ -7237,11 +7226,11 @@ instruct bytes_reverse_unsigned_short(rRegI dst) %{ match(Set dst (ReverseBytesUS dst)); - format %{ "bswapl $dst\n\t" + format %{ "bswapl $dst\n\t" "shrl $dst,16\n\t" %} ins_encode %{ __ bswapl($dst$$Register); - __ shrl($dst$$Register, 16); + __ shrl($dst$$Register, 16); %} ins_pipe( ialu_reg ); %} @@ -7249,11 +7238,11 @@ instruct bytes_reverse_short(rRegI dst) %{ match(Set dst (ReverseBytesS dst)); - format %{ "bswapl $dst\n\t" + format %{ "bswapl $dst\n\t" "sar $dst,16\n\t" %} ins_encode %{ __ bswapl($dst$$Register); - __ sarl($dst$$Register, 16); + __ sarl($dst$$Register, 16); %} ins_pipe( ialu_reg ); %} @@ -7476,7 +7465,7 @@ effect(KILL cr); ins_cost(400); - format %{ + format %{ $$template if (os::is_MP()) { $$emit$$"lock addl [rsp + #0], 0\t! membar_volatile" @@ -8287,7 +8276,7 @@ rFlagsReg cr) %{ match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval))); - + format %{ "cmpxchgq $heap_top_ptr, $newval\t# (ptr) " "If rax == $heap_top_ptr then store $newval into $heap_top_ptr" %} opcode(0x0F, 0xB1); @@ -9850,9 +9839,9 @@ // Xor Register with Immediate -1 instruct xorI_rReg_im1(rRegI dst, immI_M1 imm) %{ - match(Set dst (XorI dst imm)); - - format %{ "not $dst" %} + match(Set dst (XorI dst imm)); + + format %{ "not $dst" %} ins_encode %{ __ notl($dst$$Register); %} @@ -10093,9 +10082,9 @@ // Xor Register with Immediate -1 instruct xorL_rReg_im1(rRegL dst, immL_M1 imm) %{ - match(Set dst (XorL dst imm)); - - format %{ "notq $dst" %} + match(Set dst (XorL dst imm)); + + format %{ "notq $dst" %} ins_encode %{ __ notq($dst$$Register); %} @@ -11598,18 +11587,48 @@ ins_pipe( pipe_slow ); %} +// fast search of substring with known size. 
+instruct string_indexof_con(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI int_cnt2, + rbx_RegI result, regD vec, rax_RegI cnt2, rcx_RegI tmp, rFlagsReg cr) +%{ + predicate(UseSSE42Intrinsics); + match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2))); + effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr); + + format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $vec, $cnt1, $cnt2, $tmp" %} + ins_encode %{ + int icnt2 = (int)$int_cnt2$$constant; + if (icnt2 >= 8) { + // IndexOf for constant substrings with size >= 8 elements + // which don't need to be loaded through stack. + __ string_indexofC8($str1$$Register, $str2$$Register, + $cnt1$$Register, $cnt2$$Register, + icnt2, $result$$Register, + $vec$$XMMRegister, $tmp$$Register); + } else { + // Small strings are loaded through stack if they cross page boundary. + __ string_indexof($str1$$Register, $str2$$Register, + $cnt1$$Register, $cnt2$$Register, + icnt2, $result$$Register, + $vec$$XMMRegister, $tmp$$Register); + } + %} + ins_pipe( pipe_slow ); +%} + instruct string_indexof(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, rax_RegI cnt2, - rbx_RegI result, regD tmp1, rcx_RegI tmp2, rFlagsReg cr) + rbx_RegI result, regD vec, rcx_RegI tmp, rFlagsReg cr) %{ predicate(UseSSE42Intrinsics); match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2))); - effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp2, KILL cr); - - format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1, $tmp2" %} + effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr); + + format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result // KILL all" %} ins_encode %{ __ string_indexof($str1$$Register, $str2$$Register, - $cnt1$$Register, $cnt2$$Register, $result$$Register, - $tmp1$$XMMRegister, $tmp2$$Register); + $cnt1$$Register, $cnt2$$Register, + (-1), $result$$Register, + $vec$$XMMRegister, $tmp$$Register); %} ins_pipe( pipe_slow ); %} @@ -12439,14 +12458,33 @@ // Safepoint Instructions instruct safePoint_poll(rFlagsReg cr) %{ + predicate(!Assembler::is_polling_page_far()); match(SafePoint); effect(KILL cr); - format %{ "testl rax, [rip + #offset_to_poll_page]\t" + format %{ "testl rax, [rip + #offset_to_poll_page]\t" "# Safepoint: poll for GC" %} - size(6); // Opcode + ModRM + Disp32 == 6 bytes ins_cost(125); - ins_encode(enc_safepoint_poll); + ins_encode %{ + AddressLiteral addr(os::get_polling_page(), relocInfo::poll_type); + __ testl(rax, addr); + %} + ins_pipe(ialu_reg_mem); +%} + +instruct safePoint_poll_far(rFlagsReg cr, rRegP poll) +%{ + predicate(Assembler::is_polling_page_far()); + match(SafePoint poll); + effect(KILL cr, USE poll); + + format %{ "testl rax, [$poll]\t" + "# Safepoint: poll for GC" %} + ins_cost(125); + ins_encode %{ + __ relocate(relocInfo::poll_type); + __ testl(rax, Address($poll$$Register, 0)); + %} ins_pipe(ialu_reg_mem); %}
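The x86_64.ad safepoint changes above split polling into a near form (a RIP-relative testl against the polling page) and a far form that first materializes the page address through the new immP_poll/loadConP_poll pattern and tests through that register; SafePointNode::needs_polling_address_input() now reports true exactly in the far case so the matcher keeps such a register live at the poll. A minimal sketch of the reachability test this presumably reduces to (my own illustration, not the body of Assembler::is_polling_page_far):

    #include <cstdint>
    #include <cstdio>

    // RIP-relative addressing reaches only a signed 32-bit displacement, so
    // "testl rax, [rip + disp32]" works only when the polling page lies within
    // +/-2GB of the generated code; otherwise the address has to be put in a
    // register first (loadConP_poll + safePoint_poll_far above).
    static bool polling_page_is_far(uintptr_t code_addr, uintptr_t polling_page) {
      intptr_t disp = (intptr_t)polling_page - (intptr_t)code_addr;
      return disp != (intptr_t)(int32_t)disp;   // true if disp does not fit in disp32
    }

    int main() {
      printf("near: %d\n", polling_page_is_far(0x100000000ULL, 0x100001000ULL)); // 0
      printf("far:  %d\n", polling_page_is_far(0x100000000ULL, 0x700000000ULL)); // 1
      return 0;
    }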
--- a/src/cpu/zero/vm/bytecodeInterpreter_zero.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/zero/vm/bytecodeInterpreter_zero.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,6 +1,6 @@ /* * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright 2007, 2008 Red Hat, Inc. + * Copyright 2007, 2008, 2011 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -150,4 +150,22 @@ #define SET_LOCALS_LONG_FROM_ADDR(addr, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->l = \ ((VMJavaVal64*)(addr))->l) +// VMSlots implementation + +#define VMSLOTS_SLOT(offset) ((intptr_t*)&vmslots[(offset)]) +#define VMSLOTS_ADDR(offset) ((address)vmslots[(offset)]) +#define VMSLOTS_INT(offset) (*((jint*)&vmslots[(offset)])) +#define VMSLOTS_FLOAT(offset) (*((jfloat*)&vmslots[(offset)])) +#define VMSLOTS_OBJECT(offset) ((oop)vmslots[(offset)]) +#define VMSLOTS_DOUBLE(offset) (((VMJavaVal64*)&vmslots[(offset) - 1])->d) +#define VMSLOTS_LONG(offset) (((VMJavaVal64*)&vmslots[(offset) - 1])->l) + +#define SET_VMSLOTS_SLOT(value, offset) (*(intptr_t*)&vmslots[(offset)] = *(intptr_t *)(value)) +#define SET_VMSLOTS_ADDR(value, offset) (*((address *)&vmslots[(offset)]) = (value)) +#define SET_VMSLOTS_INT(value, offset) (*((jint *)&vmslots[(offset)]) = (value)) +#define SET_VMSLOTS_FLOAT(value, offset) (*((jfloat *)&vmslots[(offset)]) = (value)) +#define SET_VMSLOTS_OBJECT(value, offset) (*((oop *)&vmslots[(offset)]) = (value)) +#define SET_VMSLOTS_DOUBLE(value, offset) (((VMJavaVal64*)&vmslots[(offset) - 1])->d = (value)) +#define SET_VMSLOTS_LONG(value, offset) (((VMJavaVal64*)&vmslots[(offset) - 1])->l = (value)) + #endif // CPU_ZERO_VM_BYTECODEINTERPRETER_ZERO_HPP
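The new VMSLOTS macros above give the Zero interpreter's method-handle support direct access to expression-stack slots, mirroring the existing LOCALS macros: category-1 values use a single slot, while longs and doubles are addressed through a 64-bit overlay anchored at offset - 1 so the value spans the reserved slot pair. A small standalone sketch of that convention (VMJavaVal64_sketch is a stand-in for the real VMJavaVal64 union; this is not the interpreter's code):

    #include <cassert>
    #include <cstdint>

    // Stand-in for HotSpot's VMJavaVal64: a 64-bit value overlaid on the slot pair.
    union VMJavaVal64_sketch {
      int64_t l;
      double  d;
    };

    int main() {
      intptr_t vmslots[4] = {0};

      // SET_VMSLOTS_LONG(value, offset): the 64-bit value is addressed at slot
      // offset - 1; the pair (offset - 1, offset) is reserved for it.
      int offset = 1;
      ((VMJavaVal64_sketch*)&vmslots[offset - 1])->l = INT64_C(0x1122334455667788);

      // VMSLOTS_LONG(offset) reads it back through the same overlay.
      int64_t v = ((VMJavaVal64_sketch*)&vmslots[offset - 1])->l;
      assert(v == INT64_C(0x1122334455667788));

      // Category-1 slots (VMSLOTS_INT and friends) use one slot at the given offset.
      *((int32_t*)&vmslots[2]) = 42;
      assert(*((int32_t*)&vmslots[2]) == 42);
      return 0;
    }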
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,6 +1,6 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -56,10 +56,13 @@ #define fixup_after_potential_safepoint() \ method = istate->method() -#define CALL_VM_NOCHECK(func) \ +#define CALL_VM_NOCHECK_NOFIX(func) \ thread->set_last_Java_frame(); \ func; \ - thread->reset_last_Java_frame(); \ + thread->reset_last_Java_frame(); + +#define CALL_VM_NOCHECK(func) \ + CALL_VM_NOCHECK_NOFIX(func) \ fixup_after_potential_safepoint() int CppInterpreter::normal_entry(methodOop method, intptr_t UNUSED, TRAPS) { @@ -177,6 +180,25 @@ method, istate->osr_entry(), istate->osr_buf(), THREAD); return; } + else if (istate->msg() == BytecodeInterpreter::call_method_handle) { + oop method_handle = istate->callee(); + + // Trim back the stack to put the parameters at the top + stack->set_sp(istate->stack() + 1); + + // Make the call + process_method_handle(method_handle, THREAD); + fixup_after_potential_safepoint(); + + // Convert the result + istate->set_stack(stack->sp() - 1); + + // Restore the stack + stack->set_sp(istate->stack_limit() + 1); + + // Resume the interpreter + istate->set_msg(BytecodeInterpreter::method_resume); + } else { ShouldNotReachHere(); } @@ -281,7 +303,7 @@ if (method->is_static()) { istate->set_oop_temp( - method->constants()->pool_holder()->klass_part()->java_mirror()); + method->constants()->pool_holder()->java_mirror()); mirror = istate->oop_temp_addr(); *(dst++) = &mirror; } @@ -607,6 +629,549 @@ return 0; } +int CppInterpreter::method_handle_entry(methodOop method, + intptr_t UNUSED, TRAPS) { + JavaThread *thread = (JavaThread *) THREAD; + ZeroStack *stack = thread->zero_stack(); + int argument_slots = method->size_of_parameters(); + int result_slots = type2size[result_type_of(method)]; + intptr_t *vmslots = stack->sp(); + intptr_t *unwind_sp = vmslots + argument_slots; + + // Find the MethodType + address p = (address) method; + for (jint* pc = method->method_type_offsets_chain(); (*pc) != -1; pc++) { + p = *(address*)(p + (*pc)); + } + oop method_type = (oop) p; + + // The MethodHandle is in the slot after the arguments + oop form = java_lang_invoke_MethodType::form(method_type); + int num_vmslots = java_lang_invoke_MethodTypeForm::vmslots(form); + assert(argument_slots == num_vmslots + 1, "should be"); + oop method_handle = VMSLOTS_OBJECT(num_vmslots); + + // InvokeGeneric requires some extra shuffling + oop mhtype = java_lang_invoke_MethodHandle::type(method_handle); + bool is_exact = mhtype == method_type; + if (!is_exact) { + if (method->intrinsic_id() == vmIntrinsics::_invokeExact) { + CALL_VM_NOCHECK_NOFIX( + InterpreterRuntime::throw_WrongMethodTypeException( + thread, method_type, mhtype)); + // NB all oops trashed! + assert(HAS_PENDING_EXCEPTION, "should do"); + stack->set_sp(unwind_sp); + return 0; + } + assert(method->intrinsic_id() == vmIntrinsics::_invokeGeneric, "should be"); + + // Load up an adapter from the calling type + // NB the x86 code for this (in methodHandles_x86.cpp, search for + // "genericInvoker") is really really odd. 
I'm hoping it's trying + // to accomodate odd VM/class library combinations I can ignore. + oop adapter = java_lang_invoke_MethodTypeForm::genericInvoker(form); + if (adapter == NULL) { + CALL_VM_NOCHECK_NOFIX( + InterpreterRuntime::throw_WrongMethodTypeException( + thread, method_type, mhtype)); + // NB all oops trashed! + assert(HAS_PENDING_EXCEPTION, "should do"); + stack->set_sp(unwind_sp); + return 0; + } + + // Adapters are shared among form-families of method-type. The + // type being called is passed as a trusted first argument so that + // the adapter knows the actual types of its arguments and return + // values. + insert_vmslots(num_vmslots + 1, 1, THREAD); + if (HAS_PENDING_EXCEPTION) { + // NB all oops trashed! + stack->set_sp(unwind_sp); + return 0; + } + + vmslots = stack->sp(); + num_vmslots++; + SET_VMSLOTS_OBJECT(method_type, num_vmslots); + + method_handle = adapter; + } + + // Start processing + process_method_handle(method_handle, THREAD); + if (HAS_PENDING_EXCEPTION) + result_slots = 0; + + // If this is an invokeExact then the eventual callee will not + // have unwound the method handle argument so we have to do it. + // If a result is being returned the it will be above the method + // handle argument we're unwinding. + if (is_exact) { + intptr_t result[2]; + for (int i = 0; i < result_slots; i++) + result[i] = stack->pop(); + stack->pop(); + for (int i = result_slots - 1; i >= 0; i--) + stack->push(result[i]); + } + + // Check + assert(stack->sp() == unwind_sp - result_slots, "should be"); + + // No deoptimized frames on the stack + return 0; +} + +void CppInterpreter::process_method_handle(oop method_handle, TRAPS) { + JavaThread *thread = (JavaThread *) THREAD; + ZeroStack *stack = thread->zero_stack(); + intptr_t *vmslots = stack->sp(); + + bool direct_to_method = false; + BasicType src_rtype = T_ILLEGAL; + BasicType dst_rtype = T_ILLEGAL; + + MethodHandleEntry *entry = + java_lang_invoke_MethodHandle::vmentry(method_handle); + MethodHandles::EntryKind entry_kind = + (MethodHandles::EntryKind) (((intptr_t) entry) & 0xffffffff); + + methodOop method = NULL; + switch (entry_kind) { + case MethodHandles::_invokestatic_mh: + direct_to_method = true; + break; + + case MethodHandles::_invokespecial_mh: + case MethodHandles::_invokevirtual_mh: + case MethodHandles::_invokeinterface_mh: + { + oop receiver = + VMSLOTS_OBJECT( + java_lang_invoke_MethodHandle::vmslots(method_handle) - 1); + if (receiver == NULL) { + stack->set_sp(calculate_unwind_sp(stack, method_handle)); + CALL_VM_NOCHECK_NOFIX( + throw_exception( + thread, vmSymbols::java_lang_NullPointerException())); + // NB all oops trashed! 
+ assert(HAS_PENDING_EXCEPTION, "should do"); + return; + } + if (entry_kind != MethodHandles::_invokespecial_mh) { + int index = java_lang_invoke_DirectMethodHandle::vmindex(method_handle); + instanceKlass* rcvrKlass = + (instanceKlass *) receiver->klass()->klass_part(); + if (entry_kind == MethodHandles::_invokevirtual_mh) { + method = (methodOop) rcvrKlass->start_of_vtable()[index]; + } + else { + oop iclass = java_lang_invoke_MethodHandle::vmtarget(method_handle); + itableOffsetEntry* ki = + (itableOffsetEntry *) rcvrKlass->start_of_itable(); + int i, length = rcvrKlass->itable_length(); + for (i = 0; i < length; i++, ki++ ) { + if (ki->interface_klass() == iclass) + break; + } + if (i == length) { + stack->set_sp(calculate_unwind_sp(stack, method_handle)); + CALL_VM_NOCHECK_NOFIX( + throw_exception( + thread, vmSymbols::java_lang_IncompatibleClassChangeError())); + // NB all oops trashed! + assert(HAS_PENDING_EXCEPTION, "should do"); + return; + } + itableMethodEntry* im = ki->first_method_entry(receiver->klass()); + method = im[index].method(); + if (method == NULL) { + stack->set_sp(calculate_unwind_sp(stack, method_handle)); + CALL_VM_NOCHECK_NOFIX( + throw_exception( + thread, vmSymbols::java_lang_AbstractMethodError())); + // NB all oops trashed! + assert(HAS_PENDING_EXCEPTION, "should do"); + return; + } + } + } + } + direct_to_method = true; + break; + + case MethodHandles::_bound_ref_direct_mh: + case MethodHandles::_bound_int_direct_mh: + case MethodHandles::_bound_long_direct_mh: + direct_to_method = true; + // fall through + case MethodHandles::_bound_ref_mh: + case MethodHandles::_bound_int_mh: + case MethodHandles::_bound_long_mh: + { + BasicType arg_type = T_ILLEGAL; + int arg_mask = -1; + int arg_slots = -1; + MethodHandles::get_ek_bound_mh_info( + entry_kind, arg_type, arg_mask, arg_slots); + int arg_slot = + java_lang_invoke_BoundMethodHandle::vmargslot(method_handle); + + // Create the new slot(s) + intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle); + insert_vmslots(arg_slot, arg_slots, THREAD); + if (HAS_PENDING_EXCEPTION) { + // all oops trashed + stack->set_sp(unwind_sp); + return; + } + vmslots = stack->sp(); + + // Store bound argument into new stack slot + oop arg = java_lang_invoke_BoundMethodHandle::argument(method_handle); + if (arg_type == T_OBJECT) { + assert(arg_slots == 1, "should be"); + SET_VMSLOTS_OBJECT(arg, arg_slot); + } + else { + jvalue arg_value; + arg_type = java_lang_boxing_object::get_value(arg, &arg_value); + switch (arg_type) { + case T_BOOLEAN: + SET_VMSLOTS_INT(arg_value.z, arg_slot); + break; + case T_CHAR: + SET_VMSLOTS_INT(arg_value.c, arg_slot); + break; + case T_BYTE: + SET_VMSLOTS_INT(arg_value.b, arg_slot); + break; + case T_SHORT: + SET_VMSLOTS_INT(arg_value.s, arg_slot); + break; + case T_INT: + SET_VMSLOTS_INT(arg_value.i, arg_slot); + break; + case T_FLOAT: + SET_VMSLOTS_FLOAT(arg_value.f, arg_slot); + break; + case T_LONG: + SET_VMSLOTS_LONG(arg_value.j, arg_slot + 1); + break; + case T_DOUBLE: + SET_VMSLOTS_DOUBLE(arg_value.d, arg_slot + 1); + break; + default: + tty->print_cr("unhandled type %s", type2name(arg_type)); + ShouldNotReachHere(); + } + } + } + break; + + case MethodHandles::_adapter_retype_only: + case MethodHandles::_adapter_retype_raw: + src_rtype = result_type_of_handle( + java_lang_invoke_MethodHandle::vmtarget(method_handle)); + dst_rtype = result_type_of_handle(method_handle); + break; + + case MethodHandles::_adapter_check_cast: + { + int arg_slot = + 
java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); + oop arg = VMSLOTS_OBJECT(arg_slot); + if (arg != NULL) { + klassOop objKlassOop = arg->klass(); + klassOop klassOf = java_lang_Class::as_klassOop( + java_lang_invoke_AdapterMethodHandle::argument(method_handle)); + + if (objKlassOop != klassOf && + !objKlassOop->klass_part()->is_subtype_of(klassOf)) { + ResourceMark rm(THREAD); + const char* objName = Klass::cast(objKlassOop)->external_name(); + const char* klassName = Klass::cast(klassOf)->external_name(); + char* message = SharedRuntime::generate_class_cast_message( + objName, klassName); + + stack->set_sp(calculate_unwind_sp(stack, method_handle)); + CALL_VM_NOCHECK_NOFIX( + throw_exception( + thread, vmSymbols::java_lang_ClassCastException(), message)); + // NB all oops trashed! + assert(HAS_PENDING_EXCEPTION, "should do"); + return; + } + } + } + break; + + case MethodHandles::_adapter_dup_args: + { + int arg_slot = + java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); + int conv = + java_lang_invoke_AdapterMethodHandle::conversion(method_handle); + int num_slots = -MethodHandles::adapter_conversion_stack_move(conv); + assert(num_slots > 0, "should be"); + + // Create the new slot(s) + intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle); + stack->overflow_check(num_slots, THREAD); + if (HAS_PENDING_EXCEPTION) { + // all oops trashed + stack->set_sp(unwind_sp); + return; + } + + // Duplicate the arguments + for (int i = num_slots - 1; i >= 0; i--) + stack->push(*VMSLOTS_SLOT(arg_slot + i)); + + vmslots = stack->sp(); // unused, but let the compiler figure that out + } + break; + + case MethodHandles::_adapter_drop_args: + { + int arg_slot = + java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); + int conv = + java_lang_invoke_AdapterMethodHandle::conversion(method_handle); + int num_slots = MethodHandles::adapter_conversion_stack_move(conv); + assert(num_slots > 0, "should be"); + + remove_vmslots(arg_slot, num_slots, THREAD); // doesn't trap + vmslots = stack->sp(); // unused, but let the compiler figure that out + } + break; + + case MethodHandles::_adapter_opt_swap_1: + case MethodHandles::_adapter_opt_swap_2: + case MethodHandles::_adapter_opt_rot_1_up: + case MethodHandles::_adapter_opt_rot_1_down: + case MethodHandles::_adapter_opt_rot_2_up: + case MethodHandles::_adapter_opt_rot_2_down: + { + int arg1 = + java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); + int conv = + java_lang_invoke_AdapterMethodHandle::conversion(method_handle); + int arg2 = MethodHandles::adapter_conversion_vminfo(conv); + + int swap_bytes = 0, rotate = 0; + MethodHandles::get_ek_adapter_opt_swap_rot_info( + entry_kind, swap_bytes, rotate); + int swap_slots = swap_bytes >> LogBytesPerWord; + + intptr_t tmp; + switch (rotate) { + case 0: // swap + for (int i = 0; i < swap_slots; i++) { + tmp = *VMSLOTS_SLOT(arg1 + i); + SET_VMSLOTS_SLOT(VMSLOTS_SLOT(arg2 + i), arg1 + i); + SET_VMSLOTS_SLOT(&tmp, arg2 + i); + } + break; + + case 1: // up + assert(arg1 - swap_slots > arg2, "should be"); + + tmp = *VMSLOTS_SLOT(arg1); + for (int i = arg1 - swap_slots; i >= arg2; i--) + SET_VMSLOTS_SLOT(VMSLOTS_SLOT(i), i + swap_slots); + SET_VMSLOTS_SLOT(&tmp, arg2); + + break; + + case -1: // down + assert(arg2 - swap_slots > arg1, "should be"); + + tmp = *VMSLOTS_SLOT(arg1); + for (int i = arg1 + swap_slots; i <= arg2; i++) + SET_VMSLOTS_SLOT(VMSLOTS_SLOT(i), i - swap_slots); + SET_VMSLOTS_SLOT(&tmp, arg2); + break; + + default: + ShouldNotReachHere(); + } + } + 
break; + + case MethodHandles::_adapter_opt_i2l: + { + int arg_slot = + java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); + int arg = VMSLOTS_INT(arg_slot); + intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle); + insert_vmslots(arg_slot, 1, THREAD); + if (HAS_PENDING_EXCEPTION) { + // all oops trashed + stack->set_sp(unwind_sp); + return; + } + vmslots = stack->sp(); + arg_slot++; + SET_VMSLOTS_LONG(arg, arg_slot); + } + break; + + case MethodHandles::_adapter_opt_unboxi: + case MethodHandles::_adapter_opt_unboxl: + { + int arg_slot = + java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle); + oop arg = VMSLOTS_OBJECT(arg_slot); + jvalue arg_value; + BasicType arg_type = java_lang_boxing_object::get_value(arg, &arg_value); + if (arg_type == T_LONG || arg_type == T_DOUBLE) { + intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle); + insert_vmslots(arg_slot, 1, THREAD); + if (HAS_PENDING_EXCEPTION) { + // all oops trashed + stack->set_sp(unwind_sp); + return; + } + vmslots = stack->sp(); + arg_slot++; + } + switch (arg_type) { + case T_BOOLEAN: + SET_VMSLOTS_INT(arg_value.z, arg_slot); + break; + case T_CHAR: + SET_VMSLOTS_INT(arg_value.c, arg_slot); + break; + case T_BYTE: + SET_VMSLOTS_INT(arg_value.b, arg_slot); + break; + case T_SHORT: + SET_VMSLOTS_INT(arg_value.s, arg_slot); + break; + case T_INT: + SET_VMSLOTS_INT(arg_value.i, arg_slot); + break; + case T_FLOAT: + SET_VMSLOTS_FLOAT(arg_value.f, arg_slot); + break; + case T_LONG: + SET_VMSLOTS_LONG(arg_value.j, arg_slot); + break; + case T_DOUBLE: + SET_VMSLOTS_DOUBLE(arg_value.d, arg_slot); + break; + default: + tty->print_cr("unhandled type %s", type2name(arg_type)); + ShouldNotReachHere(); + } + } + break; + + default: + tty->print_cr("unhandled entry_kind %s", + MethodHandles::entry_name(entry_kind)); + ShouldNotReachHere(); + } + + // Continue along the chain + if (direct_to_method) { + if (method == NULL) { + method = + (methodOop) java_lang_invoke_MethodHandle::vmtarget(method_handle); + } + address entry_point = method->from_interpreted_entry(); + Interpreter::invoke_method(method, entry_point, THREAD); + } + else { + process_method_handle( + java_lang_invoke_MethodHandle::vmtarget(method_handle), THREAD); + } + // NB all oops now trashed + + // Adapt the result type, if necessary + if (src_rtype != dst_rtype && !HAS_PENDING_EXCEPTION) { + switch (dst_rtype) { + case T_VOID: + for (int i = 0; i < type2size[src_rtype]; i++) + stack->pop(); + return; + + case T_INT: + switch (src_rtype) { + case T_VOID: + stack->overflow_check(1, CHECK); + stack->push(0); + return; + + case T_BOOLEAN: + case T_CHAR: + case T_BYTE: + case T_SHORT: + return; + } + } + + tty->print_cr("unhandled conversion:"); + tty->print_cr("src_rtype = %s", type2name(src_rtype)); + tty->print_cr("dst_rtype = %s", type2name(dst_rtype)); + ShouldNotReachHere(); + } +} + +// The new slots will be inserted before slot insert_before. +// Slots < insert_before will have the same slot number after the insert. +// Slots >= insert_before will become old_slot + num_slots. 
+void CppInterpreter::insert_vmslots(int insert_before, int num_slots, TRAPS) { + JavaThread *thread = (JavaThread *) THREAD; + ZeroStack *stack = thread->zero_stack(); + + // Allocate the space + stack->overflow_check(num_slots, CHECK); + stack->alloc(num_slots * wordSize); + intptr_t *vmslots = stack->sp(); + + // Shuffle everything up + for (int i = 0; i < insert_before; i++) + SET_VMSLOTS_SLOT(VMSLOTS_SLOT(i + num_slots), i); +} + +void CppInterpreter::remove_vmslots(int first_slot, int num_slots, TRAPS) { + JavaThread *thread = (JavaThread *) THREAD; + ZeroStack *stack = thread->zero_stack(); + intptr_t *vmslots = stack->sp(); + + // Move everything down + for (int i = first_slot - 1; i >= 0; i--) + SET_VMSLOTS_SLOT(VMSLOTS_SLOT(i), i + num_slots); + + // Deallocate the space + stack->set_sp(stack->sp() + num_slots); +} + +BasicType CppInterpreter::result_type_of_handle(oop method_handle) { + oop method_type = java_lang_invoke_MethodHandle::type(method_handle); + oop return_type = java_lang_invoke_MethodType::rtype(method_type); + return java_lang_Class::as_BasicType(return_type, (klassOop *) NULL); +} + +intptr_t* CppInterpreter::calculate_unwind_sp(ZeroStack* stack, + oop method_handle) { + oop method_type = java_lang_invoke_MethodHandle::type(method_handle); + oop form = java_lang_invoke_MethodType::form(method_type); + int argument_slots = java_lang_invoke_MethodTypeForm::vmslots(form); + + return stack->sp() + argument_slots; +} + +IRT_ENTRY(void, CppInterpreter::throw_exception(JavaThread* thread, + Symbol* name, + char* message)) + THROW_MSG(name, message); +IRT_END + InterpreterFrame *InterpreterFrame::build(const methodOop method, TRAPS) { JavaThread *thread = (JavaThread *) THREAD; ZeroStack *stack = thread->zero_stack(); @@ -667,7 +1232,7 @@ (BasicObjectLock *) stack->alloc(monitor_words * wordSize); oop object; if (method->is_static()) - object = method->constants()->pool_holder()->klass_part()->java_mirror(); + object = method->constants()->pool_holder()->java_mirror(); else object = (oop) locals[0]; monitor->set_obj(object);
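The comment above pins down the slot-renumbering contract of insert_vmslots()/remove_vmslots(): slots below the insertion point keep their numbers, everything at or above it moves up by num_slots, and removal is the inverse. Below is a minimal standalone sketch of that index arithmetic, assuming a plain array in place of ZeroStack and omitting the VMSLOTS_* macros and overflow checks; the names are illustrative, not HotSpot's.

  #include <cassert>
  #include <cstdio>
  #include <vector>

  // Minimal model of the Zero vmslots layout: slot 0 lives at the current
  // stack pointer and higher slot numbers sit at higher addresses (the
  // stack itself grows downward, so allocation adds cells just below the
  // old sp, which this model represents by inserting at the front).
  struct SlotStack {
    std::vector<long> mem;                 // mem[0] is the slot at sp
    long* sp() { return &mem[0]; }         // slot i is sp()[i]

    // Insert num_slots fresh slots before slot insert_before.  Slots below
    // insert_before keep their numbers; the rest move up by num_slots.
    void insert_vmslots(int insert_before, int num_slots) {
      mem.insert(mem.begin(), num_slots, 0L);   // allocate at the sp end
      for (int i = 0; i < insert_before; i++)
        sp()[i] = sp()[i + num_slots];          // shuffle everything up
    }

    // Inverse operation: drop num_slots slots starting at first_slot.
    void remove_vmslots(int first_slot, int num_slots) {
      for (int i = first_slot - 1; i >= 0; i--)
        sp()[i + num_slots] = sp()[i];          // move everything down
      mem.erase(mem.begin(), mem.begin() + num_slots);
    }
  };

  int main() {
    SlotStack s;
    s.mem = {10, 11, 12, 13};                   // slots 0..3
    s.insert_vmslots(2, 1);                     // make room before slot 2
    assert(s.sp()[0] == 10 && s.sp()[1] == 11); // low slots keep their numbers
    assert(s.sp()[3] == 12 && s.sp()[4] == 13); // old slots 2,3 become 3,4
    s.remove_vmslots(2, 1);                     // undo it
    assert(s.sp()[2] == 12 && s.sp()[3] == 13);
    printf("slot renumbering matches the comment\n");
    return 0;
  }

The copy direction matters in the real code for the same reason it does in the model: new space is always taken at the stack pointer, so it is the low-numbered slots that have to be moved toward it.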
--- a/src/cpu/zero/vm/cppInterpreter_zero.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/zero/vm/cppInterpreter_zero.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,6 +1,6 @@ /* * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright 2007, 2008, 2010 Red Hat, Inc. + * Copyright 2007, 2008, 2010, 2011 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,12 +36,22 @@ static int native_entry(methodOop method, intptr_t UNUSED, TRAPS); static int accessor_entry(methodOop method, intptr_t UNUSED, TRAPS); static int empty_entry(methodOop method, intptr_t UNUSED, TRAPS); + static int method_handle_entry(methodOop method, intptr_t UNUSED, TRAPS); public: // Main loop of normal_entry static void main_loop(int recurse, TRAPS); private: + // Helpers for method_handle_entry + static void process_method_handle(oop method_handle, TRAPS); + static void insert_vmslots(int insert_before, int num_slots, TRAPS); + static void remove_vmslots(int first_slot, int num_slots, TRAPS); + static BasicType result_type_of_handle(oop method_handle); + static intptr_t* calculate_unwind_sp(ZeroStack* stack, oop method_handle); + static void throw_exception(JavaThread* thread, Symbol* name,char *msg=NULL); + + private: // Fast result type determination static BasicType result_type_of(methodOop method);
--- a/src/cpu/zero/vm/globals_zero.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/zero/vm/globals_zero.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,6 +1,6 @@ /* * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc. + * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -54,4 +54,6 @@ define_pd_global(bool, UseMembar, false); +// GC Ergo Flags +define_pd_global(intx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread #endif // CPU_ZERO_VM_GLOBALS_ZERO_HPP
--- a/src/cpu/zero/vm/interpreter_zero.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/zero/vm/interpreter_zero.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,6 +1,6 @@ /* * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc. + * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -49,6 +49,9 @@ #ifdef COMPILER1 #include "c1/c1_Runtime1.hpp" #endif +#ifdef CC_INTERP +#include "interpreter/cppInterpreter.hpp" +#endif address AbstractInterpreterGenerator::generate_slow_signature_handler() { _masm->advance(1); @@ -64,11 +67,15 @@ } address InterpreterGenerator::generate_abstract_entry() { - return ShouldNotCallThisEntry(); + return generate_entry((address) ShouldNotCallThisEntry()); } address InterpreterGenerator::generate_method_handle_entry() { - return ShouldNotCallThisEntry(); +#ifdef CC_INTERP + return generate_entry((address) CppInterpreter::method_handle_entry); +#else + return generate_entry((address) ShouldNotCallThisEntry()); +#endif // CC_INTERP } bool AbstractInterpreter::can_be_compiled(methodHandle m) {
--- a/src/cpu/zero/vm/jni_zero.h Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/zero/vm/jni_zero.h Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright 2009 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. *
--- a/src/cpu/zero/vm/methodHandles_zero.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/zero/vm/methodHandles_zero.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,6 +1,6 @@ /* * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright 2009, 2010 Red Hat, Inc. + * Copyright 2009, 2010, 2011 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,10 +29,21 @@ #include "prims/methodHandles.hpp" int MethodHandles::adapter_conversion_ops_supported_mask() { - ShouldNotCallThis(); + return ((1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS) + |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS) + //|(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG! + ); + // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS. } void MethodHandles::generate_method_handle_stub(MacroAssembler* masm, MethodHandles::EntryKind ek) { - ShouldNotCallThis(); + init_entry(ek, (MethodHandleEntry *) ek); }
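With generate_method_handle_stub() reduced to recording the EntryKind, the policy this port actually expresses is the bitmask above, which advertises which adapter conversions the Zero interpreter will handle. A hedged sketch of how such a mask is built and queried follows; the OP_* values below are made up for illustration and are not the real java_lang_invoke_AdapterMethodHandle constants.

  #include <cstdio>

  // Illustrative stand-ins for the OP_* codes; the real numbering lives in
  // java_lang_invoke_AdapterMethodHandle and may differ.
  enum AdapterOp { OP_RETYPE_ONLY = 0, OP_CHECK_CAST = 2, OP_DUP_ARGS = 8, OP_SPREAD_ARGS = 10 };

  static int supported_mask() {
    return (1 << OP_RETYPE_ONLY)
         | (1 << OP_CHECK_CAST)
         | (1 << OP_DUP_ARGS);
    // OP_SPREAD_ARGS deliberately left out, mirroring the FIXME above.
  }

  static bool op_supported(int op) {
    return ((supported_mask() >> op) & 1) != 0;   // test that op's bit
  }

  int main() {
    printf("check_cast supported:  %d\n", op_supported(OP_CHECK_CAST));   // 1
    printf("spread_args supported: %d\n", op_supported(OP_SPREAD_ARGS));  // 0
    return 0;
  }

Callers consult bits like these before deciding which adapters to build, so keeping OP_SPREAD_ARGS out of the mask sidesteps the crash noted in the FIXME rather than fixing it.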
--- a/src/cpu/zero/vm/relocInfo_zero.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/zero/vm/relocInfo_zero.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,6 +1,6 @@ /* * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright 2007, 2009 Red Hat, Inc. + * Copyright 2007, 2009, 2010, 2011 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,7 @@ #include "oops/oop.inline.hpp" #include "runtime/safepoint.hpp" -void Relocation::pd_set_data_value(address x, intptr_t o) { +void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) { ShouldNotCallThis(); }
--- a/src/cpu/zero/vm/sharedRuntime_zero.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/cpu/zero/vm/sharedRuntime_zero.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,6 +1,6 @@ /* * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc. + * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -78,15 +78,17 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm, methodHandle method, - int total_in_args, - int comp_args_on_stack, - BasicType *in_sig_bt, - VMRegPair *in_regs, + int compile_id, + int total_args_passed, + int max_arg, + BasicType *sig_bt, + VMRegPair *regs, BasicType ret_type) { #ifdef SHARK return SharkCompiler::compiler()->generate_native_wrapper(masm, method, - in_sig_bt, + compile_id, + sig_bt, ret_type); #else ShouldNotCallThis();
--- a/src/os/linux/vm/jvm_linux.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/os/linux/vm/jvm_linux.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/os/linux/vm/osThread_linux.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/os/linux/vm/osThread_linux.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/os/linux/vm/os_linux.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/os/linux/vm/os_linux.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -2213,7 +2213,7 @@ if (rp == NULL) return; - if (strcmp(Arguments::sun_java_launcher(), "gamma") == 0) { + if (Arguments::created_by_gamma_launcher()) { // Support for the gamma launcher. Typical value for buf is // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so". If "/jre/lib/" appears at // the right place in the string, then assume we are installed in a JDK and @@ -2648,45 +2648,39 @@ // writing thread stacks don't use growable mappings (i.e. those // creeated with MAP_GROWSDOWN), and aren't marked "[stack]", so this // only applies to the main thread. -static bool -get_stack_bounds(uintptr_t *bottom, uintptr_t *top) -{ - FILE *f = fopen("/proc/self/maps", "r"); - if (f == NULL) + +static +bool get_stack_bounds(uintptr_t *bottom, uintptr_t *top) { + + char buf[128]; + int fd, sz; + + if ((fd = ::open("/proc/self/maps", O_RDONLY)) < 0) { return false; - - while (!feof(f)) { - size_t dummy; - char *str = NULL; - ssize_t len = getline(&str, &dummy, f); - if (len == -1) { - fclose(f); - return false; - } - - if (len > 0 && str[len-1] == '\n') { - str[len-1] = 0; - len--; - } - - static const char *stack_str = "[stack]"; - if (len > (ssize_t)strlen(stack_str) - && (strcmp(str + len - strlen(stack_str), stack_str) == 0)) { - if (sscanf(str, "%" SCNxPTR "-%" SCNxPTR, bottom, top) == 2) { - uintptr_t sp = (uintptr_t)__builtin_frame_address(0); - if (sp >= *bottom && sp <= *top) { - free(str); - fclose(f); - return true; + } + + const char kw[] = "[stack]"; + const int kwlen = sizeof(kw)-1; + + // Address part of /proc/self/maps couldn't be more than 128 bytes + while ((sz = os::get_line_chars(fd, buf, sizeof(buf))) > 0) { + if (sz > kwlen && ::memcmp(buf+sz-kwlen, kw, kwlen) == 0) { + // Extract addresses + if (sscanf(buf, "%" SCNxPTR "-%" SCNxPTR, bottom, top) == 2) { + uintptr_t sp = (uintptr_t) __builtin_frame_address(0); + if (sp >= *bottom && sp <= *top) { + ::close(fd); + return true; + } } - } - } - free(str); + } } - fclose(f); + + ::close(fd); return false; } + // If the (growable) stack mapping already extends beyond the point // where we're going to put our guard pages, truncate the mapping at // that point by munmap()ping it. This ensures that when we later
--- a/src/os/linux/vm/os_linux.inline.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/os/linux/vm/os_linux.inline.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/os/linux/vm/thread_linux.inline.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/os/linux/vm/thread_linux.inline.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/os/posix/vm/os_posix.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/os/posix/vm/os_posix.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -59,3 +59,12 @@ VMError::report_coredump_status(buffer, success); } +bool os::is_debugger_attached() { + // not implemented + return false; +} + +void os::wait_for_keypress_at_exit(void) { + // don't do anything on posix platforms + return; +}
--- a/src/os/solaris/dtrace/generateJvmOffsets.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/os/solaris/dtrace/generateJvmOffsets.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/os/solaris/dtrace/jhelper.d Fri Apr 22 15:22:45 2011 +0200 +++ b/src/os/solaris/dtrace/jhelper.d Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/os/solaris/dtrace/libjvm_db.c Fri Apr 22 15:22:45 2011 +0200 +++ b/src/os/solaris/dtrace/libjvm_db.c Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -524,6 +524,8 @@ CHECK_FAIL(err); err = read_pointer(J, constantPool + nameIndex * POINTER_SIZE + SIZE_constantPoolOopDesc, &nameSymbol); CHECK_FAIL(err); + // The symbol is a CPSlot and has lower bit set to indicate metadata + nameSymbol &= (~1); // remove metadata lsb err = ps_pread(J->P, nameSymbol + OFFSET_Symbol_length, &nameSymbolLength, 2); CHECK_FAIL(err); nameString = (char*)calloc(nameSymbolLength + 1, 1); @@ -535,6 +537,7 @@ CHECK_FAIL(err); err = read_pointer(J, constantPool + signatureIndex * POINTER_SIZE + SIZE_constantPoolOopDesc, &signatureSymbol); CHECK_FAIL(err); + signatureSymbol &= (~1); // remove metadata lsb err = ps_pread(J->P, signatureSymbol + OFFSET_Symbol_length, &signatureSymbolLength, 2); CHECK_FAIL(err); signatureString = (char*)calloc(signatureSymbolLength + 1, 1);
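The two new "&= (~1)" lines exist because a constant-pool slot (CPSlot) stores its Symbol pointer with the least-significant bit used as a metadata tag, so the raw word read out of the target process is not a usable address until the tag is cleared. A trivial sketch of the untagging idea, with made-up values:

  #include <cassert>
  #include <cstdint>

  // A CPSlot-style word: pointer bits plus a low tag bit that marks the
  // entry as metadata.  The values below are invented for illustration.
  static uintptr_t clear_metadata_tag(uintptr_t slot) {
    return slot & ~(uintptr_t)1;      // remove metadata lsb
  }

  int main() {
    uintptr_t tagged = 0xc0ffe1;                       // low bit set
    uintptr_t addr   = clear_metadata_tag(tagged);
    assert(addr == 0xc0ffe0);                          // usable address again
    assert(clear_metadata_tag(addr) == addr);          // untagging is idempotent
    return 0;
  }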
--- a/src/os/solaris/vm/dtraceJSDT_solaris.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/os/solaris/vm/dtraceJSDT_solaris.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/os/solaris/vm/os_solaris.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/os/solaris/vm/os_solaris.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -2511,7 +2511,7 @@ assert(ret != 0, "cannot locate libjvm"); realpath((char *)dlinfo.dli_fname, buf); - if (strcmp(Arguments::sun_java_launcher(), "gamma") == 0) { + if (Arguments::created_by_gamma_launcher()) { // Support for the gamma launcher. Typical value for buf is // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so". If "/jre/lib/" appears at // the right place in the string, then assume we are installed in a JDK and
--- a/src/os/windows/vm/os_windows.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/os/windows/vm/os_windows.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -22,10 +22,8 @@ * */ -#ifdef _WIN64 -// Must be at least Windows 2000 or XP to use VectoredExceptions +// Must be at least Windows 2000 or XP to use VectoredExceptions and IsDebuggerPresent #define _WIN32_WINNT 0x500 -#endif // no precompiled headers #include "classfile/classLoader.hpp" @@ -1788,7 +1786,7 @@ } buf[0] = '\0'; - if (strcmp(Arguments::sun_java_launcher(), "gamma") == 0) { + if (Arguments::created_by_gamma_launcher()) { // Support for the gamma launcher. Check for an // JAVA_HOME environment variable // and fix up the path so it looks like @@ -3299,9 +3297,14 @@ "possibility of dangling Thread pointer"); OSThread* osthread = thread->osthread(); - bool interrupted; - interrupted = osthread->interrupted(); - if (clear_interrupted == true) { + bool interrupted = osthread->interrupted(); + // There is no synchronization between the setting of the interrupt + // and it being cleared here. It is critical - see 6535709 - that + // we only clear the interrupt state, and reset the interrupt event, + // if we are going to report that we were indeed interrupted - else + // an interrupt can be "lost", leading to spurious wakeups or lost wakeups + // depending on the timing + if (interrupted && clear_interrupted) { osthread->set_interrupted(false); ResetEvent(osthread->interrupt_event()); } // Otherwise leave the interrupted state alone @@ -3418,6 +3421,19 @@ } +bool os::is_debugger_attached() { + return IsDebuggerPresent() ? true : false; +} + + +void os::wait_for_keypress_at_exit(void) { + if (PauseAtExit) { + fprintf(stderr, "Press any key to continue...\n"); + fgetc(stdin); + } +} + + int os::message_box(const char* title, const char* message) { int result = MessageBox(NULL, message, title, MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
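The interrupt change implements what the new comment describes for bug 6535709: the flag and its event are cleared only when the call is about to report that the thread really was interrupted, so a racing interrupt can no longer be wiped out by a query that returns false. A minimal sketch of the fixed shape, using a std::atomic<bool> as a stand-in for the OSThread state (the real code also resets the Win32 interrupt event):

  #include <atomic>
  #include <cstdio>

  // Stand-in for the OSThread interrupt state.
  static std::atomic<bool> interrupted_flag(false);

  // Read once, then clear only when the caller asked for clearing and we
  // are about to report "interrupted".  A query that returns false never
  // touches the state, so an interrupt that lands just after the load
  // cannot be lost the way the old unconditional clear could lose it.
  static bool query_and_maybe_clear_interrupt(bool clear_interrupted) {
    bool interrupted = interrupted_flag.load();
    if (interrupted && clear_interrupted) {
      interrupted_flag.store(false);   // plus ResetEvent(...) in the real code
    }
    return interrupted;
  }

  int main() {
    query_and_maybe_clear_interrupt(true);                   // nothing pending: no-op
    interrupted_flag.store(true);                            // an interrupt arrives
    printf("%d\n", query_and_maybe_clear_interrupt(true));   // prints 1
    printf("%d\n", interrupted_flag.load());                 // cleared: prints 0
    return 0;
  }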
--- a/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/linux_zero/vm/os_linux_zero.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/os_cpu/linux_zero/vm/os_linux_zero.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. *
--- a/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/tools/ProjectCreator/Util.java Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/tools/ProjectCreator/Util.java Fri Apr 22 15:30:53 2011 +0200 @@ -47,18 +47,6 @@ return sb.toString(); } - static String join(String padder, String v[]) { - StringBuffer sb = new StringBuffer(); - - for (int i=0; i<v.length; i++) { - sb.append(v[i]); - if (i < (v.length - 1)) sb.append(padder); - } - - return sb.toString(); - } - - static String prefixed_join(String padder, Vector v, boolean quoted) { StringBuffer sb = new StringBuffer();
--- a/src/share/tools/ProjectCreator/WinGammaPlatform.java Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/tools/ProjectCreator/WinGammaPlatform.java Fri Apr 22 15:30:53 2011 +0200 @@ -587,7 +587,6 @@ Vector allConfigs = new Vector(); allConfigs.add(new C1DebugConfig()); - allConfigs.add(new C1FastDebugConfig()); allConfigs.add(new C1ProductConfig()); @@ -655,6 +654,10 @@ boolean isHeader() { return attr.shortName.endsWith(".h") || attr.shortName.endsWith(".hpp"); } + + boolean isCpp() { + return attr.shortName.endsWith(".cpp"); + } } @@ -708,7 +711,7 @@ PrintWriter printWriter; public void writeProjectFile(String projectFileName, String projectName, - Vector allConfigs) throws IOException { + Vector<BuildConfig> allConfigs) throws IOException { throw new RuntimeException("use compiler version specific version"); } }
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java Fri Apr 22 15:30:53 2011 +0200 @@ -0,0 +1,547 @@ +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.PrintWriter; +import java.io.UnsupportedEncodingException; +import java.util.Hashtable; +import java.util.Iterator; +import java.util.TreeSet; +import java.util.UUID; +import java.util.Vector; + +public class WinGammaPlatformVC10 extends WinGammaPlatformVC7 { + + @Override + protected String getProjectExt() { + return ".vcxproj"; + } + + @Override + public void writeProjectFile(String projectFileName, String projectName, + Vector<BuildConfig> allConfigs) throws IOException { + System.out.println(); + System.out.print(" Writing .vcxproj file: " + projectFileName); + + String projDir = Util.normalize(new File(projectFileName).getParent()); + + printWriter = new PrintWriter(projectFileName, "UTF-8"); + printWriter.println("<?xml version=\"1.0\" encoding=\"utf-8\"?>"); + startTag("Project", + "DefaultTargets", "Build", + "ToolsVersion", "4.0", + "xmlns", "http://schemas.microsoft.com/developer/msbuild/2003"); + startTag("ItemGroup", + "Label", "ProjectConfigurations"); + for (BuildConfig cfg : allConfigs) { + startTag("ProjectConfiguration", + "Include", cfg.get("Name")); + tagData("Configuration", cfg.get("Id")); + tagData("Platform", cfg.get("PlatformName")); + endTag("ProjectConfiguration"); + } + endTag("ItemGroup"); + + startTag("PropertyGroup", "Label", "Globals"); + tagData("ProjectGuid", "{8822CB5C-1C41-41C2-8493-9F6E1994338B}"); + tag("SccProjectName"); + tag("SccLocalPath"); + endTag("PropertyGroup"); + + tag("Import", "Project", "$(VCTargetsPath)\\Microsoft.Cpp.Default.props"); + + for (BuildConfig cfg : allConfigs) { + startTag(cfg, "PropertyGroup", "Label", "Configuration"); + tagData("ConfigurationType", "DynamicLibrary"); + tagData("UseOfMfc", "false"); + endTag("PropertyGroup"); + } + + tag("Import", "Project", "$(VCTargetsPath)\\Microsoft.Cpp.props"); + startTag("ImportGroup", "Label", "ExtensionSettings"); + endTag("ImportGroup"); + for (BuildConfig cfg : allConfigs) { + startTag(cfg, "ImportGroup", "Label", "PropertySheets"); + tag("Import", + "Project", "$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props", + "Condition", "exists('$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props')", + "Label", "LocalAppDataPlatform"); + endTag("ImportGroup"); + } + + tag("PropertyGroup", "Label", "UserMacros"); + + startTag("PropertyGroup"); + tagData("_ProjectFileVersion", "10.0.30319.1"); + for (BuildConfig cfg : allConfigs) { + tagData(cfg, "OutDir", cfg.get("OutputDir") + Util.sep); + tagData(cfg, "IntDir", cfg.get("OutputDir") + Util.sep); + tagData(cfg, "LinkIncremental", "false"); + } + for (BuildConfig cfg : allConfigs) { + tagData(cfg, "CodeAnalysisRuleSet", "AllRules.ruleset"); + tag(cfg, "CodeAnalysisRules"); + tag(cfg, "CodeAnalysisRuleAssemblies"); + } + endTag("PropertyGroup"); + + for (BuildConfig cfg : allConfigs) { + startTag(cfg, "ItemDefinitionGroup"); + startTag("ClCompile"); + tagV(cfg.getV("CompilerFlags")); + endTag("ClCompile"); + + startTag("Link"); + tagV(cfg.getV("LinkerFlags")); + endTag("Link"); + + startTag("PostBuildEvent"); + tagData("Message", BuildConfig.getFieldString(null, "PostbuildDescription")); + tagData("Command", cfg.expandFormat(BuildConfig.getFieldString(null, "PostbuildCommand").replace("\t", "\r\n"))); + endTag("PostBuildEvent"); + + startTag("PreLinkEvent"); + tagData("Message", 
BuildConfig.getFieldString(null, "PrelinkDescription")); + tagData("Command", cfg.expandFormat(BuildConfig.getFieldString(null, "PrelinkCommand").replace("\t", "\r\n"))); + endTag("PreLinkEvent"); + + endTag("ItemDefinitionGroup"); + } + + writeFiles(allConfigs, projDir); + + tag("Import", "Project", "$(VCTargetsPath)\\Microsoft.Cpp.targets"); + startTag("ImportGroup", "Label", "ExtensionTargets"); + endTag("ImportGroup"); + + endTag("Project"); + printWriter.close(); + System.out.println(" Done."); + + writeFilterFile(projectFileName, projectName, allConfigs, projDir); + writeUserFile(projectFileName, allConfigs); + } + + + private void writeUserFile(String projectFileName, Vector<BuildConfig> allConfigs) throws FileNotFoundException, UnsupportedEncodingException { + String userFileName = projectFileName + ".user"; + if (new File(userFileName).exists()) { + return; + } + System.out.print(" Writing .vcxproj.user file: " + userFileName); + printWriter = new PrintWriter(userFileName, "UTF-8"); + + printWriter.println("<?xml version=\"1.0\" encoding=\"utf-8\"?>"); + startTag("Project", + "ToolsVersion", "4.0", + "xmlns", "http://schemas.microsoft.com/developer/msbuild/2003"); + + for (BuildConfig cfg : allConfigs) { + startTag(cfg, "PropertyGroup"); + tagData("LocalDebuggerCommand", "$(TargetDir)/hotspot.exe"); + endTag("PropertyGroup"); + } + + endTag("Project"); + printWriter.close(); + System.out.println(" Done."); + } + + private void writeFilterFile(String projectFileName, String projectName, + Vector<BuildConfig> allConfigs, String base) throws FileNotFoundException, UnsupportedEncodingException { + String filterFileName = projectFileName + ".filters"; + System.out.print(" Writing .vcxproj.filters file: " + filterFileName); + printWriter = new PrintWriter(filterFileName, "UTF-8"); + + printWriter.println("<?xml version=\"1.0\" encoding=\"utf-8\"?>"); + startTag("Project", + "ToolsVersion", "4.0", + "xmlns", "http://schemas.microsoft.com/developer/msbuild/2003"); + + Hashtable<String, FileAttribute> allFiles = computeAttributedFiles(allConfigs); + TreeSet<FileInfo> sortedFiles = sortFiles(allFiles); + Vector<NameFilter> filters = makeFilters(sortedFiles); + + // first all filters + startTag("ItemGroup"); + for (NameFilter filter : filters) { + doWriteFilter(filter, ""); + } + startTag("Filter", "Include", "Resource Files"); + UUID uuid = UUID.randomUUID(); + tagData("UniqueIdentifier", "{" + uuid.toString() + "}"); + tagData("Extensions", "ico;cur;bmp;dlg;rc2;rct;bin;cnt;rtf;gif;jpg;jpeg;jpe"); + endTag("Filter"); + endTag("ItemGroup"); + + // then all cpp files + startTag("ItemGroup"); + for (NameFilter filter : filters) { + doWriteFiles(sortedFiles, filter, "", "ClCompile", new Evaluator() { + public boolean pick(FileInfo fi) { + return fi.isCpp(); + } + }, base); + } + endTag("ItemGroup"); + + // then all header files + startTag("ItemGroup"); + for (NameFilter filter : filters) { + doWriteFiles(sortedFiles, filter, "", "ClInclude", new Evaluator() { + public boolean pick(FileInfo fi) { + return fi.isHeader(); + } + }, base); + } + endTag("ItemGroup"); + + // then all other files + startTag("ItemGroup"); + for (NameFilter filter : filters) { + doWriteFiles(sortedFiles, filter, "", "None", new Evaluator() { + public boolean pick(FileInfo fi) { + return true; + } + }, base); + } + endTag("ItemGroup"); + + endTag("Project"); + printWriter.close(); + System.out.println(" Done."); + } + + + private void doWriteFilter(NameFilter filter, String start) { + startTag("Filter", "Include", start + 
filter.fname); + UUID uuid = UUID.randomUUID(); + tagData("UniqueIdentifier", "{" + uuid.toString() + "}"); + endTag("Filter"); + if (filter instanceof ContainerFilter) { + Iterator i = ((ContainerFilter)filter).babies(); + while (i.hasNext()) { + doWriteFilter((NameFilter)i.next(), start + filter.fname + "\\"); + } + } + } + + interface Evaluator { + boolean pick(FileInfo fi); + } + + private void doWriteFiles(TreeSet<FileInfo> allFiles, NameFilter filter, String start, String tool, Evaluator eval, String base) { + if (filter instanceof ContainerFilter) { + Iterator i = ((ContainerFilter)filter).babies(); + while (i.hasNext()) { + doWriteFiles(allFiles, (NameFilter)i.next(), start + filter.fname + "\\", tool, eval, base); + } + } + else { + Iterator i = allFiles.iterator(); + while (i.hasNext()) { + FileInfo fi = (FileInfo)i.next(); + + if (!filter.match(fi)) { + continue; + } + if (eval.pick(fi)) { + startTag(tool, "Include", rel(fi.full, base)); + tagData("Filter", start + filter.fname); + endTag(tool); + + // we not gonna look at this file anymore (sic!) + i.remove(); + } + } + } + } + + + void writeFiles(Vector<BuildConfig> allConfigs, String projDir) { + Hashtable<String, FileAttribute> allFiles = computeAttributedFiles(allConfigs); + TreeSet<FileInfo> sortedFiles = sortFiles(allFiles); + + // first cpp-files + startTag("ItemGroup"); + for (FileInfo fi : sortedFiles) { + if (!fi.isCpp()) { + continue; + } + writeFile("ClCompile", allConfigs, fi, projDir); + } + endTag("ItemGroup"); + + // then header-files + startTag("ItemGroup"); + for (FileInfo fi : sortedFiles) { + if (!fi.isHeader()) { + continue; + } + writeFile("ClInclude", allConfigs, fi, projDir); + } + endTag("ItemGroup"); + + // then others + startTag("ItemGroup"); + for (FileInfo fi : sortedFiles) { + if (fi.isHeader() || fi.isCpp()) { + continue; + } + writeFile("None", allConfigs, fi, projDir); + } + endTag("ItemGroup"); + } + + /** + * Make "path" into a relative path using "base" as the base. + * + * path and base are assumed to be normalized with / as the file separator. 
+ * returned path uses "\\" as file separator + */ + private String rel(String path, String base) + { + if(!base.endsWith("/")) { + base += "/"; + } + String[] pathTok = path.split("/"); + String[] baseTok = base.split("/"); + int pi = 0; + int bi = 0; + StringBuilder newPath = new StringBuilder(); + + // first step past all path components that are the same + while (pi < pathTok.length && + bi < baseTok.length && + pathTok[pi].equals(baseTok[bi])) { + pi++; + bi++; + } + + // for each path component left in base, add "../" + while (bi < baseTok.length) { + bi++; + newPath.append("..\\"); + } + + // now add everything left in path + while (pi < pathTok.length) { + newPath.append(pathTok[pi]); + pi++; + if (pi != pathTok.length) { + newPath.append("\\"); + } + } + return newPath.toString(); + } + + private void writeFile(String tool, Vector<BuildConfig> allConfigs, FileInfo fi, String base) { + if (fi.attr.configs == null && fi.attr.pchRoot == false && fi.attr.noPch == false) { + tag(tool, "Include", rel(fi.full, base)); + } + else { + startTag(tool, "Include", rel(fi.full, base)); + for (BuildConfig cfg : allConfigs) { + if (fi.attr.configs != null && !fi.attr.configs.contains(cfg.get("Name"))) { + tagData(cfg, "ExcludedFromBuild", "true"); + } + if (fi.attr.pchRoot) { + tagData(cfg, "PrecompiledHeader", "Create"); + } + if (fi.attr.noPch) { + startTag(cfg, "PrecompiledHeader"); + endTag("PrecompiledHeader"); + } + } + endTag(tool); + } + } + + String buildCond(BuildConfig cfg) { + return "'$(Configuration)|$(Platform)'=='"+cfg.get("Name")+"'"; + } + + + void tagV(Vector<String> v) { + Iterator<String> i = v.iterator(); + while(i.hasNext()) { + String name = i.next(); + String data = i.next(); + tagData(name, data); + } + } + + void tagData(BuildConfig cfg, String name, String data) { + tagData(name, data, "Condition", buildCond(cfg)); + } + + void tag(BuildConfig cfg, String name, String... attrs) { + String[] ss = new String[attrs.length + 2]; + ss[0] = "Condition"; + ss[1] = buildCond(cfg); + System.arraycopy(attrs, 0, ss, 2, attrs.length); + + tag(name, ss); + } + + void startTag(BuildConfig cfg, String name, String... attrs) { + String[] ss = new String[attrs.length + 2]; + ss[0] = "Condition"; + ss[1] = buildCond(cfg); + System.arraycopy(attrs, 0, ss, 2, attrs.length); + + startTag(name, ss); + } +} + +class CompilerInterfaceVC10 extends CompilerInterface { + + @Override + Vector getBaseCompilerFlags(Vector defines, Vector includes, String outDir) { + Vector rv = new Vector(); + + addAttr(rv, "AdditionalIncludeDirectories", Util.join(";", includes)); + addAttr(rv, "PreprocessorDefinitions", + Util.join(";", defines).replace("\\\"", "\"")); + addAttr(rv, "PrecompiledHeaderFile", "precompiled.hpp"); + addAttr(rv, "PrecompiledHeaderOutputFile", outDir+Util.sep+"vm.pch"); + addAttr(rv, "AssemblerListingLocation", outDir); + addAttr(rv, "ObjectFileName", outDir+Util.sep); + addAttr(rv, "ProgramDataBaseFileName", outDir+Util.sep+"jvm.pdb"); + // Set /nologo option + addAttr(rv, "SuppressStartupBanner", "true"); + // Surpass the default /Tc or /Tp. + addAttr(rv, "CompileAs", "Default"); + // Set /W3 option. + addAttr(rv, "WarningLevel", "Level3"); + // Set /WX option, + addAttr(rv, "TreatWarningAsError", "true"); + // Set /GS option + addAttr(rv, "BufferSecurityCheck", "false"); + // Set /Zi option. + addAttr(rv, "DebugInformationFormat", "ProgramDatabase"); + // Set /Yu option. 
+ addAttr(rv, "PrecompiledHeader", "Use"); + // Set /EHsc- option + addAttr(rv, "ExceptionHandling", ""); + + addAttr(rv, "MultiProcessorCompilation", "true"); + + return rv; + } + + @Override + Vector getDebugCompilerFlags(String opt) { + Vector rv = new Vector(); + + // Set /On option + addAttr(rv, "Optimization", opt); + // Set /FR option. + addAttr(rv, "BrowseInformation", "true"); + addAttr(rv, "BrowseInformationFile", "$(IntDir)"); + // Set /MD option. + addAttr(rv, "RuntimeLibrary", "MultiThreadedDLL"); + // Set /Oy- option + addAttr(rv, "OmitFramePointers", "false"); + + return rv; + } + + @Override + Vector getProductCompilerFlags() { + Vector rv = new Vector(); + + // Set /O2 option. + addAttr(rv, "Optimization", "MaxSpeed"); + // Set /Oy- option + addAttr(rv, "OmitFramePointers", "false"); + // Set /Ob option. 1 is expandOnlyInline + addAttr(rv, "InlineFunctionExpansion", "OnlyExplicitInline"); + // Set /GF option. + addAttr(rv, "StringPooling", "true"); + // Set /MD option. 2 is rtMultiThreadedDLL + addAttr(rv, "RuntimeLibrary", "MultiThreadedDLL"); + // Set /Gy option + addAttr(rv, "FunctionLevelLinking", "true"); + + return rv; + } + + @Override + Vector getBaseLinkerFlags(String outDir, String outDll, String platformName) { + Vector rv = new Vector(); + + addAttr(rv, "AdditionalOptions", + "/export:JNI_GetDefaultJavaVMInitArgs " + + "/export:JNI_CreateJavaVM " + + "/export:JVM_FindClassFromBootLoader "+ + "/export:JNI_GetCreatedJavaVMs "+ + "/export:jio_snprintf /export:jio_printf "+ + "/export:jio_fprintf /export:jio_vfprintf "+ + "/export:jio_vsnprintf "+ + "/export:JVM_GetVersionInfo "+ + "/export:JVM_GetThreadStateNames "+ + "/export:JVM_GetThreadStateValues "+ + "/export:JVM_InitAgentProperties"); + addAttr(rv, "AdditionalDependencies", "kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;Wsock32.lib;winmm.lib"); + addAttr(rv, "OutputFile", outDll); + addAttr(rv, "SuppressStartupBanner", "true"); + addAttr(rv, "ModuleDefinitionFile", outDir+Util.sep+"vm.def"); + addAttr(rv, "ProgramDatabaseFile", outDir+Util.sep+"jvm.pdb"); + addAttr(rv, "SubSystem", "Windows"); + addAttr(rv, "BaseAddress", "0x8000000"); + addAttr(rv, "ImportLibrary", outDir+Util.sep+"jvm.lib"); + + if(platformName.equals("Win32")) { + addAttr(rv, "TargetMachine", "MachineX86"); + } else { + addAttr(rv, "TargetMachine", "MachineX64"); + } + + // We always want the /DEBUG option to get full symbol information in the pdb files + addAttr(rv, "GenerateDebugInformation", "true"); + + return rv; + } + + @Override + Vector getDebugLinkerFlags() { + Vector rv = new Vector(); + + // Empty now that /DEBUG option is used by all configs + + return rv; + } + + @Override + Vector getProductLinkerFlags() { + Vector rv = new Vector(); + + // Set /OPT:REF option. + addAttr(rv, "OptimizeReferences", "true"); + // Set /OPT:ICF option. + addAttr(rv, "EnableCOMDATFolding", "true"); + + return rv; + } + + @Override + void getAdditionalNonKernelLinkerFlags(Vector rv) { + extAttr(rv, "AdditionalOptions", " /export:AsyncGetCallTrace"); + } + + @Override + String getOptFlag() { + return "MaxSpeed"; + } + + @Override + String getNoOptFlag() { + return "Disabled"; + } + + @Override + String makeCfgName(String flavourBuild, String platform) { + return flavourBuild + "|" + platform; + } + +}
--- a/src/share/tools/ProjectCreator/WinGammaPlatformVC7.java Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC7.java Fri Apr 22 15:30:53 2011 +0200 @@ -35,7 +35,7 @@ String projectVersion() {return "7.10";}; public void writeProjectFile(String projectFileName, String projectName, - Vector allConfigs) throws IOException { + Vector<BuildConfig> allConfigs) throws IOException { System.out.println(); System.out.println(" Writing .vcproj file: "+projectFileName); // If we got this far without an error, we're safe to actually @@ -54,11 +54,11 @@ "SccLocalPath", "" } ); - startTag("Platforms", null); + startTag("Platforms"); tag("Platform", new String[] {"Name", (String) BuildConfig.getField(null, "PlatformName")}); endTag("Platforms"); - startTag("Configurations", null); + startTag("Configurations"); for (Iterator i = allConfigs.iterator(); i.hasNext(); ) { writeConfiguration((BuildConfig)i.next()); @@ -66,11 +66,11 @@ endTag("Configurations"); - tag("References", null); + tag("References"); writeFiles(allConfigs); - tag("Globals", null); + tag("Globals"); endTag("VisualStudioProject"); printWriter.close(); @@ -190,28 +190,6 @@ } } - class TypeFilter extends NameFilter { - String[] exts; - - TypeFilter(String fname, String[] exts) { - this.fname = fname; - this.exts = exts; - } - - boolean match(FileInfo fi) { - for (int i=0; i<exts.length; i++) { - if (fi.full.endsWith(exts[i])) { - return true; - } - } - return false; - } - - String filterString() { - return Util.join(";", exts); - } - } - class TerminatorFilter extends NameFilter { TerminatorFilter(String fname) { this.fname = fname; @@ -299,8 +277,8 @@ // - container filter just provides a container to group together real filters // - real filter can select elements from the set according to some rule, put it into XML // and remove from the list - Vector makeFilters(TreeSet<FileInfo> files) { - Vector rv = new Vector(); + Vector<NameFilter> makeFilters(TreeSet<FileInfo> files) { + Vector<NameFilter> rv = new Vector<NameFilter>(); String sbase = Util.normalize(BuildConfig.getFieldString(null, "SourceBase")+"/src/"); String currentDir = ""; @@ -370,13 +348,12 @@ rv.add(new SpecificNameFilter("Precompiled Header", new String[] {"precompiled.hpp"})); // this one is to catch files not caught by other filters - //rv.add(new TypeFilter("Header Files", new String[] {"h", "hpp", "hxx", "hm", "inl", "fi", "fd"})); rv.add(new TerminatorFilter("Source Files")); return rv; } - void writeFiles(Vector allConfigs) { + void writeFiles(Vector<BuildConfig> allConfigs) { Hashtable allFiles = computeAttributedFiles(allConfigs); @@ -387,7 +364,7 @@ TreeSet sortedFiles = sortFiles(allFiles); - startTag("Files", null); + startTag("Files"); for (Iterator i = makeFilters(sortedFiles).iterator(); i.hasNext(); ) { doWriteFiles(sortedFiles, allConfigNames, (NameFilter)i.next()); @@ -556,34 +533,39 @@ int indent; private void startTagPrim(String name, + String[] attrs, + boolean close) { + startTagPrim(name, attrs, close, true); + } + + private void startTagPrim(String name, String[] attrs, - boolean close) { + boolean close, + boolean newline) { doIndent(); printWriter.print("<"+name); indent++; - if (attrs != null) { - printWriter.println(); + if (attrs != null && attrs.length > 0) { for (int i=0; i<attrs.length; i+=2) { - doIndent(); printWriter.print(" " + attrs[i]+"=\""+attrs[i+1]+"\""); if (i < attrs.length - 2) { - printWriter.println(); } } } if (close) { indent--; - //doIndent(); - printWriter.println("/>"); + 
printWriter.print(" />"); } else { - //doIndent(); - printWriter.println(">"); + printWriter.print(">"); + } + if(newline) { + printWriter.println(); } } - void startTag(String name, String[] attrs) { + void startTag(String name, String... attrs) { startTagPrim(name, attrs, false); } @@ -601,11 +583,25 @@ printWriter.println("</"+name+">"); } - void tag(String name, String[] attrs) { + void tag(String name, String... attrs) { startTagPrim(name, attrs, true); } - void tagV(String name, Vector attrs) { + void tagData(String name, String data) { + doIndent(); + printWriter.print("<"+name+">"); + printWriter.print(data); + printWriter.println("</"+name+">"); + } + + void tagData(String name, String data, String... attrs) { + startTagPrim(name, attrs, false, false); + printWriter.print(data); + printWriter.println("</"+name+">"); + indent--; + } + + void tagV(String name, Vector attrs) { String s[] = new String [attrs.size()]; for (int i=0; i<attrs.size(); i++) { s[i] = (String)attrs.elementAt(i); @@ -616,7 +612,7 @@ void doIndent() { for (int i=0; i<indent; i++) { - printWriter.print(" "); + printWriter.print(" "); } }
--- a/src/share/tools/hsdis/hsdis-demo.c Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/tools/hsdis/hsdis-demo.c Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,8 +22,6 @@ * */ -#include "precompiled.hpp" - /* hsdis-demo.c -- dump a range of addresses as native instructions This demonstrates the protocol required by the HotSpot PrintAssembly option. */
--- a/src/share/tools/hsdis/hsdis.c Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/tools/hsdis/hsdis.c Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,8 +22,6 @@ * */ -#include "precompiled.hpp" - /* hsdis.c -- dump a range of addresses as native instructions This implements the plugin protocol required by the HotSpot PrintAssembly option.
--- a/src/share/vm/adlc/main.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/adlc/main.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -240,6 +240,11 @@ AD.addInclude(AD._CPP_file, "nativeInst_sparc.hpp"); AD.addInclude(AD._CPP_file, "vmreg_sparc.inline.hpp"); #endif +#ifdef TARGET_ARCH_arm + AD.addInclude(AD._CPP_file, "assembler_arm.inline.hpp"); + AD.addInclude(AD._CPP_file, "nativeInst_arm.hpp"); + AD.addInclude(AD._CPP_file, "vmreg_arm.inline.hpp"); +#endif AD.addInclude(AD._HPP_file, "memory/allocation.hpp"); AD.addInclude(AD._HPP_file, "opto/machnode.hpp"); AD.addInclude(AD._HPP_file, "opto/node.hpp");
--- a/src/share/vm/adlc/output_c.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/adlc/output_c.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/asm/assembler.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/asm/assembler.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/asm/assembler.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/asm/assembler.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/asm/codeBuffer.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/asm/codeBuffer.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_Canonicalizer.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/c1/c1_Canonicalizer.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -209,7 +209,7 @@ // limit this optimization to current block if (value != NULL && in_current_block(conv)) { set_canonical(new StoreField(x->obj(), x->offset(), x->field(), value, x->is_static(), - x->state_before(), x->is_loaded(), x->is_initialized())); + x->state_before(), x->needs_patching())); return; } }
--- a/src/share/vm/c1/c1_Compilation.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/c1/c1_Compilation.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_Defs.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/c1/c1_Defs.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_FpuStackSim.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/c1/c1_FpuStackSim.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_FrameMap.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/c1/c1_FrameMap.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_FrameMap.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/c1/c1_FrameMap.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_GraphBuilder.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -30,6 +30,7 @@ #include "c1/c1_InstructionPrinter.hpp" #include "ci/ciField.hpp" #include "ci/ciKlass.hpp" +#include "compiler/compileBroker.hpp" #include "interpreter/bytecode.hpp" #include "runtime/sharedRuntime.hpp" #include "utilities/bitMap.inline.hpp" @@ -1456,12 +1457,12 @@ BasicType field_type = field->type()->basic_type(); ValueType* type = as_ValueType(field_type); // call will_link again to determine if the field is valid. - const bool is_loaded = holder->is_loaded() && - field->will_link(method()->holder(), code); - const bool is_initialized = is_loaded && holder->is_initialized(); + const bool needs_patching = !holder->is_loaded() || + !field->will_link(method()->holder(), code) || + PatchALot; ValueStack* state_before = NULL; - if (!is_initialized || PatchALot) { + if (!holder->is_initialized() || needs_patching) { // save state before instruction for debug info when // deoptimization happens during patching state_before = copy_state_before(); @@ -1469,20 +1470,16 @@ Value obj = NULL; if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) { - // commoning of class constants should only occur if the class is - // fully initialized and resolved in this constant pool. The will_link test - // above essentially checks if this class is resolved in this constant pool - // so, the is_initialized flag should be suffiect. if (state_before != NULL) { // build a patching constant - obj = new Constant(new ClassConstant(holder), state_before); + obj = new Constant(new InstanceConstant(holder->java_mirror()), state_before); } else { - obj = new Constant(new ClassConstant(holder)); + obj = new Constant(new InstanceConstant(holder->java_mirror())); } } - const int offset = is_loaded ? field->offset() : -1; + const int offset = !needs_patching ? field->offset() : -1; switch (code) { case Bytecodes::_getstatic: { // check for compile-time constants, i.e., initialized static final fields @@ -1509,7 +1506,7 @@ state_before = copy_state_for_exception(); } push(type, append(new LoadField(append(obj), offset, field, true, - state_before, is_loaded, is_initialized))); + state_before, needs_patching))); } break; } @@ -1518,7 +1515,7 @@ if (state_before == NULL) { state_before = copy_state_for_exception(); } - append(new StoreField(append(obj), offset, field, val, true, state_before, is_loaded, is_initialized)); + append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching)); } break; case Bytecodes::_getfield : @@ -1526,8 +1523,8 @@ if (state_before == NULL) { state_before = copy_state_for_exception(); } - LoadField* load = new LoadField(apop(), offset, field, false, state_before, is_loaded, true); - Value replacement = is_loaded ? _memory->load(load) : load; + LoadField* load = new LoadField(apop(), offset, field, false, state_before, needs_patching); + Value replacement = !needs_patching ? 
_memory->load(load) : load; if (replacement != load) { assert(replacement->is_linked() || !replacement->can_be_linked(), "should already by linked"); push(type, replacement); @@ -1542,8 +1539,8 @@ if (state_before == NULL) { state_before = copy_state_for_exception(); } - StoreField* store = new StoreField(apop(), offset, field, val, false, state_before, is_loaded, true); - if (is_loaded) store = _memory->store(store); + StoreField* store = new StoreField(apop(), offset, field, val, false, state_before, needs_patching); + if (!needs_patching) store = _memory->store(store); if (store != NULL) { append(store); } @@ -2827,7 +2824,7 @@ int idx = 0; if (!method()->is_static()) { // we should always see the receiver - state->store_local(idx, new Local(objectType, idx)); + state->store_local(idx, new Local(method()->holder(), objectType, idx)); idx = 1; } @@ -2839,7 +2836,7 @@ // don't allow T_ARRAY to propagate into locals types if (basic_type == T_ARRAY) basic_type = T_OBJECT; ValueType* vt = as_ValueType(basic_type); - state->store_local(idx, new Local(vt, idx)); + state->store_local(idx, new Local(type, vt, idx)); idx += type->size(); } @@ -3308,22 +3305,23 @@ Value exception = append_with_bci(new ExceptionObject(), SynchronizationEntryBCI); assert(exception->is_pinned(), "must be"); + int bci = SynchronizationEntryBCI; if (compilation()->env()->dtrace_method_probes()) { - // Report exit from inline methods + // Report exit from inline methods. We don't have a stream here + // so pass an explicit bci of SynchronizationEntryBCI. Values* args = new Values(1); - args->push(append(new Constant(new ObjectConstant(method())))); - append(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args)); + args->push(append_with_bci(new Constant(new ObjectConstant(method())), bci)); + append_with_bci(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args), bci); } - int bci = SynchronizationEntryBCI; if (lock) { assert(state()->locks_size() > 0 && state()->lock_at(state()->locks_size() - 1) == lock, "lock is missing"); if (!lock->is_linked()) { - lock = append_with_bci(lock, -1); + lock = append_with_bci(lock, bci); } // exit the monitor in the context of the synchronized method - monitorexit(lock, SynchronizationEntryBCI); + monitorexit(lock, bci); // exit the context of the synchronized method if (!default_handler) { @@ -3778,24 +3776,7 @@ #ifndef PRODUCT void GraphBuilder::print_inline_result(ciMethod* callee, bool res) { - const char sync_char = callee->is_synchronized() ? 's' : ' '; - const char exception_char = callee->has_exception_handlers() ? '!' : ' '; - const char monitors_char = callee->has_monitor_bytecodes() ? 'm' : ' '; - tty->print(" %c%c%c ", sync_char, exception_char, monitors_char); - for (int i = 0; i < scope()->level(); i++) tty->print(" "); - if (res) { - tty->print(" "); - } else { - tty->print("- "); - } - tty->print("@ %d ", bci()); - callee->print_short_name(); - tty->print(" (%d bytes)", callee->code_size()); - if (_inline_bailout_msg) { - tty->print(" %s", _inline_bailout_msg); - } - tty->cr(); - + CompileTask::print_inlining(callee, scope()->level(), bci(), _inline_bailout_msg); if (res && CIPrintMethodCodes) { callee->print_codes(); }
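Note on the GraphBuilder hunk above: it collapses the old is_loaded/is_initialized pair into a single needs_patching decision, and keeps the "may the holder still get initialized" question separate from it. Below is a minimal standalone sketch of that predicate logic, with simplified stand-ins for the ci* queries; none of the names are HotSpot API.

#include <cstdio>

// Simplified stand-ins for the compiler-interface queries used in the hunk.
struct FieldAccess {
    bool holder_loaded;       // is the field's holder class loaded?
    bool holder_initialized;  // is the holder already initialized?
    bool will_link;           // does the field resolve in this constant pool?
};

// Patching is needed whenever resolution can fail at compile time
// (or when a PatchALot-style flag forces it for testing).
static bool needs_patching(const FieldAccess& f, bool patch_a_lot) {
    return !f.holder_loaded || !f.will_link || patch_a_lot;
}

// Debug state is captured whenever either patching or class
// initialization may still happen at run time.
static bool must_save_state_before(const FieldAccess& f, bool patch_a_lot) {
    return !f.holder_initialized || needs_patching(f, patch_a_lot);
}

int main() {
    FieldAccess resolved{true, true, true};
    FieldAccess unresolved{false, false, false};
    std::printf("resolved: patch=%d save=%d\n",
                needs_patching(resolved, false), must_save_state_before(resolved, false));
    std::printf("unresolved: patch=%d save=%d\n",
                needs_patching(unresolved, false), must_save_state_before(unresolved, false));
    return 0;
}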
--- a/src/share/vm/c1/c1_Instruction.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/c1/c1_Instruction.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -135,6 +135,33 @@ } +ciType* Local::exact_type() const { + ciType* type = declared_type(); + + // for primitive arrays, the declared type is the exact type + if (type->is_type_array_klass()) { + return type; + } else if (type->is_instance_klass()) { + ciInstanceKlass* ik = (ciInstanceKlass*)type; + if (ik->is_loaded() && ik->is_final() && !ik->is_interface()) { + return type; + } + } else if (type->is_obj_array_klass()) { + ciObjArrayKlass* oak = (ciObjArrayKlass*)type; + ciType* base = oak->base_element_type(); + if (base->is_instance_klass()) { + ciInstanceKlass* ik = base->as_instance_klass(); + if (ik->is_loaded() && ik->is_final()) { + return type; + } + } else if (base->is_primitive_type()) { + return type; + } + } + return NULL; +} + + ciType* LoadIndexed::exact_type() const { ciType* array_type = array()->exact_type(); if (array_type == NULL) { @@ -189,16 +216,21 @@ return ciTypeArrayKlass::make(elt_type()); } - ciType* NewObjectArray::exact_type() const { return ciObjArrayKlass::make(klass()); } +ciType* NewArray::declared_type() const { + return exact_type(); +} ciType* NewInstance::exact_type() const { return klass(); } +ciType* NewInstance::declared_type() const { + return exact_type(); +} ciType* CheckCast::declared_type() const { return klass(); @@ -349,6 +381,11 @@ if (state() != NULL) state()->values_do(f); } +ciType* Invoke::declared_type() const { + ciType *t = _target->signature()->return_type(); + assert(t->basic_type() != T_VOID, "need return value of void method?"); + return t; +} // Implementation of Contant intx Constant::hash() const {
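Note on the new Local::exact_type() above: a declared type is promoted to an exact type only when the declaration already pins it down. The following self-contained sketch applies the same narrowing rule to a toy type model instead of the ci* hierarchy; all names are illustrative.

#include <cstdio>

// Toy type model; the ci* classes in the hunk carry the same facts.
struct ToyType {
    enum Kind { PrimitiveArray, Instance, ObjectArray } kind;
    bool loaded;
    bool final_class;            // for Instance: the class itself is final
    bool interface_class;        // for Instance: declared type is an interface
    bool element_is_primitive;   // for ObjectArray
    bool element_final;          // for ObjectArray: element class is loaded and final
};

// True when the declared type can be treated as exact: primitive arrays are
// exact by construction, instances must be loaded, final and non-interface,
// and object arrays need a primitive or final element type.
static bool declared_is_exact(const ToyType& t) {
    switch (t.kind) {
    case ToyType::PrimitiveArray: return true;
    case ToyType::Instance:       return t.loaded && t.final_class && !t.interface_class;
    case ToyType::ObjectArray:    return t.element_is_primitive || (t.loaded && t.element_final);
    }
    return false;
}

int main() {
    ToyType final_local{ToyType::Instance, true, true, false, false, false};
    ToyType iface_local{ToyType::Instance, true, false, true, false, false};
    std::printf("final class local exact? %d\n", declared_is_exact(final_local));
    std::printf("interface local exact?   %d\n", declared_is_exact(iface_local));
    return 0;
}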
--- a/src/share/vm/c1/c1_Instruction.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/c1/c1_Instruction.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -323,8 +323,6 @@ CanTrapFlag, DirectCompareFlag, IsEliminatedFlag, - IsInitializedFlag, - IsLoadedFlag, IsSafepointFlag, IsStaticFlag, IsStrictfpFlag, @@ -623,16 +621,21 @@ LEAF(Local, Instruction) private: int _java_index; // the local index within the method to which the local belongs + ciType* _declared_type; public: // creation - Local(ValueType* type, int index) + Local(ciType* declared, ValueType* type, int index) : Instruction(type) , _java_index(index) + , _declared_type(declared) {} // accessors int java_index() const { return _java_index; } + ciType* declared_type() const { return _declared_type; } + ciType* exact_type() const; + // generic virtual void input_values_do(ValueVisitor* f) { /* no values */ } }; @@ -693,7 +696,7 @@ public: // creation AccessField(Value obj, int offset, ciField* field, bool is_static, - ValueStack* state_before, bool is_loaded, bool is_initialized) + ValueStack* state_before, bool needs_patching) : Instruction(as_ValueType(field->type()->basic_type()), state_before) , _obj(obj) , _offset(offset) @@ -701,16 +704,9 @@ , _explicit_null_check(NULL) { set_needs_null_check(!is_static); - set_flag(IsLoadedFlag, is_loaded); - set_flag(IsInitializedFlag, is_initialized); set_flag(IsStaticFlag, is_static); + set_flag(NeedsPatchingFlag, needs_patching); ASSERT_VALUES - if (!is_loaded || (PatchALot && !field->is_volatile())) { - // need to patch if the holder wasn't loaded or we're testing - // using PatchALot. Don't allow PatchALot for fields which are - // known to be volatile they aren't patchable. - set_flag(NeedsPatchingFlag, true); - } // pin of all instructions with memory access pin(); } @@ -721,11 +717,14 @@ ciField* field() const { return _field; } BasicType field_type() const { return _field->type()->basic_type(); } bool is_static() const { return check_flag(IsStaticFlag); } - bool is_loaded() const { return check_flag(IsLoadedFlag); } - bool is_initialized() const { return check_flag(IsInitializedFlag); } NullCheck* explicit_null_check() const { return _explicit_null_check; } bool needs_patching() const { return check_flag(NeedsPatchingFlag); } + // Unresolved getstatic and putstatic can cause initialization. + // Technically it occurs at the Constant that materializes the base + // of the static fields but it's simpler to model it here. 
+ bool is_init_point() const { return is_static() && (needs_patching() || !_field->holder()->is_initialized()); } + // manipulation // Under certain circumstances, if a previous NullCheck instruction @@ -745,15 +744,15 @@ public: // creation LoadField(Value obj, int offset, ciField* field, bool is_static, - ValueStack* state_before, bool is_loaded, bool is_initialized) - : AccessField(obj, offset, field, is_static, state_before, is_loaded, is_initialized) + ValueStack* state_before, bool needs_patching) + : AccessField(obj, offset, field, is_static, state_before, needs_patching) {} ciType* declared_type() const; ciType* exact_type() const; // generic - HASHING2(LoadField, is_loaded() && !field()->is_volatile(), obj()->subst(), offset()) // cannot be eliminated if not yet loaded or if volatile + HASHING2(LoadField, !needs_patching() && !field()->is_volatile(), obj()->subst(), offset()) // cannot be eliminated if needs patching or if volatile }; @@ -764,8 +763,8 @@ public: // creation StoreField(Value obj, int offset, ciField* field, Value value, bool is_static, - ValueStack* state_before, bool is_loaded, bool is_initialized) - : AccessField(obj, offset, field, is_static, state_before, is_loaded, is_initialized) + ValueStack* state_before, bool needs_patching) + : AccessField(obj, offset, field, is_static, state_before, needs_patching) , _value(value) { set_flag(NeedsWriteBarrierFlag, as_ValueType(field_type())->is_object()); @@ -1152,6 +1151,8 @@ BasicTypeList* signature() const { return _signature; } ciMethod* target() const { return _target; } + ciType* declared_type() const; + // Returns false if target is not loaded bool target_is_final() const { return check_flag(TargetIsFinalFlag); } bool target_is_loaded() const { return check_flag(TargetIsLoadedFlag); } @@ -1193,6 +1194,7 @@ // generic virtual bool can_trap() const { return true; } ciType* exact_type() const; + ciType* declared_type() const; }; @@ -1214,6 +1216,8 @@ virtual bool needs_exception_state() const { return false; } + ciType* declared_type() const; + // generic virtual bool can_trap() const { return true; } virtual void input_values_do(ValueVisitor* f) { StateSplit::input_values_do(f); f->visit(&_length); } @@ -1403,6 +1407,7 @@ vmIntrinsics::ID _id; Values* _args; Value _recv; + int _nonnull_state; // mask identifying which args are nonnull public: // preserves_state can be set to true for Intrinsics @@ -1423,6 +1428,7 @@ , _id(id) , _args(args) , _recv(NULL) + , _nonnull_state(AllBits) { assert(args != NULL, "args must exist"); ASSERT_VALUES @@ -1448,6 +1454,23 @@ Value receiver() const { assert(has_receiver(), "must have receiver"); return _recv; } bool preserves_state() const { return check_flag(PreservesStateFlag); } + bool arg_needs_null_check(int i) { + if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) { + return is_set_nth_bit(_nonnull_state, i); + } + return true; + } + + void set_arg_needs_null_check(int i, bool check) { + if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) { + if (check) { + _nonnull_state |= nth_bit(i); + } else { + _nonnull_state &= ~(nth_bit(i)); + } + } + } + // generic virtual bool can_trap() const { return check_flag(CanTrapFlag); } virtual void input_values_do(ValueVisitor* f) {
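Note on the _nonnull_state field added above: it packs one "needs a null check" bit per intrinsic argument into a plain int, so the arraycopy null-check elision can be recorded without a side table. A standalone sketch of the same bit bookkeeping follows; the class and method names are illustrative, not HotSpot's.

#include <climits>
#include <cstdio>

// Per-argument flag word: bit i set means argument i still needs a null check.
class ArgNullCheckMask {
    unsigned _bits = ~0u;   // conservatively start with "check everything"
public:
    bool needs_check(int i) const {
        // Out-of-range indices fall back to the conservative answer.
        if (i < 0 || i >= static_cast<int>(sizeof(_bits) * CHAR_BIT)) return true;
        return (_bits >> i) & 1u;
    }
    void set_needs_check(int i, bool check) {
        if (i < 0 || i >= static_cast<int>(sizeof(_bits) * CHAR_BIT)) return;
        if (check) _bits |=  (1u << i);
        else       _bits &= ~(1u << i);
    }
};

int main() {
    ArgNullCheckMask mask;
    mask.set_needs_check(0, false);   // src proven non-null
    mask.set_needs_check(2, false);   // dst proven non-null
    std::printf("src %d, src_pos %d, dst %d\n",
                mask.needs_check(0), mask.needs_check(1), mask.needs_check(2));
    return 0;
}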
--- a/src/share/vm/c1/c1_LIR.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/c1/c1_LIR.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1156,7 +1156,7 @@ return is_invokedynamic() // An invokedynamic is always a MethodHandle call site. || - (method()->holder()->name() == ciSymbol::java_dyn_MethodHandle() && + (method()->holder()->name() == ciSymbol::java_lang_invoke_MethodHandle() && methodOopDesc::is_method_handle_invoke_name(method()->name()->sid())); } @@ -1215,7 +1215,11 @@ src_range_check = 1 << 5, dst_range_check = 1 << 6, type_check = 1 << 7, - all_flags = (1 << 8) - 1 + overlapping = 1 << 8, + unaligned = 1 << 9, + src_objarray = 1 << 10, + dst_objarray = 1 << 11, + all_flags = (1 << 12) - 1 }; LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp,
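Note on the enlarged LIR_OpArrayCopy flag set above: code generation starts from all_flags and clears individual bits as checks are proven unnecessary. A minimal sketch of that "start pessimistic, strip what you can prove" pattern; the flag names below are an illustrative subset, not the real enum.

#include <cstdio>

// Each bit is one runtime check the copy stub would otherwise perform.
enum CopyFlags {
    src_null_check  = 1 << 0,
    dst_null_check  = 1 << 1,
    src_range_check = 1 << 2,
    dst_range_check = 1 << 3,
    type_check      = 1 << 4,
    overlapping     = 1 << 5,
    unaligned       = 1 << 6,
    all_copy_flags  = (1 << 7) - 1
};

int main() {
    int flags = all_copy_flags;              // assume every check is needed
    bool src_known_nonnull = true;
    bool constant_positions_disjoint = true;
    if (src_known_nonnull)           flags &= ~src_null_check;
    if (constant_positions_disjoint) flags &= ~overlapping;
    std::printf("remaining checks: 0x%x\n", flags);
    return 0;
}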
--- a/src/share/vm/c1/c1_LIRAssembler.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/c1/c1_LIRAssembler.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -836,6 +836,9 @@ _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size); } } + check_codespace(); + CHECK_BAILOUT(); + s.next(); } VerifyOops = v;
--- a/src/share/vm/c1/c1_LIRAssembler.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/c1/c1_LIRAssembler.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_LIRGenerator.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -706,6 +706,38 @@ } } +static Value maxvalue(IfOp* ifop) { + switch (ifop->cond()) { + case If::eql: return NULL; + case If::neq: return NULL; + case If::lss: // x < y ? x : y + case If::leq: // x <= y ? x : y + if (ifop->x() == ifop->tval() && + ifop->y() == ifop->fval()) return ifop->y(); + return NULL; + + case If::gtr: // x > y ? y : x + case If::geq: // x >= y ? y : x + if (ifop->x() == ifop->tval() && + ifop->y() == ifop->fval()) return ifop->y(); + return NULL; + + } +} + +static ciType* phi_declared_type(Phi* phi) { + ciType* t = phi->operand_at(0)->declared_type(); + if (t == NULL) { + return NULL; + } + for(int i = 1; i < phi->operand_count(); i++) { + if (t != phi->operand_at(i)->declared_type()) { + return NULL; + } + } + return t; +} + void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) { Instruction* src = x->argument_at(0); Instruction* src_pos = x->argument_at(1); @@ -715,12 +747,20 @@ // first try to identify the likely type of the arrays involved ciArrayKlass* expected_type = NULL; - bool is_exact = false; + bool is_exact = false, src_objarray = false, dst_objarray = false; { ciArrayKlass* src_exact_type = as_array_klass(src->exact_type()); ciArrayKlass* src_declared_type = as_array_klass(src->declared_type()); + Phi* phi; + if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) { + src_declared_type = as_array_klass(phi_declared_type(phi)); + } ciArrayKlass* dst_exact_type = as_array_klass(dst->exact_type()); ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type()); + if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) { + dst_declared_type = as_array_klass(phi_declared_type(phi)); + } + if (src_exact_type != NULL && src_exact_type == dst_exact_type) { // the types exactly match so the type is fully known is_exact = true; @@ -744,17 +784,60 @@ if (expected_type == NULL) expected_type = dst_exact_type; if (expected_type == NULL) expected_type = src_declared_type; if (expected_type == NULL) expected_type = dst_declared_type; + + src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass()); + dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass()); } // if a probable array type has been identified, figure out if any // of the required checks for a fast case can be elided. int flags = LIR_OpArrayCopy::all_flags; + + if (!src_objarray) + flags &= ~LIR_OpArrayCopy::src_objarray; + if (!dst_objarray) + flags &= ~LIR_OpArrayCopy::dst_objarray; + + if (!x->arg_needs_null_check(0)) + flags &= ~LIR_OpArrayCopy::src_null_check; + if (!x->arg_needs_null_check(2)) + flags &= ~LIR_OpArrayCopy::dst_null_check; + + if (expected_type != NULL) { - // try to skip null checks - if (src->as_NewArray() != NULL) + Value length_limit = NULL; + + IfOp* ifop = length->as_IfOp(); + if (ifop != NULL) { + // look for expressions like min(v, a.length) which ends up as + // x > y ? y : x or x >= y ? 
y : x + if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) && + ifop->x() == ifop->fval() && + ifop->y() == ifop->tval()) { + length_limit = ifop->y(); + } + } + + // try to skip null checks and range checks + NewArray* src_array = src->as_NewArray(); + if (src_array != NULL) { flags &= ~LIR_OpArrayCopy::src_null_check; - if (dst->as_NewArray() != NULL) + if (length_limit != NULL && + src_array->length() == length_limit && + is_constant_zero(src_pos)) { + flags &= ~LIR_OpArrayCopy::src_range_check; + } + } + + NewArray* dst_array = dst->as_NewArray(); + if (dst_array != NULL) { flags &= ~LIR_OpArrayCopy::dst_null_check; + if (length_limit != NULL && + dst_array->length() == length_limit && + is_constant_zero(dst_pos)) { + flags &= ~LIR_OpArrayCopy::dst_range_check; + } + } // check from incoming constant values if (positive_constant(src_pos)) @@ -788,6 +871,28 @@ } } + IntConstant* src_int = src_pos->type()->as_IntConstant(); + IntConstant* dst_int = dst_pos->type()->as_IntConstant(); + if (src_int && dst_int) { + int s_offs = src_int->value(); + int d_offs = dst_int->value(); + if (src_int->value() >= dst_int->value()) { + flags &= ~LIR_OpArrayCopy::overlapping; + } + if (expected_type != NULL) { + BasicType t = expected_type->element_type()->basic_type(); + int element_size = type2aelembytes(t); + if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) && + ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) { + flags &= ~LIR_OpArrayCopy::unaligned; + } + } + } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) { + // src and dest positions are the same, or dst is zero so assume + // nonoverlapping copy. + flags &= ~LIR_OpArrayCopy::overlapping; + } + if (src == dst) { // moving within a single array so no type checks are needed if (flags & LIR_OpArrayCopy::type_check) { @@ -1351,7 +1456,7 @@ if (addr->is_address()) { LIR_Address* address = addr->as_address_ptr(); - LIR_Opr ptr = new_register(T_OBJECT); + LIR_Opr ptr = new_pointer_register(); if (!address->index()->is_valid() && address->disp() == 0) { __ move(address->base(), ptr); } else { @@ -1403,7 +1508,9 @@ LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base); if (addr->is_address()) { LIR_Address* address = addr->as_address_ptr(); - LIR_Opr ptr = new_register(T_OBJECT); + // ptr cannot be an object because we use this barrier for array card marks + // and addr can point in the middle of an array. + LIR_Opr ptr = new_pointer_register(); if (!address->index()->is_valid() && address->disp() == 0) { __ move(address->base(), ptr); } else { @@ -1559,9 +1666,7 @@ (info ? new CodeEmitInfo(info) : NULL)); } - if (is_volatile) { - assert(!needs_patching && x->is_loaded(), - "how do we know it's volatile if it's not loaded"); + if (is_volatile && !needs_patching) { volatile_field_store(value.result(), address, info); } else { LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none; @@ -1627,9 +1732,7 @@ address = generate_address(object.result(), x->offset(), field_type); } - if (is_volatile) { - assert(!needs_patching && x->is_loaded(), - "how do we know it's volatile if it's not loaded"); + if (is_volatile && !needs_patching) { volatile_field_load(address, reg, info); } else { LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none; @@ -2516,7 +2619,7 @@ __ load(new LIR_Address(tmp, (int)call_site_offset, T_OBJECT), tmp); // Load target MethodHandle from CallSite object. 
- __ load(new LIR_Address(tmp, java_dyn_CallSite::target_offset_in_bytes(), T_OBJECT), receiver); + __ load(new LIR_Address(tmp, java_lang_invoke_CallSite::target_offset_in_bytes(), T_OBJECT), receiver); __ call_dynamic(target, receiver, result_register, SharedRuntime::get_resolve_opt_virtual_call_stub(),
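Note on the arraycopy_helper changes above: two of the new eliminations are purely arithmetic once the positions are compile-time constants. A copy cannot overlap destructively when src_pos >= dst_pos (or when the positions coincide), and the aligned fast path is legal when both starting byte offsets land on a heap-word boundary. The sketch below reproduces those two tests with assumed values for the array base offset and word size, not HotSpot's real layout constants.

#include <cstdio>

// Assumed layout constants for this sketch only; the real values come from
// arrayOopDesc::base_offset_in_bytes() and HeapWordSize.
static const int kBaseOffsetBytes = 16;
static const int kHeapWordSize    = 8;

// A forward copy within the same array cannot clobber unread elements when
// the source index is not before the destination index.
static bool can_skip_overlap_check(int src_pos, int dst_pos) {
    return src_pos >= dst_pos;
}

// Both the source and destination starting addresses must be word aligned
// for the aligned (faster) copy routine to be legal.
static bool can_skip_unaligned_path(int src_pos, int dst_pos, int element_size) {
    return (kBaseOffsetBytes + src_pos * element_size) % kHeapWordSize == 0 &&
           (kBaseOffsetBytes + dst_pos * element_size) % kHeapWordSize == 0;
}

int main() {
    std::printf("overlap elidable (src=4, dst=2): %d\n", can_skip_overlap_check(4, 2));
    std::printf("aligned int copy (src=0, dst=2): %d\n", can_skip_unaligned_path(0, 2, 4));
    std::printf("aligned int copy (src=1, dst=2): %d\n", can_skip_unaligned_path(1, 2, 4));
    return 0;
}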
--- a/src/share/vm/c1/c1_LinearScan.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/c1/c1_LinearScan.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -2703,7 +2703,7 @@ assert(_fpu_stack_allocator != NULL, "must be present"); opr = _fpu_stack_allocator->to_fpu_stack(opr); - assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrHi is used)"); + assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrLo is used)"); #endif #ifdef SPARC assert(opr->fpu_regnrLo() == opr->fpu_regnrHi() + 1, "assumed in calculation (only fpu_regnrHi is used)"); @@ -2715,7 +2715,12 @@ assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrHi is used)"); #endif +#ifdef VM_LITTLE_ENDIAN + VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrLo()); +#else VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrHi()); +#endif + #ifdef _LP64 first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first)); second = &_int_0_scope_value;
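Note on the LinearScan hunk above: when describing a double held in FPU registers for debug info, the register chosen now depends on byte order. A tiny sketch of that endianness-dependent selection, under the assumption that the "lo" half holds the least significant word; the struct and function names are illustrative.

#include <cstdio>

struct DoubleRegPair { int regnr_lo; int regnr_hi; };

// On a little-endian target the first (lowest-addressed) word of the value
// lives in the "lo" register; on a big-endian target it is the "hi" one.
// VM_LITTLE_ENDIAN stands in for the build's endianness define; it is not
// defined in this standalone sketch, so the big-endian branch is taken.
static int first_word_regnr(const DoubleRegPair& r) {
#ifdef VM_LITTLE_ENDIAN
    return r.regnr_lo;
#else
    return r.regnr_hi;
#endif
}

int main() {
    DoubleRegPair d{3, 3};   // x86 FPU stack: both halves report the same slot
    std::printf("register used for debug info: %d\n", first_word_regnr(d));
    return 0;
}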
--- a/src/share/vm/c1/c1_LinearScan.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/c1/c1_LinearScan.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_MacroAssembler.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/c1/c1_MacroAssembler.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_Optimizer.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/c1/c1_Optimizer.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -644,7 +644,7 @@ void NullCheckVisitor::do_InstanceOf (InstanceOf* x) {} void NullCheckVisitor::do_MonitorEnter (MonitorEnter* x) { nce()->handle_AccessMonitor(x); } void NullCheckVisitor::do_MonitorExit (MonitorExit* x) { nce()->handle_AccessMonitor(x); } -void NullCheckVisitor::do_Intrinsic (Intrinsic* x) { nce()->clear_last_explicit_null_check(); } +void NullCheckVisitor::do_Intrinsic (Intrinsic* x) { nce()->handle_Intrinsic(x); } void NullCheckVisitor::do_BlockBegin (BlockBegin* x) {} void NullCheckVisitor::do_Goto (Goto* x) {} void NullCheckVisitor::do_If (If* x) {} @@ -1023,6 +1023,12 @@ void NullCheckEliminator::handle_Intrinsic(Intrinsic* x) { if (!x->has_receiver()) { + if (x->id() == vmIntrinsics::_arraycopy) { + for (int i = 0; i < x->number_of_arguments(); i++) { + x->set_arg_needs_null_check(i, !set_contains(x->argument_at(i))); + } + } + // Be conservative clear_last_explicit_null_check(); return;
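Note on the NullCheckEliminator hunk above: for the arraycopy intrinsic, the eliminator's set of values already proven non-null is transferred into the per-argument bits shown earlier, instead of being discarded. A short sketch of that hand-off, using a std::unordered_set in place of the eliminator's value set; all names are illustrative.

#include <cstdio>
#include <unordered_set>
#include <vector>

int main() {
    // Values (by id) the eliminator has already proven non-null on this path.
    std::unordered_set<int> proven_nonnull = {10, 42};

    // Arguments of an arraycopy call, identified by value id:
    // src, src_pos, dst, dst_pos, length.
    std::vector<int> args = {10, 7, 42, 8, 9};

    // Only arguments not in the proven set still need a runtime null check.
    std::vector<bool> needs_check(args.size());
    for (size_t i = 0; i < args.size(); ++i) {
        needs_check[i] = proven_nonnull.count(args[i]) == 0;
    }

    for (size_t i = 0; i < args.size(); ++i) {
        std::printf("arg %zu needs null check: %d\n", i, (int)needs_check[i]);
    }
    return 0;
}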
--- a/src/share/vm/c1/c1_Runtime1.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/c1/c1_Runtime1.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -103,7 +103,10 @@ int Runtime1::_generic_arraycopy_cnt = 0; int Runtime1::_primitive_arraycopy_cnt = 0; int Runtime1::_oop_arraycopy_cnt = 0; +int Runtime1::_generic_arraycopystub_cnt = 0; int Runtime1::_arraycopy_slowcase_cnt = 0; +int Runtime1::_arraycopy_checkcast_cnt = 0; +int Runtime1::_arraycopy_checkcast_attempt_cnt = 0; int Runtime1::_new_type_array_slowcase_cnt = 0; int Runtime1::_new_object_array_slowcase_cnt = 0; int Runtime1::_new_instance_slowcase_cnt = 0; @@ -119,6 +122,32 @@ int Runtime1::_throw_incompatible_class_change_error_count = 0; int Runtime1::_throw_array_store_exception_count = 0; int Runtime1::_throw_count = 0; + +static int _byte_arraycopy_cnt = 0; +static int _short_arraycopy_cnt = 0; +static int _int_arraycopy_cnt = 0; +static int _long_arraycopy_cnt = 0; +static int _oop_arraycopy_cnt = 0; + +address Runtime1::arraycopy_count_address(BasicType type) { + switch (type) { + case T_BOOLEAN: + case T_BYTE: return (address)&_byte_arraycopy_cnt; + case T_CHAR: + case T_SHORT: return (address)&_short_arraycopy_cnt; + case T_FLOAT: + case T_INT: return (address)&_int_arraycopy_cnt; + case T_DOUBLE: + case T_LONG: return (address)&_long_arraycopy_cnt; + case T_ARRAY: + case T_OBJECT: return (address)&_oop_arraycopy_cnt; + default: + ShouldNotReachHere(); + return NULL; + } +} + + #endif // Simple helper to see if the caller of a runtime stub which @@ -433,10 +462,9 @@ // been deoptimized. If that is the case we return the deopt blob // unpack_with_exception entry instead. This makes life for the exception blob easier // because making that same check and diverting is painful from assembly language. -// - - JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* thread, oopDesc* ex, address pc, nmethod*& nm)) + // Reset method handle flag. + thread->set_is_method_handle_return(false); Handle exception(thread, ex); if (UseC1X && exception.is_null()) { @@ -490,11 +518,12 @@ return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls(); } - // ExceptionCache is used only for exceptions at call and not for implicit exceptions + // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions if (guard_pages_enabled) { address fast_continuation = nm->handler_for_exception_and_pc(exception, pc); if (fast_continuation != NULL) { - if (fast_continuation == ExceptionCache::unwind_handler()) fast_continuation = NULL; + // Set flag if return address is a method handle call site. + thread->set_is_method_handle_return(nm->is_method_handle_return(pc)); return fast_continuation; } } @@ -532,14 +561,14 @@ thread->set_exception_pc(pc); // the exception cache is used only by non-implicit exceptions - if (continuation == NULL) { - nm->add_handler_for_exception_and_pc(exception, pc, ExceptionCache::unwind_handler()); - } else { + if (continuation != NULL) { nm->add_handler_for_exception_and_pc(exception, pc, continuation); } } thread->set_vm_result(exception()); + // Set flag if return address is a method handle call site. + thread->set_is_method_handle_return(nm->is_method_handle_return(pc)); if (TraceExceptions) { ttyLocker ttyl; @@ -552,20 +581,19 @@ JRT_END // Enter this method from compiled code only if there is a Java exception handler -// in the method handling the exception +// in the method handling the exception. // We are entering here from exception stub. 
We don't do a normal VM transition here. // We do it in a helper. This is so we can check to see if the nmethod we have just // searched for an exception handler has been deoptimized in the meantime. -address Runtime1::exception_handler_for_pc(JavaThread* thread) { +address Runtime1::exception_handler_for_pc(JavaThread* thread) { oop exception = thread->exception_oop(); address pc = thread->exception_pc(); // Still in Java mode - debug_only(ResetNoHandleMark rnhm); + DEBUG_ONLY(ResetNoHandleMark rnhm); nmethod* nm = NULL; address continuation = NULL; { // Enter VM mode by calling the helper - ResetNoHandleMark rnhm; continuation = exception_handler_for_pc_helper(thread, exception, pc, nm); } @@ -573,11 +601,11 @@ // Now check to see if the nmethod we were called from is now deoptimized. // If so we must return to the deopt blob and deoptimize the nmethod - if (nm != NULL && caller_is_deopted()) { continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls(); } + assert(continuation != NULL, "no handler found"); return continuation; } @@ -849,7 +877,7 @@ { klassOop klass = resolve_field_return_klass(caller_method, bci, CHECK); // Save a reference to the class that has to be checked for initialization init_klass = KlassHandle(THREAD, klass); - k = klass; + k = klass->java_mirror(); } break; case Bytecodes::_new: @@ -1272,9 +1300,17 @@ tty->print_cr(" _handle_wrong_method_cnt: %d", SharedRuntime::_wrong_method_ctr); tty->print_cr(" _ic_miss_cnt: %d", SharedRuntime::_ic_miss_ctr); tty->print_cr(" _generic_arraycopy_cnt: %d", _generic_arraycopy_cnt); + tty->print_cr(" _generic_arraycopystub_cnt: %d", _generic_arraycopystub_cnt); + tty->print_cr(" _byte_arraycopy_cnt: %d", _byte_arraycopy_cnt); + tty->print_cr(" _short_arraycopy_cnt: %d", _short_arraycopy_cnt); + tty->print_cr(" _int_arraycopy_cnt: %d", _int_arraycopy_cnt); + tty->print_cr(" _long_arraycopy_cnt: %d", _long_arraycopy_cnt); tty->print_cr(" _primitive_arraycopy_cnt: %d", _primitive_arraycopy_cnt); - tty->print_cr(" _oop_arraycopy_cnt: %d", _oop_arraycopy_cnt); + tty->print_cr(" _oop_arraycopy_cnt (C): %d", Runtime1::_oop_arraycopy_cnt); + tty->print_cr(" _oop_arraycopy_cnt (stub): %d", _oop_arraycopy_cnt); tty->print_cr(" _arraycopy_slowcase_cnt: %d", _arraycopy_slowcase_cnt); + tty->print_cr(" _arraycopy_checkcast_cnt: %d", _arraycopy_checkcast_cnt); + tty->print_cr(" _arraycopy_checkcast_attempt_cnt:%d", _arraycopy_checkcast_attempt_cnt); tty->print_cr(" _new_type_array_slowcase_cnt: %d", _new_type_array_slowcase_cnt); tty->print_cr(" _new_object_array_slowcase_cnt: %d", _new_object_array_slowcase_cnt);
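Note on the new arraycopy_count_address above: element types are grouped by copy width, so boolean and byte copies share one counter, char and short another, and so on. A standalone sketch of that mapping; the enum and counter names are illustrative, not the VM's.

#include <cstdio>

enum BasicKind { kBoolean, kByte, kChar, kShort, kInt, kFloat, kLong, kDouble, kObject };

static int byte_copies, short_copies, int_copies, long_copies, oop_copies;

// Types that copy with the same element width share a counter, mirroring
// the switch in the hunk above.
static int* copy_counter_for(BasicKind k) {
    switch (k) {
    case kBoolean: case kByte:  return &byte_copies;
    case kChar:    case kShort: return &short_copies;
    case kFloat:   case kInt:   return &int_copies;
    case kDouble:  case kLong:  return &long_copies;
    case kObject:               return &oop_copies;
    }
    return nullptr;   // unreachable for valid kinds
}

int main() {
    ++*copy_counter_for(kByte);
    ++*copy_counter_for(kBoolean);
    ++*copy_counter_for(kInt);
    std::printf("byte-width copies: %d, int-width copies: %d\n", byte_copies, int_copies);
    return 0;
}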
--- a/src/share/vm/c1/c1_Runtime1.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/c1/c1_Runtime1.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -54,6 +54,7 @@ stub(new_multi_array) \ stub(handle_exception_nofpu) /* optimized version that does not preserve fpu registers */ \ stub(handle_exception) \ + stub(handle_exception_from_callee) \ stub(throw_array_store_exception) \ stub(throw_class_cast_exception) \ stub(throw_incompatible_class_change_error) \ @@ -103,7 +104,10 @@ static int _generic_arraycopy_cnt; static int _primitive_arraycopy_cnt; static int _oop_arraycopy_cnt; + static int _generic_arraycopystub_cnt; static int _arraycopy_slowcase_cnt; + static int _arraycopy_checkcast_cnt; + static int _arraycopy_checkcast_attempt_cnt; static int _new_type_array_slowcase_cnt; static int _new_object_array_slowcase_cnt; static int _new_instance_slowcase_cnt; @@ -126,12 +130,12 @@ static const char* _blob_names[]; // stub generation - static void generate_blob_for(BufferBlob* blob, StubID id); - static OopMapSet* generate_code_for(StubID id, StubAssembler* masm); + static void generate_blob_for(BufferBlob* blob, StubID id); + static OopMapSet* generate_code_for(StubID id, StubAssembler* sasm); static OopMapSet* generate_exception_throw(StubAssembler* sasm, address target, bool has_argument); - static void generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_maps, OopMap* oop_map, bool ignore_fpu_registers = false); - static void c1x_generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_maps, OopMap* oop_map); - static void generate_unwind_exception(StubAssembler *sasm); + static OopMapSet* generate_handle_exception(StubID id, StubAssembler* sasm); + static void generate_unwind_exception(StubAssembler *sasm); + static void c1x_generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_maps, OopMap* oop_map); static OopMapSet* generate_patching(StubAssembler* sasm, address target); static OopMapSet* generate_stub_call(StubAssembler* sasm, Register result, address entry, @@ -184,7 +188,8 @@ static void trace_block_entry(jint block_id); #ifndef PRODUCT - static address throw_count_address() { return (address)&_throw_count; } + static address throw_count_address() { return (address)&_throw_count; } + static address arraycopy_count_address(BasicType type); #endif // directly accessible leaf routine
--- a/src/share/vm/c1/c1_ValueMap.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/c1/c1_ValueMap.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -141,7 +141,8 @@ // visitor functions void do_StoreField (StoreField* x) { - if (!x->is_initialized()) { + if (x->is_init_point()) { + // putstatic is an initialization point so treat it as a wide kill kill_memory(); } else { kill_field(x->field()); @@ -159,7 +160,8 @@ void do_Local (Local* x) { /* nothing to do */ } void do_Constant (Constant* x) { /* nothing to do */ } void do_LoadField (LoadField* x) { - if (!x->is_initialized()) { + if (x->is_init_point()) { + // getstatic is an initialization point so treat it as a wide kill kill_memory(); } }
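Note on the ValueMap hunk above: a field access that may trigger class initialization (is_init_point) is treated as a wide kill, because the static initializer could write anywhere, so every remembered load is invalidated rather than just the one field. A tiny sketch of that decision, with a map keyed by field id standing in for the value map.

#include <cstdio>
#include <map>

// remembered[field_id] = value number of the last load of that field.
static std::map<int, int> remembered = {{1, 100}, {2, 200}};

// A store that may run a class initializer invalidates everything; an
// ordinary store only invalidates the field it writes.
static void process_store(int field_id, bool is_init_point) {
    if (is_init_point) {
        remembered.clear();         // wide kill
    } else {
        remembered.erase(field_id); // narrow kill
    }
}

int main() {
    process_store(1, false);
    std::printf("after narrow kill: %zu entries\n", remembered.size());
    process_store(2, true);
    std::printf("after wide kill: %zu entries\n", remembered.size());
    return 0;
}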
--- a/src/share/vm/c1/c1_globals.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/c1/c1_globals.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciCPCache.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciCPCache.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,8 +46,7 @@ // ciCPCache::is_f1_null_at bool ciCPCache::is_f1_null_at(int index) { VM_ENTRY_MARK; - oop f1 = entry_at(index)->f1(); - return (f1 == NULL); + return entry_at(index)->is_f1_null(); }
--- a/src/share/vm/ci/ciCallSite.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciCallSite.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,7 +34,7 @@ // Return the target MethodHandle of this CallSite. ciMethodHandle* ciCallSite::get_target() const { VM_ENTRY_MARK; - oop method_handle_oop = java_dyn_CallSite::target(get_oop()); + oop method_handle_oop = java_lang_invoke_CallSite::target(get_oop()); return CURRENT_ENV->get_object(method_handle_oop)->as_method_handle(); }
--- a/src/share/vm/ci/ciCallSite.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciCallSite.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,7 @@ // ciCallSite // -// The class represents a java.dyn.CallSite object. +// The class represents a java.lang.invoke.CallSite object. class ciCallSite : public ciInstance { public: ciCallSite(instanceHandle h_i) : ciInstance(h_i) {}
--- a/src/share/vm/ci/ciClassList.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciClassList.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciEnv.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciEnv.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciEnv.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciEnv.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciField.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciField.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -177,7 +177,7 @@ // Never trust strangely unstable finals: System.out, etc. return false; // Even if general trusting is disabled, trust system-built closures in these packages. - if (holder->is_in_package("java/dyn") || holder->is_in_package("sun/dyn")) + if (holder->is_in_package("java/lang/invoke") || holder->is_in_package("sun/invoke")) return true; return TrustFinalNonStaticFields; } @@ -191,8 +191,9 @@ // Check to see if the field is constant. if (_holder->is_initialized() && this->is_final()) { if (!this->is_static()) { - // A field can be constant if it's a final static field or if it's - // a final non-static field of a trusted class ({java,sun}.dyn). + // A field can be constant if it's a final static field or if + // it's a final non-static field of a trusted class (classes in + // java.lang.invoke and sun.invoke packages and subpackages). if (trust_final_non_static_fields(_holder)) { _is_constant = true; return; @@ -212,7 +213,7 @@ // may change. The three examples are java.lang.System.in, // java.lang.System.out, and java.lang.System.err. - Handle k = _holder->get_klassOop(); + KlassHandle k = _holder->get_klassOop(); assert( SystemDictionary::System_klass() != NULL, "Check once per vm"); if( k() == SystemDictionary::System_klass() ) { // Check offsets for case 2: System.in, System.out, or System.err @@ -224,36 +225,38 @@ } } + Handle mirror = k->java_mirror(); + _is_constant = true; switch(type()->basic_type()) { case T_BYTE: - _constant_value = ciConstant(type()->basic_type(), k->byte_field(_offset)); + _constant_value = ciConstant(type()->basic_type(), mirror->byte_field(_offset)); break; case T_CHAR: - _constant_value = ciConstant(type()->basic_type(), k->char_field(_offset)); + _constant_value = ciConstant(type()->basic_type(), mirror->char_field(_offset)); break; case T_SHORT: - _constant_value = ciConstant(type()->basic_type(), k->short_field(_offset)); + _constant_value = ciConstant(type()->basic_type(), mirror->short_field(_offset)); break; case T_BOOLEAN: - _constant_value = ciConstant(type()->basic_type(), k->bool_field(_offset)); + _constant_value = ciConstant(type()->basic_type(), mirror->bool_field(_offset)); break; case T_INT: - _constant_value = ciConstant(type()->basic_type(), k->int_field(_offset)); + _constant_value = ciConstant(type()->basic_type(), mirror->int_field(_offset)); break; case T_FLOAT: - _constant_value = ciConstant(k->float_field(_offset)); + _constant_value = ciConstant(mirror->float_field(_offset)); break; case T_DOUBLE: - _constant_value = ciConstant(k->double_field(_offset)); + _constant_value = ciConstant(mirror->double_field(_offset)); break; case T_LONG: - _constant_value = ciConstant(k->long_field(_offset)); + _constant_value = ciConstant(mirror->long_field(_offset)); break; case T_OBJECT: case T_ARRAY: { - oop o = k->obj_field(_offset); + oop o = mirror->obj_field(_offset); // A field will be "constant" if it is known always to be // a non-null reference to an instance of a particular class,
--- a/src/share/vm/ci/ciInstance.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciInstance.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -66,8 +66,8 @@ "invalid access"); VM_ENTRY_MARK; ciConstant result; - oop obj = get_oop(); - assert(obj != NULL, "bad oop"); + Handle obj = get_oop(); + assert(!obj.is_null(), "bad oop"); BasicType field_btype = field->type()->basic_type(); int offset = field->offset(); @@ -138,3 +138,9 @@ st->print(" type="); klass()->print(st); } + + +ciKlass* ciInstance::java_lang_Class_klass() { + VM_ENTRY_MARK; + return CURRENT_ENV->get_object(java_lang_Class::as_klassOop(get_oop()))->as_klass(); +}
--- a/src/share/vm/ci/ciInstance.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciInstance.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -64,6 +64,8 @@ // Constant value of a field at the specified offset. ciConstant field_value_by_offset(int field_offset); + + ciKlass* java_lang_Class_klass(); }; #endif // SHARE_VM_CI_CIINSTANCE_HPP
--- a/src/share/vm/ci/ciInstanceKlass.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciInstanceKlass.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -84,7 +84,6 @@ if (h_k() != SystemDictionary::Object_klass()) { super(); } - java_mirror(); //compute_nonstatic_fields(); // done outside of constructor } @@ -319,6 +318,9 @@ // Get the instance of java.lang.Class corresponding to this klass. // Cache it on this->_java_mirror. ciInstance* ciInstanceKlass::java_mirror() { + if (is_shared()) { + return ciKlass::java_mirror(); + } if (_java_mirror == NULL) { _java_mirror = ciKlass::java_mirror(); }
--- a/src/share/vm/ci/ciKlass.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciKlass.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciMethod.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciMethod.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -769,7 +769,7 @@ // signature-polymorphic MethodHandle methods, invokeExact or invokeGeneric. bool ciMethod::is_method_handle_invoke() const { if (!is_loaded()) { - bool flag = (holder()->name() == ciSymbol::java_dyn_MethodHandle() && + bool flag = (holder()->name() == ciSymbol::java_lang_invoke_MethodHandle() && methodOopDesc::is_method_handle_invoke_name(name()->sid())); return flag; }
--- a/src/share/vm/ci/ciMethodHandle.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciMethodHandle.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -42,9 +42,20 @@ methodHandle callee(_callee->get_methodOop()); // We catch all exceptions here that could happen in the method // handle compiler and stop the VM. - MethodHandleCompiler mhc(h, callee, is_invokedynamic, CATCH); - methodHandle m = mhc.compile(CATCH); - return CURRENT_ENV->get_object(m())->as_method(); + MethodHandleCompiler mhc(h, callee, is_invokedynamic, THREAD); + if (!HAS_PENDING_EXCEPTION) { + methodHandle m = mhc.compile(THREAD); + if (!HAS_PENDING_EXCEPTION) { + return CURRENT_ENV->get_object(m())->as_method(); + } + } + if (PrintMiscellaneous && (Verbose || WizardMode)) { + tty->print("*** ciMethodHandle::get_adapter => "); + PENDING_EXCEPTION->print(); + tty->print("*** get_adapter (%s): ", is_invokedynamic ? "indy" : "mh"); ((ciObject*)this)->print(); //@@ + } + CLEAR_PENDING_EXCEPTION; + return NULL; }
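Note on the ciMethodHandle hunk above: a failure in the method-handle compiler no longer aborts the VM; each step is attempted, the pending exception is checked afterwards, and on failure the exception is reported, cleared, and NULL is returned so the caller can fall back. The sketch below shows that check-after-each-step pattern generically, using a simple error flag instead of HotSpot's TRAPS/PENDING_EXCEPTION machinery; all names are illustrative.

#include <cstdio>

struct Env { bool pending_exception = false; const char* message = nullptr; };

// Two illustrative steps that can each fail and record an "exception".
static int build_adapter(Env& env)          { env.pending_exception = true; env.message = "unsupported shape"; return 0; }
static int finish_adapter(Env& env, int a)  { (void)env; return a + 1; }

// Mirror of the new control flow: run each step only while no exception is
// pending; on failure, report, clear the exception, and return a null result.
static int get_adapter(Env& env) {
    int adapter = build_adapter(env);
    if (!env.pending_exception) {
        int result = finish_adapter(env, adapter);
        if (!env.pending_exception) return result;
    }
    std::printf("*** get_adapter failed: %s\n", env.message);
    env.pending_exception = false;   // clear and let the caller fall back
    return 0;                        // "NULL" result
}

int main() {
    Env env;
    std::printf("adapter = %d\n", get_adapter(env));
    return 0;
}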
--- a/src/share/vm/ci/ciMethodHandle.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciMethodHandle.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ // ciMethodHandle // -// The class represents a java.dyn.MethodHandle object. +// The class represents a java.lang.invoke.MethodHandle object. class ciMethodHandle : public ciInstance { private: ciMethod* _callee;
--- a/src/share/vm/ci/ciObjArrayKlass.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciObjArrayKlass.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciObject.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciObject.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciObjectFactory.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciObjectFactory.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -350,9 +350,9 @@ return new (arena()) ciMethodData(h_md); } else if (o->is_instance()) { instanceHandle h_i(THREAD, (instanceOop)o); - if (java_dyn_CallSite::is_instance(o)) + if (java_lang_invoke_CallSite::is_instance(o)) return new (arena()) ciCallSite(h_i); - else if (java_dyn_MethodHandle::is_instance(o)) + else if (java_lang_invoke_MethodHandle::is_instance(o)) return new (arena()) ciMethodHandle(h_i); else return new (arena()) ciInstance(h_i); @@ -671,7 +671,7 @@ if (key->is_perm() && _non_perm_count == 0) { return emptyBucket; } else if (key->is_instance()) { - if (key->klass() == SystemDictionary::Class_klass()) { + if (key->klass() == SystemDictionary::Class_klass() && JavaObjectsInPerm) { // class mirror instances are always perm return emptyBucket; }
--- a/src/share/vm/ci/ciObjectFactory.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciObjectFactory.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciSignature.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciSignature.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciSignature.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciSignature.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciStreams.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciStreams.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -380,7 +380,7 @@ bool ignore; // report as InvokeDynamic for invokedynamic, which is syntactically classless if (cur_bc() == Bytecodes::_invokedynamic) - return CURRENT_ENV->get_klass_by_name(_holder, ciSymbol::java_dyn_InvokeDynamic(), false); + return CURRENT_ENV->get_klass_by_name(_holder, ciSymbol::java_lang_invoke_InvokeDynamic(), false); return CURRENT_ENV->get_klass_by_index(cpool, get_method_holder_index(), ignore, _holder); }
--- a/src/share/vm/ci/ciSymbol.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciSymbol.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciSymbol.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciSymbol.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciTypeFlow.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciTypeFlow.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1871,7 +1871,8 @@ // ------------------------------------------------------------------ // ciTypeFlow::Block::print_on void ciTypeFlow::Block::print_on(outputStream* st) const { - if ((Verbose || WizardMode)) { + if ((Verbose || WizardMode) && (limit() >= 0)) { + // Don't print 'dummy' blocks (i.e. blocks with limit() '-1') outer()->method()->print_codes_on(start(), limit(), st); } st->print_cr(" ==================================================== ");
--- a/src/share/vm/ci/ciTypeFlow.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/ciTypeFlow.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -34,6 +34,7 @@ #include "ci/ciEnv.hpp" #include "ci/ciKlass.hpp" #include "ci/ciMethodBlocks.hpp" +#include "shark/shark_globals.hpp" #endif
--- a/src/share/vm/ci/compilerInterface.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/ci/compilerInterface.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/classFileError.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/classFileError.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/classFileParser.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/classFileParser.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,6 +37,7 @@ #include "memory/universe.inline.hpp" #include "oops/constantPoolOop.hpp" #include "oops/instanceKlass.hpp" +#include "oops/instanceMirrorKlass.hpp" #include "oops/klass.inline.hpp" #include "oops/klassOop.hpp" #include "oops/klassVtable.hpp" @@ -146,12 +147,14 @@ break; case JVM_CONSTANT_MethodHandle : case JVM_CONSTANT_MethodType : - if (!EnableMethodHandles || - _major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) { + if (_major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) { classfile_parse_error( - (!EnableMethodHandles ? - "This JVM does not support constant tag %u in class file %s" : - "Class file version does not support constant tag %u in class file %s"), + "Class file version does not support constant tag %u in class file %s", + tag, CHECK); + } + if (!EnableInvokeDynamic) { + classfile_parse_error( + "This JVM does not support constant tag %u in class file %s", tag, CHECK); } if (tag == JVM_CONSTANT_MethodHandle) { @@ -167,28 +170,21 @@ ShouldNotReachHere(); } break; - case JVM_CONSTANT_InvokeDynamicTrans : // this tag appears only in old classfiles case JVM_CONSTANT_InvokeDynamic : { - if (!EnableInvokeDynamic || - _major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) { + if (_major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) { classfile_parse_error( - (!EnableInvokeDynamic ? 
- "This JVM does not support constant tag %u in class file %s" : - "Class file version does not support constant tag %u in class file %s"), + "Class file version does not support constant tag %u in class file %s", + tag, CHECK); + } + if (!EnableInvokeDynamic) { + classfile_parse_error( + "This JVM does not support constant tag %u in class file %s", tag, CHECK); } cfs->guarantee_more(5, CHECK); // bsm_index, nt, tag/access_flags u2 bootstrap_specifier_index = cfs->get_u2_fast(); u2 name_and_type_index = cfs->get_u2_fast(); - if (tag == JVM_CONSTANT_InvokeDynamicTrans) { - if (!AllowTransitionalJSR292) - classfile_parse_error( - "This JVM does not support transitional InvokeDynamic tag %u in class file %s", - tag, CHECK); - cp->invoke_dynamic_trans_at_put(index, bootstrap_specifier_index, name_and_type_index); - break; - } if (_max_bootstrap_specifier_index < (int) bootstrap_specifier_index) _max_bootstrap_specifier_index = (int) bootstrap_specifier_index; // collect for later cp->invoke_dynamic_at_put(index, bootstrap_specifier_index, name_and_type_index); @@ -255,7 +251,7 @@ verify_legal_utf8((unsigned char*)utf8_buffer, utf8_length, CHECK); } - if (AnonymousClasses && has_cp_patch_at(index)) { + if (EnableInvokeDynamic && has_cp_patch_at(index)) { Handle patch = clear_cp_patch_at(index); guarantee_property(java_lang_String::is_instance(patch()), "Illegal utf8 patch at %d in class file %s", @@ -438,7 +434,7 @@ int ref_index = cp->method_handle_index_at(index); check_property( valid_cp_range(ref_index, length) && - EnableMethodHandles, + EnableInvokeDynamic, "Invalid constant pool index %u in class file %s", ref_index, CHECK_(nullHandle)); constantTag tag = cp->tag_at(ref_index); @@ -482,12 +478,11 @@ check_property( valid_cp_range(ref_index, length) && cp->tag_at(ref_index).is_utf8() && - EnableMethodHandles, + EnableInvokeDynamic, "Invalid constant pool index %u in class file %s", ref_index, CHECK_(nullHandle)); } break; - case JVM_CONSTANT_InvokeDynamicTrans : case JVM_CONSTANT_InvokeDynamic : { int name_and_type_ref_index = cp->invoke_dynamic_name_and_type_ref_index_at(index); @@ -496,14 +491,6 @@ "Invalid constant pool index %u in class file %s", name_and_type_ref_index, CHECK_(nullHandle)); - if (tag == JVM_CONSTANT_InvokeDynamicTrans) { - int bootstrap_method_ref_index = cp->invoke_dynamic_bootstrap_method_ref_index_at(index); - check_property(valid_cp_range(bootstrap_method_ref_index, length) && - cp->tag_at(bootstrap_method_ref_index).is_method_handle(), - "Invalid constant pool index %u in class file %s", - bootstrap_method_ref_index, - CHECK_(nullHandle)); - } // bootstrap specifier index must be checked later, when BootstrapMethods attr is available break; } @@ -517,7 +504,7 @@ if (_cp_patches != NULL) { // need to treat this_class specially... 
- assert(AnonymousClasses, ""); + assert(EnableInvokeDynamic, ""); int this_class_index; { cfs->guarantee_more(8, CHECK_(nullHandle)); // flags, this_class, super_class, infs_len @@ -573,6 +560,7 @@ } break; } + case JVM_CONSTANT_InvokeDynamic: case JVM_CONSTANT_Fieldref: case JVM_CONSTANT_Methodref: case JVM_CONSTANT_InterfaceMethodref: { @@ -672,7 +660,7 @@ void ClassFileParser::patch_constant_pool(constantPoolHandle cp, int index, Handle patch, TRAPS) { - assert(AnonymousClasses, ""); + assert(EnableInvokeDynamic, ""); BasicType patch_type = T_VOID; switch (cp->tag_at(index).value()) { @@ -1616,8 +1604,13 @@ AccessFlags access_flags; if (name == vmSymbols::class_initializer_name()) { - // We ignore the access flags for a class initializer. (JVM Spec. p. 116) - flags = JVM_ACC_STATIC; + // We ignore the other access flags for a valid class initializer. + // (JVM Spec 2nd ed., chapter 4.6) + if (_major_version < 51) { // backward compatibility + flags = JVM_ACC_STATIC; + } else if ((flags & JVM_ACC_STATIC) == JVM_ACC_STATIC) { + flags &= JVM_ACC_STATIC | JVM_ACC_STRICT; + } } else { verify_legal_method_modifiers(flags, is_interface, name, CHECK_(nullHandle)); } @@ -2093,7 +2086,7 @@ _has_vanilla_constructor = true; } - if (EnableMethodHandles && (m->is_method_handle_invoke() || + if (EnableInvokeDynamic && (m->is_method_handle_invoke() || m->is_method_handle_adapter())) { THROW_MSG_(vmSymbols::java_lang_VirtualMachineError(), "Method handle invokers must be defined internally to the VM", nullHandle); @@ -2597,54 +2590,6 @@ } -static void initialize_static_field(fieldDescriptor* fd, TRAPS) { - KlassHandle h_k (THREAD, fd->field_holder()); - assert(h_k.not_null() && fd->is_static(), "just checking"); - if (fd->has_initial_value()) { - BasicType t = fd->field_type(); - switch (t) { - case T_BYTE: - h_k()->byte_field_put(fd->offset(), fd->int_initial_value()); - break; - case T_BOOLEAN: - h_k()->bool_field_put(fd->offset(), fd->int_initial_value()); - break; - case T_CHAR: - h_k()->char_field_put(fd->offset(), fd->int_initial_value()); - break; - case T_SHORT: - h_k()->short_field_put(fd->offset(), fd->int_initial_value()); - break; - case T_INT: - h_k()->int_field_put(fd->offset(), fd->int_initial_value()); - break; - case T_FLOAT: - h_k()->float_field_put(fd->offset(), fd->float_initial_value()); - break; - case T_DOUBLE: - h_k()->double_field_put(fd->offset(), fd->double_initial_value()); - break; - case T_LONG: - h_k()->long_field_put(fd->offset(), fd->long_initial_value()); - break; - case T_OBJECT: - { - #ifdef ASSERT - TempNewSymbol sym = SymbolTable::new_symbol("Ljava/lang/String;", CHECK); - assert(fd->signature() == sym, "just checking"); - #endif - oop string = fd->string_initial_value(CHECK); - h_k()->obj_field_put(fd->offset(), string); - } - break; - default: - THROW_MSG(vmSymbols::java_lang_ClassFormatError(), - "Illegal ConstantValue attribute in class file"); - } - } -} - - void ClassFileParser::java_lang_ref_Reference_fix_pre(typeArrayHandle* fields_ptr, constantPoolHandle cp, FieldAllocationCount *fac_ptr, TRAPS) { // This code is for compatibility with earlier jdk's that do not @@ -2760,8 +2705,8 @@ } -void ClassFileParser::java_lang_Class_fix_pre(objArrayHandle* methods_ptr, - FieldAllocationCount *fac_ptr, TRAPS) { +void ClassFileParser::java_lang_Class_fix_pre(int* nonstatic_field_size, + FieldAllocationCount *fac_ptr) { // Add fake fields for java.lang.Class instances // // This is not particularly nice. 
We should consider adding a @@ -2778,10 +2723,13 @@ // versions because when the offsets are computed at bootstrap // time we don't know yet which version of the JDK we're running in. - // The values below are fake but will force two non-static oop fields and + // The values below are fake but will force three non-static oop fields and // a corresponding non-static oop map block to be allocated. const int extra = java_lang_Class::number_of_fake_oop_fields; fac_ptr->nonstatic_oop_count += extra; + + // Reserve some leading space for fake ints + *nonstatic_field_size += align_size_up(java_lang_Class::hc_number_of_fake_int_fields * BytesPerInt, heapOopSize) / heapOopSize; } @@ -2797,16 +2745,16 @@ // Force MethodHandle.vmentry to be an unmanaged pointer. // There is no way for a classfile to express this, so we must help it. -void ClassFileParser::java_dyn_MethodHandle_fix_pre(constantPoolHandle cp, +void ClassFileParser::java_lang_invoke_MethodHandle_fix_pre(constantPoolHandle cp, typeArrayHandle fields, FieldAllocationCount *fac_ptr, TRAPS) { - // Add fake fields for java.dyn.MethodHandle instances + // Add fake fields for java.lang.invoke.MethodHandle instances // // This is not particularly nice, but since there is no way to express // a native wordSize field in Java, we must do it at this level. - if (!EnableMethodHandles) return; + if (!EnableInvokeDynamic) return; int word_sig_index = 0; const int cp_size = cp->length(); @@ -2820,7 +2768,7 @@ if (word_sig_index == 0) THROW_MSG(vmSymbols::java_lang_VirtualMachineError(), - "missing I or J signature (for vmentry) in java.dyn.MethodHandle"); + "missing I or J signature (for vmentry) in java.lang.invoke.MethodHandle"); // Find vmentry field and change the signature. bool found_vmentry = false; @@ -2859,7 +2807,7 @@ if (!found_vmentry) THROW_MSG(vmSymbols::java_lang_VirtualMachineError(), - "missing vmentry byte field in java.dyn.MethodHandle"); + "missing vmentry byte field in java.lang.invoke.MethodHandle"); } @@ -3194,9 +3142,7 @@ int next_nonstatic_field_offset; // Calculate the starting byte offsets - next_static_oop_offset = (instanceKlass::header_size() + - align_object_offset(vtable_size) + - align_object_offset(itable_size)) * wordSize; + next_static_oop_offset = instanceMirrorKlass::offset_of_static_fields(); next_static_double_offset = next_static_oop_offset + (fac.static_oop_count * heapOopSize); if ( fac.static_double_count && @@ -3215,18 +3161,19 @@ fac.static_byte_count ), wordSize ); static_field_size = (next_static_type_offset - next_static_oop_offset) / wordSize; - first_nonstatic_field_offset = instanceOopDesc::base_offset_in_bytes() + - nonstatic_field_size * heapOopSize; - next_nonstatic_field_offset = first_nonstatic_field_offset; // Add fake fields for java.lang.Class instances (also see below) if (class_name == vmSymbols::java_lang_Class() && class_loader.is_null()) { - java_lang_Class_fix_pre(&methods, &fac, CHECK_(nullHandle)); + java_lang_Class_fix_pre(&nonstatic_field_size, &fac); } - // adjust the vmentry field declaration in java.dyn.MethodHandle - if (EnableMethodHandles && class_name == vmSymbols::sun_dyn_MethodHandleImpl() && class_loader.is_null()) { - java_dyn_MethodHandle_fix_pre(cp, fields, &fac, CHECK_(nullHandle)); + first_nonstatic_field_offset = instanceOopDesc::base_offset_in_bytes() + + nonstatic_field_size * heapOopSize; + next_nonstatic_field_offset = first_nonstatic_field_offset; + + // adjust the vmentry field declaration in java.lang.invoke.MethodHandle + if (EnableInvokeDynamic && class_name 
== vmSymbols::java_lang_invoke_MethodHandle() && class_loader.is_null()) { + java_lang_invoke_MethodHandle_fix_pre(cp, fields, &fac, CHECK_(nullHandle)); } // Add a fake "discovered" field if it is not present @@ -3546,7 +3493,7 @@ } // We can now create the basic klassOop for this klass - klassOop ik = oopFactory::new_instanceKlass(vtable_size, itable_size, + klassOop ik = oopFactory::new_instanceKlass(name, vtable_size, itable_size, static_field_size, total_oop_map_count, rt, CHECK_(nullHandle)); @@ -3568,7 +3515,7 @@ this_klass->set_class_loader(class_loader()); this_klass->set_nonstatic_field_size(nonstatic_field_size); this_klass->set_has_nonstatic_fields(has_nonstatic_fields); - this_klass->set_static_oop_field_size(fac.static_oop_count); + this_klass->set_static_oop_field_count(fac.static_oop_count); cp->set_pool_holder(this_klass()); error_handler.set_in_error(false); // turn off error handler for cp this_klass->set_constants(cp()); @@ -3629,9 +3576,6 @@ // Make sure this is the end of class file stream guarantee_property(cfs->at_eos(), "Extra bytes at the end of class file %s", CHECK_(nullHandle)); - // Initialize static fields - this_klass->do_local_static_fields(&initialize_static_field, CHECK_(nullHandle)); - // VerifyOops believes that once this has been set, the object is completely loaded. // Compute transitive closure of interfaces this class implements this_klass->set_transitive_interfaces(transitive_interfaces()); @@ -3665,6 +3609,9 @@ check_illegal_static_method(this_klass, CHECK_(nullHandle)); } + // Allocate mirror and initialize static fields + java_lang_Class::create_mirror(this_klass, CHECK_(nullHandle)); + ClassLoadingService::notify_class_loaded(instanceKlass::cast(this_klass()), false /* not shared class */);
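The <clinit> change in classFileParser.cpp above stops unconditionally forcing JVM_ACC_STATIC: class files older than version 51 keep the legacy behavior, while newer ones must already declare ACC_STATIC and retain only ACC_STATIC and ACC_STRICT. A minimal standalone sketch of that filtering rule (the constants use the JVM spec encodings; the function name is illustrative, not HotSpot's):

#include <cstdio>

// JVM spec access-flag encodings used for illustration.
const unsigned JVM_ACC_STATIC = 0x0008;
const unsigned JVM_ACC_STRICT = 0x0800;

// Mirrors the parser logic above: pre-51 class files get ACC_STATIC forced,
// 51+ class files keep only ACC_STATIC | ACC_STRICT when ACC_STATIC is set.
unsigned clinit_flags(unsigned flags, int major_version) {
  if (major_version < 51) {              // backward compatibility
    return JVM_ACC_STATIC;
  }
  if ((flags & JVM_ACC_STATIC) == JVM_ACC_STATIC) {
    return flags & (JVM_ACC_STATIC | JVM_ACC_STRICT);
  }
  return flags;
}

int main() {
  // PUBLIC|STATIC|FINAL|STRICT (0x0819) in an old class file: forced to 0x8.
  printf("%x\n", clinit_flags(0x0819, 50));
  // Same flags in a version-51 class file: PUBLIC and FINAL dropped, 0x808 kept.
  printf("%x\n", clinit_flags(0x0819, 51));
  return 0;
}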
--- a/src/share/vm/classfile/classFileParser.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/classFileParser.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -154,17 +154,18 @@ // Add the "discovered" field to java.lang.ref.Reference if // it does not exist. void java_lang_ref_Reference_fix_pre(typeArrayHandle* fields_ptr, - constantPoolHandle cp, FieldAllocationCount *fac_ptr, TRAPS); + constantPoolHandle cp, + FieldAllocationCount *fac_ptr, TRAPS); // Adjust the field allocation counts for java.lang.Class to add // fake fields. - void java_lang_Class_fix_pre(objArrayHandle* methods_ptr, - FieldAllocationCount *fac_ptr, TRAPS); + void java_lang_Class_fix_pre(int* nonstatic_field_size, + FieldAllocationCount *fac_ptr); // Adjust the next_nonstatic_oop_offset to place the fake fields // before any Java fields. void java_lang_Class_fix_post(int* next_nonstatic_oop_offset); - // Adjust the field allocation counts for java.dyn.MethodHandle to add + // Adjust the field allocation counts for java.lang.invoke.MethodHandle to add // a fake address (void*) field. - void java_dyn_MethodHandle_fix_pre(constantPoolHandle cp, + void java_lang_invoke_MethodHandle_fix_pre(constantPoolHandle cp, typeArrayHandle fields, FieldAllocationCount *fac_ptr, TRAPS); @@ -230,11 +231,11 @@ char* skip_over_field_signature(char* signature, bool void_ok, unsigned int length, TRAPS); bool is_anonymous() { - assert(AnonymousClasses || _host_klass.is_null(), ""); + assert(EnableInvokeDynamic || _host_klass.is_null(), ""); return _host_klass.not_null(); } bool has_cp_patch_at(int index) { - assert(AnonymousClasses, ""); + assert(EnableInvokeDynamic, ""); assert(index >= 0, "oob"); return (_cp_patches != NULL && index < _cp_patches->length() @@ -257,7 +258,7 @@ // constant pool construction, but in later versions they can. // %%% Let's phase out the old is_klass_reference. bool is_klass_reference(constantPoolHandle cp, int index) { - return ((LinkWellKnownClasses || AnonymousClasses) + return ((LinkWellKnownClasses || EnableInvokeDynamic) ? cp->tag_at(index).is_klass_or_reference() : cp->tag_at(index).is_klass_reference()); }
--- a/src/share/vm/classfile/classFileStream.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/classFileStream.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/classLoader.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/classLoader.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1332,7 +1332,7 @@ } if (_compile_the_world_counter >= CompileTheWorldStartAt) { - if (k.is_null() || (exception_occurred && !CompileTheWorldIgnoreInitErrors)) { + if (k.is_null() || exception_occurred) { // If something went wrong (e.g. ExceptionInInitializerError) we skip this class tty->print_cr("CompileTheWorld (%d) : Skipping %s", _compile_the_world_counter, buffer); } else {
--- a/src/share/vm/classfile/classLoader.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/classLoader.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/dictionary.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/dictionary.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/dictionary.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/dictionary.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/javaAssertions.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/javaAssertions.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/javaClasses.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/javaClasses.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,6 +33,7 @@ #include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" #include "oops/instanceKlass.hpp" +#include "oops/instanceMirrorKlass.hpp" #include "oops/klass.hpp" #include "oops/klassOop.hpp" #include "oops/methodOop.hpp" @@ -139,7 +140,7 @@ } Handle java_lang_String::create_tenured_from_unicode(jchar* unicode, int length, TRAPS) { - return basic_create_from_unicode(unicode, length, true, CHECK_NH); + return basic_create_from_unicode(unicode, length, JavaObjectsInPerm, CHECK_NH); } oop java_lang_String::create_oop_from_unicode(jchar* unicode, int length, TRAPS) { @@ -278,6 +279,15 @@ return result; } +unsigned int java_lang_String::hash_string(oop java_string) { + typeArrayOop value = java_lang_String::value(java_string); + int offset = java_lang_String::offset(java_string); + int length = java_lang_String::length(java_string); + + if (length == 0) return 0; + return hash_string(value->char_at_addr(offset), length); +} + Symbol* java_lang_String::as_symbol(Handle java_string, TRAPS) { oop obj = java_string(); typeArrayOop value = java_lang_String::value(obj); @@ -369,6 +379,75 @@ } } +static void initialize_static_field(fieldDescriptor* fd, TRAPS) { + Handle mirror (THREAD, fd->field_holder()->java_mirror()); + assert(mirror.not_null() && fd->is_static(), "just checking"); + if (fd->has_initial_value()) { + BasicType t = fd->field_type(); + switch (t) { + case T_BYTE: + mirror()->byte_field_put(fd->offset(), fd->int_initial_value()); + break; + case T_BOOLEAN: + mirror()->bool_field_put(fd->offset(), fd->int_initial_value()); + break; + case T_CHAR: + mirror()->char_field_put(fd->offset(), fd->int_initial_value()); + break; + case T_SHORT: + mirror()->short_field_put(fd->offset(), fd->int_initial_value()); + break; + case T_INT: + mirror()->int_field_put(fd->offset(), fd->int_initial_value()); + break; + case T_FLOAT: + mirror()->float_field_put(fd->offset(), fd->float_initial_value()); + break; + case T_DOUBLE: + mirror()->double_field_put(fd->offset(), fd->double_initial_value()); + break; + case T_LONG: + mirror()->long_field_put(fd->offset(), fd->long_initial_value()); + break; + case T_OBJECT: + { + #ifdef ASSERT + TempNewSymbol sym = SymbolTable::new_symbol("Ljava/lang/String;", CHECK); + assert(fd->signature() == sym, "just checking"); + #endif + oop string = fd->string_initial_value(CHECK); + mirror()->obj_field_put(fd->offset(), string); + } + break; + default: + THROW_MSG(vmSymbols::java_lang_ClassFormatError(), + "Illegal ConstantValue attribute in class file"); + } + } +} + + +// During bootstrap, java.lang.Class wasn't loaded so static field +// offsets were computed without the size added it. Go back and +// update all the static field offsets to included the size. 
+static void fixup_static_field(fieldDescriptor* fd, TRAPS) { + if (fd->is_static()) { + int real_offset = fd->offset() + instanceMirrorKlass::offset_of_static_fields(); + typeArrayOop fields = instanceKlass::cast(fd->field_holder())->fields(); + fields->short_at_put(fd->index() + instanceKlass::low_offset, extract_low_short_from_int(real_offset)); + fields->short_at_put(fd->index() + instanceKlass::high_offset, extract_high_short_from_int(real_offset)); + } +} + +void java_lang_Class::fixup_mirror(KlassHandle k, TRAPS) { + assert(instanceMirrorKlass::offset_of_static_fields() != 0, "must have been computed already"); + + if (k->oop_is_instance()) { + // Fixup the offsets + instanceKlass::cast(k())->do_local_static_fields(&fixup_static_field, CHECK); + } + create_mirror(k, CHECK); +} oop java_lang_Class::create_mirror(KlassHandle k, TRAPS) { assert(k->java_mirror() == NULL, "should only assign mirror once"); @@ -378,12 +457,17 @@ // class is put into the system dictionary. int computed_modifiers = k->compute_modifier_flags(CHECK_0); k->set_modifier_flags(computed_modifiers); - if (SystemDictionary::Class_klass_loaded()) { + if (SystemDictionary::Class_klass_loaded() && (k->oop_is_instance() || k->oop_is_javaArray())) { // Allocate mirror (java.lang.Class instance) - Handle mirror = instanceKlass::cast(SystemDictionary::Class_klass())->allocate_permanent_instance(CHECK_0); + Handle mirror = instanceMirrorKlass::cast(SystemDictionary::Class_klass())->allocate_instance(k, CHECK_0); // Setup indirections mirror->obj_field_put(klass_offset, k()); k->set_java_mirror(mirror()); + + instanceMirrorKlass* mk = instanceMirrorKlass::cast(mirror->klass()); + java_lang_Class::set_oop_size(mirror(), mk->instance_size(k)); + java_lang_Class::set_static_oop_field_count(mirror(), mk->compute_static_oop_field_count(mirror())); + // It might also have a component mirror. This mirror must already exist. 
if (k->oop_is_javaArray()) { Handle comp_mirror; @@ -406,6 +490,9 @@ arrayKlass::cast(k->as_klassOop())->set_component_mirror(comp_mirror()); set_array_klass(comp_mirror(), k->as_klassOop()); } + } else if (k->oop_is_instance()) { + // Initialize static fields + instanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, CHECK_NULL); } return mirror(); } else { @@ -414,21 +501,46 @@ } + +int java_lang_Class::oop_size(oop java_class) { + assert(oop_size_offset != 0, "must be set"); + return java_class->int_field(oop_size_offset); +} +void java_lang_Class::set_oop_size(oop java_class, int size) { + assert(oop_size_offset != 0, "must be set"); + java_class->int_field_put(oop_size_offset, size); +} +int java_lang_Class::static_oop_field_count(oop java_class) { + assert(static_oop_field_count_offset != 0, "must be set"); + return java_class->int_field(static_oop_field_count_offset); +} +void java_lang_Class::set_static_oop_field_count(oop java_class, int size) { + assert(static_oop_field_count_offset != 0, "must be set"); + java_class->int_field_put(static_oop_field_count_offset, size); +} + + + + oop java_lang_Class::create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS) { // This should be improved by adding a field at the Java level or by // introducing a new VM klass (see comment in ClassFileParser) - oop java_class = instanceKlass::cast(SystemDictionary::Class_klass())->allocate_permanent_instance(CHECK_0); + oop java_class = instanceMirrorKlass::cast(SystemDictionary::Class_klass())->allocate_instance((oop)NULL, CHECK_0); if (type != T_VOID) { klassOop aklass = Universe::typeArrayKlassObj(type); assert(aklass != NULL, "correct bootstrap"); set_array_klass(java_class, aklass); } + instanceMirrorKlass* mk = instanceMirrorKlass::cast(SystemDictionary::Class_klass()); + java_lang_Class::set_oop_size(java_class, mk->instance_size(oop(NULL))); + java_lang_Class::set_static_oop_field_count(java_class, 0); return java_class; } klassOop java_lang_Class::as_klassOop(oop java_class) { //%note memory_2 + assert(java_lang_Class::is_instance(java_class), "must be a Class object"); klassOop k = klassOop(java_class->obj_field(klass_offset)); assert(k == NULL || k->is_klass(), "type check"); return k; @@ -1319,32 +1431,41 @@ } } #ifdef ASSERT - assert(st_method() == method && st.bci() == bci, - "Wrong stack trace"); - st.next(); - // vframeStream::method isn't GC-safe so store off a copy - // of the methodOop in case we GC. - if (!st.at_end()) { - st_method = st.method(); - } + assert(st_method() == method && st.bci() == bci, + "Wrong stack trace"); + st.next(); + // vframeStream::method isn't GC-safe so store off a copy + // of the methodOop in case we GC. + if (!st.at_end()) { + st_method = st.method(); + } #endif + + // the format of the stacktrace will be: + // - 1 or more fillInStackTrace frames for the exception class (skipped) + // - 0 or more <init> methods for the exception class (skipped) + // - rest of the stack + if (!skip_fillInStackTrace_check) { - // check "fillInStackTrace" only once, so we negate the flag - // after the first time check. - skip_fillInStackTrace_check = true; - if (method->name() == vmSymbols::fillInStackTrace_name()) { + if ((method->name() == vmSymbols::fillInStackTrace_name() || + method->name() == vmSymbols::fillInStackTrace0_name()) && + throwable->is_a(method->method_holder())) { continue; } + else { + skip_fillInStackTrace_check = true; // gone past them all + } } - // skip <init> methods of the exceptions klass. 
If there is <init> methods - // that belongs to a superclass of the exception we are going to skipping - // them in stack trace. This is simlar to classic VM. if (!skip_throwableInit_check) { + assert(skip_fillInStackTrace_check, "logic error in backtrace filtering"); + + // skip <init> methods of the exception class and superclasses + // This is simlar to classic VM. if (method->name() == vmSymbols::object_initializer_name() && throwable->is_a(method->method_holder())) { continue; } else { - // if no "Throwable.init()" method found, we stop checking it next time. + // there are none or we've seen them all - either way stop checking skip_throwableInit_check = true; } } @@ -2130,7 +2251,7 @@ // Support for java_lang_ref_Reference oop java_lang_ref_Reference::pending_list_lock() { instanceKlass* ik = instanceKlass::cast(SystemDictionary::Reference_klass()); - char *addr = (((char *)ik->start_of_static_fields()) + static_lock_offset); + address addr = ik->static_field_addr(static_lock_offset); if (UseCompressedOops) { return oopDesc::load_decode_heap_oop((narrowOop *)addr); } else { @@ -2140,7 +2261,7 @@ HeapWord *java_lang_ref_Reference::pending_list_addr() { instanceKlass* ik = instanceKlass::cast(SystemDictionary::Reference_klass()); - char *addr = (((char *)ik->start_of_static_fields()) + static_pending_offset); + address addr = ik->static_field_addr(static_pending_offset); // XXX This might not be HeapWord aligned, almost rather be char *. return (HeapWord*)addr; } @@ -2163,56 +2284,55 @@ jlong java_lang_ref_SoftReference::clock() { instanceKlass* ik = instanceKlass::cast(SystemDictionary::SoftReference_klass()); - int offset = ik->offset_of_static_fields() + static_clock_offset; - - return SystemDictionary::SoftReference_klass()->long_field(offset); + jlong* offset = (jlong*)ik->static_field_addr(static_clock_offset); + return *offset; } void java_lang_ref_SoftReference::set_clock(jlong value) { instanceKlass* ik = instanceKlass::cast(SystemDictionary::SoftReference_klass()); - int offset = ik->offset_of_static_fields() + static_clock_offset; - - SystemDictionary::SoftReference_klass()->long_field_put(offset, value); + jlong* offset = (jlong*)ik->static_field_addr(static_clock_offset); + *offset = value; } -// Support for java_dyn_MethodHandle - -int java_dyn_MethodHandle::_type_offset; -int java_dyn_MethodHandle::_vmtarget_offset; -int java_dyn_MethodHandle::_vmentry_offset; -int java_dyn_MethodHandle::_vmslots_offset; - -int sun_dyn_MemberName::_clazz_offset; -int sun_dyn_MemberName::_name_offset; -int sun_dyn_MemberName::_type_offset; -int sun_dyn_MemberName::_flags_offset; -int sun_dyn_MemberName::_vmtarget_offset; -int sun_dyn_MemberName::_vmindex_offset; - -int sun_dyn_DirectMethodHandle::_vmindex_offset; - -int sun_dyn_BoundMethodHandle::_argument_offset; -int sun_dyn_BoundMethodHandle::_vmargslot_offset; - -int sun_dyn_AdapterMethodHandle::_conversion_offset; - -void java_dyn_MethodHandle::compute_offsets() { +// Support for java_lang_invoke_MethodHandle + +int java_lang_invoke_MethodHandle::_type_offset; +int java_lang_invoke_MethodHandle::_vmtarget_offset; +int java_lang_invoke_MethodHandle::_vmentry_offset; +int java_lang_invoke_MethodHandle::_vmslots_offset; + +int java_lang_invoke_MemberName::_clazz_offset; +int java_lang_invoke_MemberName::_name_offset; +int java_lang_invoke_MemberName::_type_offset; +int java_lang_invoke_MemberName::_flags_offset; +int java_lang_invoke_MemberName::_vmtarget_offset; +int java_lang_invoke_MemberName::_vmindex_offset; + +int 
java_lang_invoke_DirectMethodHandle::_vmindex_offset; + +int java_lang_invoke_BoundMethodHandle::_argument_offset; +int java_lang_invoke_BoundMethodHandle::_vmargslot_offset; + +int java_lang_invoke_AdapterMethodHandle::_conversion_offset; + +void java_lang_invoke_MethodHandle::compute_offsets() { klassOop k = SystemDictionary::MethodHandle_klass(); - if (k != NULL && EnableMethodHandles) { - compute_offset(_type_offset, k, vmSymbols::type_name(), vmSymbols::java_dyn_MethodType_signature(), true); - compute_offset(_vmtarget_offset, k, vmSymbols::vmtarget_name(), vmSymbols::object_signature(), true); - compute_offset(_vmentry_offset, k, vmSymbols::vmentry_name(), vmSymbols::machine_word_signature(), true); + if (k != NULL && EnableInvokeDynamic) { + bool allow_super = false; + compute_offset(_type_offset, k, vmSymbols::type_name(), vmSymbols::java_lang_invoke_MethodType_signature(), allow_super); + compute_offset(_vmtarget_offset, k, vmSymbols::vmtarget_name(), vmSymbols::object_signature(), allow_super); + compute_offset(_vmentry_offset, k, vmSymbols::vmentry_name(), vmSymbols::machine_word_signature(), allow_super); // Note: MH.vmslots (if it is present) is a hoisted copy of MH.type.form.vmslots. // It is optional pending experiments to keep or toss. - compute_optional_offset(_vmslots_offset, k, vmSymbols::vmslots_name(), vmSymbols::int_signature(), true); + compute_optional_offset(_vmslots_offset, k, vmSymbols::vmslots_name(), vmSymbols::int_signature(), allow_super); } } -void sun_dyn_MemberName::compute_offsets() { +void java_lang_invoke_MemberName::compute_offsets() { klassOop k = SystemDictionary::MemberName_klass(); - if (k != NULL && EnableMethodHandles) { + if (k != NULL && EnableInvokeDynamic) { compute_offset(_clazz_offset, k, vmSymbols::clazz_name(), vmSymbols::class_signature()); compute_offset(_name_offset, k, vmSymbols::name_name(), vmSymbols::string_signature()); compute_offset(_type_offset, k, vmSymbols::type_name(), vmSymbols::object_signature()); @@ -2222,37 +2342,37 @@ } } -void sun_dyn_DirectMethodHandle::compute_offsets() { +void java_lang_invoke_DirectMethodHandle::compute_offsets() { klassOop k = SystemDictionary::DirectMethodHandle_klass(); - if (k != NULL && EnableMethodHandles) { + if (k != NULL && EnableInvokeDynamic) { compute_offset(_vmindex_offset, k, vmSymbols::vmindex_name(), vmSymbols::int_signature(), true); } } -void sun_dyn_BoundMethodHandle::compute_offsets() { +void java_lang_invoke_BoundMethodHandle::compute_offsets() { klassOop k = SystemDictionary::BoundMethodHandle_klass(); - if (k != NULL && EnableMethodHandles) { + if (k != NULL && EnableInvokeDynamic) { compute_offset(_vmargslot_offset, k, vmSymbols::vmargslot_name(), vmSymbols::int_signature(), true); compute_offset(_argument_offset, k, vmSymbols::argument_name(), vmSymbols::object_signature(), true); } } -void sun_dyn_AdapterMethodHandle::compute_offsets() { +void java_lang_invoke_AdapterMethodHandle::compute_offsets() { klassOop k = SystemDictionary::AdapterMethodHandle_klass(); - if (k != NULL && EnableMethodHandles) { + if (k != NULL && EnableInvokeDynamic) { compute_offset(_conversion_offset, k, vmSymbols::conversion_name(), vmSymbols::int_signature(), true); } } -oop java_dyn_MethodHandle::type(oop mh) { +oop java_lang_invoke_MethodHandle::type(oop mh) { return mh->obj_field(_type_offset); } -void java_dyn_MethodHandle::set_type(oop mh, oop mtype) { +void java_lang_invoke_MethodHandle::set_type(oop mh, oop mtype) { mh->obj_field_put(_type_offset, mtype); } -int 
java_dyn_MethodHandle::vmslots(oop mh) { +int java_lang_invoke_MethodHandle::vmslots(oop mh) { int vmslots_offset = _vmslots_offset; if (vmslots_offset != 0) { #ifdef ASSERT @@ -2267,7 +2387,7 @@ } // if MH.vmslots exists, hoist into it the value of type.form.vmslots -void java_dyn_MethodHandle::init_vmslots(oop mh) { +void java_lang_invoke_MethodHandle::init_vmslots(oop mh) { int vmslots_offset = _vmslots_offset; if (vmslots_offset != 0) { mh->int_field_put(vmslots_offset, compute_vmslots(mh)); @@ -2276,20 +2396,20 @@ // fetch type.form.vmslots, which is the number of JVM stack slots // required to carry the arguments of this MH -int java_dyn_MethodHandle::compute_vmslots(oop mh) { +int java_lang_invoke_MethodHandle::compute_vmslots(oop mh) { oop mtype = type(mh); if (mtype == NULL) return 0; // Java code would get NPE - oop form = java_dyn_MethodType::form(mtype); + oop form = java_lang_invoke_MethodType::form(mtype); if (form == NULL) return 0; // Java code would get NPE - return java_dyn_MethodTypeForm::vmslots(form); + return java_lang_invoke_MethodTypeForm::vmslots(form); } // fetch the low-level entry point for this mh -MethodHandleEntry* java_dyn_MethodHandle::vmentry(oop mh) { +MethodHandleEntry* java_lang_invoke_MethodHandle::vmentry(oop mh) { return (MethodHandleEntry*) mh->address_field(_vmentry_offset); } -void java_dyn_MethodHandle::set_vmentry(oop mh, MethodHandleEntry* me) { +void java_lang_invoke_MethodHandle::set_vmentry(oop mh, MethodHandleEntry* me) { assert(_vmentry_offset != 0, "must be present"); // This is always the final step that initializes a valid method handle: @@ -2303,123 +2423,123 @@ /// MemberName accessors -oop sun_dyn_MemberName::clazz(oop mname) { +oop java_lang_invoke_MemberName::clazz(oop mname) { assert(is_instance(mname), "wrong type"); return mname->obj_field(_clazz_offset); } -void sun_dyn_MemberName::set_clazz(oop mname, oop clazz) { +void java_lang_invoke_MemberName::set_clazz(oop mname, oop clazz) { assert(is_instance(mname), "wrong type"); mname->obj_field_put(_clazz_offset, clazz); } -oop sun_dyn_MemberName::name(oop mname) { +oop java_lang_invoke_MemberName::name(oop mname) { assert(is_instance(mname), "wrong type"); return mname->obj_field(_name_offset); } -void sun_dyn_MemberName::set_name(oop mname, oop name) { +void java_lang_invoke_MemberName::set_name(oop mname, oop name) { assert(is_instance(mname), "wrong type"); mname->obj_field_put(_name_offset, name); } -oop sun_dyn_MemberName::type(oop mname) { +oop java_lang_invoke_MemberName::type(oop mname) { assert(is_instance(mname), "wrong type"); return mname->obj_field(_type_offset); } -void sun_dyn_MemberName::set_type(oop mname, oop type) { +void java_lang_invoke_MemberName::set_type(oop mname, oop type) { assert(is_instance(mname), "wrong type"); mname->obj_field_put(_type_offset, type); } -int sun_dyn_MemberName::flags(oop mname) { +int java_lang_invoke_MemberName::flags(oop mname) { assert(is_instance(mname), "wrong type"); return mname->int_field(_flags_offset); } -void sun_dyn_MemberName::set_flags(oop mname, int flags) { +void java_lang_invoke_MemberName::set_flags(oop mname, int flags) { assert(is_instance(mname), "wrong type"); mname->int_field_put(_flags_offset, flags); } -oop sun_dyn_MemberName::vmtarget(oop mname) { +oop java_lang_invoke_MemberName::vmtarget(oop mname) { assert(is_instance(mname), "wrong type"); return mname->obj_field(_vmtarget_offset); } -void sun_dyn_MemberName::set_vmtarget(oop mname, oop ref) { +void java_lang_invoke_MemberName::set_vmtarget(oop mname, 
oop ref) { assert(is_instance(mname), "wrong type"); mname->obj_field_put(_vmtarget_offset, ref); } -int sun_dyn_MemberName::vmindex(oop mname) { +int java_lang_invoke_MemberName::vmindex(oop mname) { assert(is_instance(mname), "wrong type"); return mname->int_field(_vmindex_offset); } -void sun_dyn_MemberName::set_vmindex(oop mname, int index) { +void java_lang_invoke_MemberName::set_vmindex(oop mname, int index) { assert(is_instance(mname), "wrong type"); mname->int_field_put(_vmindex_offset, index); } -oop java_dyn_MethodHandle::vmtarget(oop mh) { +oop java_lang_invoke_MethodHandle::vmtarget(oop mh) { assert(is_instance(mh), "MH only"); return mh->obj_field(_vmtarget_offset); } -void java_dyn_MethodHandle::set_vmtarget(oop mh, oop ref) { +void java_lang_invoke_MethodHandle::set_vmtarget(oop mh, oop ref) { assert(is_instance(mh), "MH only"); mh->obj_field_put(_vmtarget_offset, ref); } -int sun_dyn_DirectMethodHandle::vmindex(oop mh) { +int java_lang_invoke_DirectMethodHandle::vmindex(oop mh) { assert(is_instance(mh), "DMH only"); return mh->int_field(_vmindex_offset); } -void sun_dyn_DirectMethodHandle::set_vmindex(oop mh, int index) { +void java_lang_invoke_DirectMethodHandle::set_vmindex(oop mh, int index) { assert(is_instance(mh), "DMH only"); mh->int_field_put(_vmindex_offset, index); } -int sun_dyn_BoundMethodHandle::vmargslot(oop mh) { +int java_lang_invoke_BoundMethodHandle::vmargslot(oop mh) { assert(is_instance(mh), "BMH only"); return mh->int_field(_vmargslot_offset); } -oop sun_dyn_BoundMethodHandle::argument(oop mh) { +oop java_lang_invoke_BoundMethodHandle::argument(oop mh) { assert(is_instance(mh), "BMH only"); return mh->obj_field(_argument_offset); } -int sun_dyn_AdapterMethodHandle::conversion(oop mh) { +int java_lang_invoke_AdapterMethodHandle::conversion(oop mh) { assert(is_instance(mh), "AMH only"); return mh->int_field(_conversion_offset); } -void sun_dyn_AdapterMethodHandle::set_conversion(oop mh, int conv) { +void java_lang_invoke_AdapterMethodHandle::set_conversion(oop mh, int conv) { assert(is_instance(mh), "AMH only"); mh->int_field_put(_conversion_offset, conv); } -// Support for java_dyn_MethodType - -int java_dyn_MethodType::_rtype_offset; -int java_dyn_MethodType::_ptypes_offset; -int java_dyn_MethodType::_form_offset; - -void java_dyn_MethodType::compute_offsets() { +// Support for java_lang_invoke_MethodType + +int java_lang_invoke_MethodType::_rtype_offset; +int java_lang_invoke_MethodType::_ptypes_offset; +int java_lang_invoke_MethodType::_form_offset; + +void java_lang_invoke_MethodType::compute_offsets() { klassOop k = SystemDictionary::MethodType_klass(); if (k != NULL) { compute_offset(_rtype_offset, k, vmSymbols::rtype_name(), vmSymbols::class_signature()); compute_offset(_ptypes_offset, k, vmSymbols::ptypes_name(), vmSymbols::class_array_signature()); - compute_offset(_form_offset, k, vmSymbols::form_name(), vmSymbols::java_dyn_MethodTypeForm_signature()); + compute_offset(_form_offset, k, vmSymbols::form_name(), vmSymbols::java_lang_invoke_MethodTypeForm_signature()); } } -void java_dyn_MethodType::print_signature(oop mt, outputStream* st) { +void java_lang_invoke_MethodType::print_signature(oop mt, outputStream* st) { st->print("("); objArrayOop pts = ptypes(mt); for (int i = 0, limit = pts->length(); i < limit; i++) { @@ -2429,7 +2549,7 @@ java_lang_Class::print_signature(rtype(mt), st); } -Symbol* java_dyn_MethodType::as_signature(oop mt, bool intern_if_not_found, TRAPS) { +Symbol* java_lang_invoke_MethodType::as_signature(oop mt, bool 
intern_if_not_found, TRAPS) { ResourceMark rm; stringStream buffer(128); print_signature(mt, &buffer); @@ -2444,103 +2564,83 @@ return name; } -oop java_dyn_MethodType::rtype(oop mt) { +oop java_lang_invoke_MethodType::rtype(oop mt) { assert(is_instance(mt), "must be a MethodType"); return mt->obj_field(_rtype_offset); } -objArrayOop java_dyn_MethodType::ptypes(oop mt) { +objArrayOop java_lang_invoke_MethodType::ptypes(oop mt) { assert(is_instance(mt), "must be a MethodType"); return (objArrayOop) mt->obj_field(_ptypes_offset); } -oop java_dyn_MethodType::form(oop mt) { +oop java_lang_invoke_MethodType::form(oop mt) { assert(is_instance(mt), "must be a MethodType"); return mt->obj_field(_form_offset); } -oop java_dyn_MethodType::ptype(oop mt, int idx) { +oop java_lang_invoke_MethodType::ptype(oop mt, int idx) { return ptypes(mt)->obj_at(idx); } -int java_dyn_MethodType::ptype_count(oop mt) { +int java_lang_invoke_MethodType::ptype_count(oop mt) { return ptypes(mt)->length(); } -// Support for java_dyn_MethodTypeForm - -int java_dyn_MethodTypeForm::_vmslots_offset; -int java_dyn_MethodTypeForm::_erasedType_offset; -int java_dyn_MethodTypeForm::_genericInvoker_offset; - -void java_dyn_MethodTypeForm::compute_offsets() { +// Support for java_lang_invoke_MethodTypeForm + +int java_lang_invoke_MethodTypeForm::_vmslots_offset; +int java_lang_invoke_MethodTypeForm::_erasedType_offset; +int java_lang_invoke_MethodTypeForm::_genericInvoker_offset; + +void java_lang_invoke_MethodTypeForm::compute_offsets() { klassOop k = SystemDictionary::MethodTypeForm_klass(); if (k != NULL) { compute_optional_offset(_vmslots_offset, k, vmSymbols::vmslots_name(), vmSymbols::int_signature(), true); - compute_optional_offset(_erasedType_offset, k, vmSymbols::erasedType_name(), vmSymbols::java_dyn_MethodType_signature(), true); - compute_optional_offset(_genericInvoker_offset, k, vmSymbols::genericInvoker_name(), vmSymbols::java_dyn_MethodHandle_signature(), true); + compute_optional_offset(_erasedType_offset, k, vmSymbols::erasedType_name(), vmSymbols::java_lang_invoke_MethodType_signature(), true); + compute_optional_offset(_genericInvoker_offset, k, vmSymbols::genericInvoker_name(), vmSymbols::java_lang_invoke_MethodHandle_signature(), true); if (_genericInvoker_offset == 0) _genericInvoker_offset = -1; // set to explicit "empty" value } } -int java_dyn_MethodTypeForm::vmslots(oop mtform) { +int java_lang_invoke_MethodTypeForm::vmslots(oop mtform) { assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only"); return mtform->int_field(_vmslots_offset); } -oop java_dyn_MethodTypeForm::erasedType(oop mtform) { +oop java_lang_invoke_MethodTypeForm::erasedType(oop mtform) { assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only"); return mtform->obj_field(_erasedType_offset); } -oop java_dyn_MethodTypeForm::genericInvoker(oop mtform) { +oop java_lang_invoke_MethodTypeForm::genericInvoker(oop mtform) { assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only"); return mtform->obj_field(_genericInvoker_offset); } -// Support for java_dyn_CallSite - -int java_dyn_CallSite::_target_offset; -int java_dyn_CallSite::_caller_method_offset; -int java_dyn_CallSite::_caller_bci_offset; - -void java_dyn_CallSite::compute_offsets() { +// Support for java_lang_invoke_CallSite + +int java_lang_invoke_CallSite::_target_offset; + +void java_lang_invoke_CallSite::compute_offsets() { if (!EnableInvokeDynamic) return; klassOop k = SystemDictionary::CallSite_klass(); 
if (k != NULL) { - compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_dyn_MethodHandle_signature()); - compute_offset(_caller_method_offset, k, vmSymbols::vmmethod_name(), vmSymbols::sun_dyn_MemberName_signature()); - compute_offset(_caller_bci_offset, k, vmSymbols::vmindex_name(), vmSymbols::int_signature()); + compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_lang_invoke_MethodHandle_signature()); } } -oop java_dyn_CallSite::target(oop site) { +oop java_lang_invoke_CallSite::target(oop site) { return site->obj_field(_target_offset); } -void java_dyn_CallSite::set_target(oop site, oop target) { +void java_lang_invoke_CallSite::set_target(oop site, oop target) { site->obj_field_put(_target_offset, target); } -oop java_dyn_CallSite::caller_method(oop site) { - return site->obj_field(_caller_method_offset); -} - -void java_dyn_CallSite::set_caller_method(oop site, oop ref) { - site->obj_field_put(_caller_method_offset, ref); -} - -jint java_dyn_CallSite::caller_bci(oop site) { - return site->int_field(_caller_bci_offset); -} - -void java_dyn_CallSite::set_caller_bci(oop site, jint bci) { - site->int_field_put(_caller_bci_offset, bci); -} - // Support for java_security_AccessControlContext @@ -2621,26 +2721,18 @@ // Support for java_lang_System - -void java_lang_System::compute_offsets() { - assert(offset_of_static_fields == 0, "offsets should be initialized only once"); - - instanceKlass* ik = instanceKlass::cast(SystemDictionary::System_klass()); - offset_of_static_fields = ik->offset_of_static_fields(); -} - int java_lang_System::in_offset_in_bytes() { - return (offset_of_static_fields + static_in_offset); + return (instanceMirrorKlass::offset_of_static_fields() + static_in_offset); } int java_lang_System::out_offset_in_bytes() { - return (offset_of_static_fields + static_out_offset); + return (instanceMirrorKlass::offset_of_static_fields() + static_out_offset); } int java_lang_System::err_offset_in_bytes() { - return (offset_of_static_fields + static_err_offset); + return (instanceMirrorKlass::offset_of_static_fields() + static_err_offset); } @@ -2653,6 +2745,8 @@ int java_lang_Class::array_klass_offset; int java_lang_Class::resolved_constructor_offset; int java_lang_Class::number_of_fake_oop_fields; +int java_lang_Class::oop_size_offset; +int java_lang_Class::static_oop_field_count_offset; int java_lang_Throwable::backtrace_offset; int java_lang_Throwable::detailMessage_offset; int java_lang_Throwable::cause_offset; @@ -2696,7 +2790,6 @@ int java_lang_ref_SoftReference::timestamp_offset; int java_lang_ref_SoftReference::static_clock_offset; int java_lang_ClassLoader::parent_offset; -int java_lang_System::offset_of_static_fields; int java_lang_System::static_in_offset; int java_lang_System::static_out_offset; int java_lang_System::static_err_offset; @@ -2813,10 +2906,19 @@ java_lang_String::count_offset = java_lang_String::offset_offset + sizeof (jint); java_lang_String::hash_offset = java_lang_String::count_offset + sizeof (jint); - // Do the Class Class - java_lang_Class::klass_offset = java_lang_Class::hc_klass_offset * x + header; - java_lang_Class::array_klass_offset = java_lang_Class::hc_array_klass_offset * x + header; - java_lang_Class::resolved_constructor_offset = java_lang_Class::hc_resolved_constructor_offset * x + header; + { + // Do the Class Class + int offset = header; + java_lang_Class::oop_size_offset = header; + offset += BytesPerInt; + java_lang_Class::static_oop_field_count_offset = offset; + offset = 
align_size_up(offset + BytesPerInt, x); + java_lang_Class::klass_offset = offset; + offset += x; + java_lang_Class::array_klass_offset = offset; + offset += x; + java_lang_Class::resolved_constructor_offset = offset; + } // This is NOT an offset java_lang_Class::number_of_fake_oop_fields = java_lang_Class::hc_number_of_fake_oop_fields; @@ -2873,20 +2975,17 @@ void JavaClasses::compute_offsets() { java_lang_Class::compute_offsets(); - java_lang_System::compute_offsets(); java_lang_Thread::compute_offsets(); java_lang_ThreadGroup::compute_offsets(); - if (EnableMethodHandles) { - java_dyn_MethodHandle::compute_offsets(); - sun_dyn_MemberName::compute_offsets(); - sun_dyn_DirectMethodHandle::compute_offsets(); - sun_dyn_BoundMethodHandle::compute_offsets(); - sun_dyn_AdapterMethodHandle::compute_offsets(); - java_dyn_MethodType::compute_offsets(); - java_dyn_MethodTypeForm::compute_offsets(); - } if (EnableInvokeDynamic) { - java_dyn_CallSite::compute_offsets(); + java_lang_invoke_MethodHandle::compute_offsets(); + java_lang_invoke_MemberName::compute_offsets(); + java_lang_invoke_DirectMethodHandle::compute_offsets(); + java_lang_invoke_BoundMethodHandle::compute_offsets(); + java_lang_invoke_AdapterMethodHandle::compute_offsets(); + java_lang_invoke_MethodType::compute_offsets(); + java_lang_invoke_MethodTypeForm::compute_offsets(); + java_lang_invoke_CallSite::compute_offsets(); } java_security_AccessControlContext::compute_offsets(); // Initialize reflection classes. The layouts of these classes @@ -2957,10 +3056,10 @@ tty->print_cr("Static field %s.%s appears to be nonstatic", klass_name, field_name); return false; } - if (fd.offset() == hardcoded_offset + h_klass->offset_of_static_fields()) { + if (fd.offset() == hardcoded_offset + instanceMirrorKlass::offset_of_static_fields()) { return true; } else { - tty->print_cr("Offset of static field %s.%s is hardcoded as %d but should really be %d.", klass_name, field_name, hardcoded_offset, fd.offset() - h_klass->offset_of_static_fields()); + tty->print_cr("Offset of static field %s.%s is hardcoded as %d but should really be %d.", klass_name, field_name, hardcoded_offset, fd.offset() - instanceMirrorKlass::offset_of_static_fields()); return false; } }
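The "Do the Class Class" block above now lays out two fake int fields (oop_size and static_oop_field_count) ahead of the fake oop fields and derives the offsets incrementally instead of reading the old hc_*_offset constants. A rough standalone sketch of that arithmetic, assuming a 16-byte object header and 8-byte oop fields purely for illustration (the real sizes come from the running VM):

#include <cstdio>

// Round 'n' up to a multiple of 'align' (a power of two), analogous to align_size_up.
static int align_up(int n, int align) { return (n + align - 1) & ~(align - 1); }

int main() {
  const int header      = 16;  // assumed object header size
  const int oop_size    = 8;   // assumed size of an oop field
  const int BytesPerInt = 4;

  int offset = header;
  int oop_size_offset = offset;                       // fake int field #1
  offset += BytesPerInt;
  int static_oop_field_count_offset = offset;         // fake int field #2
  offset = align_up(offset + BytesPerInt, oop_size);  // align up for the oop fields
  int klass_offset = offset;
  offset += oop_size;
  int array_klass_offset = offset;
  offset += oop_size;
  int resolved_constructor_offset = offset;

  printf("oop_size@%d static_count@%d klass@%d array_klass@%d resolved_ctor@%d\n",
         oop_size_offset, static_oop_field_count_offset,
         klass_offset, array_klass_offset, resolved_constructor_offset);
  return 0;
}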
--- a/src/share/vm/classfile/javaClasses.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/javaClasses.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -109,6 +109,30 @@ static char* as_platform_dependent_str(Handle java_string, TRAPS); static jchar* as_unicode_string(oop java_string, int& length); + // Compute the hash value for a java.lang.String object which would + // contain the characters passed in. This hash value is used for at + // least two purposes. + // + // (a) As the hash value used by the StringTable for bucket selection + // and comparison (stored in the HashtableEntry structures). This + // is used in the String.intern() method. + // + // (b) As the hash value used by the String object itself, in + // String.hashCode(). This value is normally calculate in Java code + // in the String.hashCode method(), but is precomputed for String + // objects in the shared archive file. + // + // For this reason, THIS ALGORITHM MUST MATCH String.hashCode(). + static unsigned int hash_string(jchar* s, int len) { + unsigned int h = 0; + while (len-- > 0) { + h = 31*h + (unsigned int) *s; + s++; + } + return h; + } + static unsigned int hash_string(oop java_string); + static bool equals(oop java_string, jchar* chars, int len); // Conversion between '.' and '/' formats @@ -138,10 +162,8 @@ // The fake offsets are added by the class loader when java.lang.Class is loaded enum { - hc_klass_offset = 0, - hc_array_klass_offset = 1, - hc_resolved_constructor_offset = 2, - hc_number_of_fake_oop_fields = 3 + hc_number_of_fake_oop_fields = 3, + hc_number_of_fake_int_fields = 2 }; static int klass_offset; @@ -149,6 +171,9 @@ static int array_klass_offset; static int number_of_fake_oop_fields; + static int oop_size_offset; + static int static_oop_field_count_offset; + static void compute_offsets(); static bool offsets_computed; static int classRedefinedCount_offset; @@ -157,6 +182,7 @@ public: // Instance creation static oop create_mirror(KlassHandle k, TRAPS); + static void fixup_mirror(KlassHandle k, TRAPS); static oop create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS); // Conversion static klassOop as_klassOop(oop java_class); @@ -191,6 +217,12 @@ static void set_classRedefinedCount(oop the_class_mirror, int value); // Support for parallelCapable field static bool parallelCapable(oop the_class_mirror); + + static int oop_size(oop java_class); + static void set_oop_size(oop java_class, int size); + static int static_oop_field_count(oop java_class); + static void set_static_oop_field_count(oop java_class, int size); + // Debugging friend class JavaClasses; friend class instanceKlass; // verification code accesses offsets @@ -794,11 +826,11 @@ }; -// Interface to java.dyn.MethodHandle objects +// Interface to java.lang.invoke.MethodHandle objects class MethodHandleEntry; -class java_dyn_MethodHandle: AllStatic { +class java_lang_invoke_MethodHandle: AllStatic { friend class JavaClasses; private: @@ -839,7 +871,7 @@ static int vmslots_offset_in_bytes() { return _vmslots_offset; } }; -class sun_dyn_DirectMethodHandle: public java_dyn_MethodHandle { +class java_lang_invoke_DirectMethodHandle: public java_lang_invoke_MethodHandle { friend class JavaClasses; private: 
@@ -864,7 +896,7 @@ static int vmindex_offset_in_bytes() { return _vmindex_offset; } }; -class sun_dyn_BoundMethodHandle: public java_dyn_MethodHandle { +class java_lang_invoke_BoundMethodHandle: public java_lang_invoke_MethodHandle { friend class JavaClasses; private: @@ -891,7 +923,7 @@ static int vmargslot_offset_in_bytes() { return _vmargslot_offset; } }; -class sun_dyn_AdapterMethodHandle: public sun_dyn_BoundMethodHandle { +class java_lang_invoke_AdapterMethodHandle: public java_lang_invoke_BoundMethodHandle { friend class JavaClasses; private: @@ -942,14 +974,14 @@ }; -// Interface to sun.dyn.MemberName objects +// Interface to java.lang.invoke.MemberName objects // (These are a private interface for Java code to query the class hierarchy.) -class sun_dyn_MemberName: AllStatic { +class java_lang_invoke_MemberName: AllStatic { friend class JavaClasses; private: - // From java.dyn.MemberName: + // From java.lang.invoke.MemberName: // private Class<?> clazz; // class in which the method is defined // private String name; // may be null if not yet materialized // private Object type; // may be null if not yet materialized @@ -1018,9 +1050,9 @@ }; -// Interface to java.dyn.MethodType objects +// Interface to java.lang.invoke.MethodType objects -class java_dyn_MethodType: AllStatic { +class java_lang_invoke_MethodType: AllStatic { friend class JavaClasses; private: @@ -1052,7 +1084,7 @@ static int form_offset_in_bytes() { return _form_offset; } }; -class java_dyn_MethodTypeForm: AllStatic { +class java_lang_invoke_MethodTypeForm: AllStatic { friend class JavaClasses; private: @@ -1075,9 +1107,9 @@ }; -// Interface to java.dyn.CallSite objects +// Interface to java.lang.invoke.CallSite objects -class java_dyn_CallSite: AllStatic { +class java_lang_invoke_CallSite: AllStatic { friend class JavaClasses; private: @@ -1165,13 +1197,10 @@ hc_static_err_offset = 2 }; - static int offset_of_static_fields; static int static_in_offset; static int static_out_offset; static int static_err_offset; - static void compute_offsets(); - public: static int in_offset_in_bytes(); static int out_offset_in_bytes();
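The hash_string helper added to javaClasses.hpp above uses the same recurrence as String.hashCode(), h = 31*h + c over the UTF-16 code units, so that StringTable bucket selection and the hashes precomputed for shared-archive String objects agree with the Java-level value. A small self-contained check of the recurrence (plain C++ types, not the VM's jchar/oop):

#include <cstdio>

// Same recurrence as java_lang_String::hash_string and String.hashCode():
// h = 31*h + c over the UTF-16 code units.
unsigned int hash_string(const unsigned short* s, int len) {
  unsigned int h = 0;
  while (len-- > 0) {
    h = 31 * h + (unsigned int)*s;
    s++;
  }
  return h;
}

int main() {
  const unsigned short hi[] = { 'H', 'i' };
  // 31*'H' + 'i' = 31*72 + 105 = 2337, the same value "Hi".hashCode() returns in Java.
  printf("%u\n", hash_string(hi, 2));
  return 0;
}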
--- a/src/share/vm/classfile/loaderConstraints.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/loaderConstraints.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/loaderConstraints.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/loaderConstraints.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/placeholders.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/placeholders.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/placeholders.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/placeholders.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/resolutionErrors.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/resolutionErrors.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/resolutionErrors.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/resolutionErrors.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/stackMapFrame.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/stackMapFrame.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -170,8 +170,48 @@ return true; } -bool StackMapFrame::is_assignable_to(const StackMapFrame* target, TRAPS) const { - if (_max_locals != target->max_locals() || _stack_size != target->stack_size()) { +bool StackMapFrame::has_flag_match_exception( + const StackMapFrame* target) const { + // We allow flags of {UninitThis} to assign to {} if-and-only-if the + // target frame does not depend upon the current type. + // This is slightly too strict, as we need only enforce that the + // slots that were initialized by the <init> (the things that were + // UninitializedThis before initialize_object() converted them) are unused. + // However we didn't save that information so we'll enforce this upon + // anything that might have been initialized. This is a rare situation + // and javac never generates code that would end up here, but some profilers + // (such as NetBeans) might, when adding exception handlers in <init> + // methods to cover the invokespecial instruction. See 7020118. + + assert(max_locals() == target->max_locals() && + stack_size() == target->stack_size(), "StackMap sizes must match"); + + VerificationType top = VerificationType::top_type(); + VerificationType this_type = verifier()->current_type(); + + if (!flag_this_uninit() || target->flags() != 0) { + return false; + } + + for (int i = 0; i < target->locals_size(); ++i) { + if (locals()[i] == this_type && target->locals()[i] != top) { + return false; + } + } + + for (int i = 0; i < target->stack_size(); ++i) { + if (stack()[i] == this_type && target->stack()[i] != top) { + return false; + } + } + + return true; +} + +bool StackMapFrame::is_assignable_to( + const StackMapFrame* target, bool is_exception_handler, TRAPS) const { + if (_max_locals != target->max_locals() || + _stack_size != target->stack_size()) { return false; } // Only need to compare type elements up to target->locals() or target->stack(). @@ -182,7 +222,9 @@ bool match_stack = is_assignable_to( _stack, target->stack(), _stack_size, CHECK_false); bool match_flags = (_flags | target->flags()) == target->flags(); - return (match_locals && match_stack && match_flags); + + return match_locals && match_stack && + (match_flags || (is_exception_handler && has_flag_match_exception(target))); } VerificationType StackMapFrame::pop_stack_ex(VerificationType type, TRAPS) {
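has_flag_match_exception above relaxes the verifier's flag comparison only for exception-handler targets: a frame still carrying the UninitializedThis flag may be assignable to a handler frame with no flags, provided no slot that currently holds the uninitialized 'this' is live (non-top) in the target frame. A tiny standalone model of that rule, collapsing locals and stack into one slot array for brevity (the real code walks locals and stack separately against the verifier's current_type()):

#include <cstdio>

// Slot types reduced to what the check cares about.
enum SlotType { TOP, UNINIT_THIS, OTHER };

// Allowed only if the current frame has the uninit-this flag, the target has
// no flags, and every uninit-this slot in the current frame is unused (top)
// in the target.
bool flags_match_for_handler(bool cur_flag_this_uninit, int target_flags,
                             const SlotType* cur, const SlotType* target, int len) {
  if (!cur_flag_this_uninit || target_flags != 0) return false;
  for (int i = 0; i < len; i++) {
    if (cur[i] == UNINIT_THIS && target[i] != TOP) return false;
  }
  return true;
}

int main() {
  SlotType cur[]    = { UNINIT_THIS, OTHER };
  SlotType target[] = { TOP,         OTHER };  // handler ignores the uninit slot
  printf("%d\n", flags_match_for_handler(true, 0, cur, target, 2));  // 1: allowed
  target[0] = OTHER;                           // handler would use the uninit 'this'
  printf("%d\n", flags_match_for_handler(true, 0, cur, target, 2));  // 0: rejected
  return 0;
}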
--- a/src/share/vm/classfile/stackMapFrame.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/stackMapFrame.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -134,7 +134,8 @@ void copy_stack(const StackMapFrame* src); // Return true if this stack map frame is assignable to target. - bool is_assignable_to(const StackMapFrame* target, TRAPS) const; + bool is_assignable_to(const StackMapFrame* target, + bool is_exception_handler, TRAPS) const; // Push type into stack type array. inline void push_stack(VerificationType type, TRAPS) { @@ -228,6 +229,8 @@ bool is_assignable_to( VerificationType* src, VerificationType* target, int32_t len, TRAPS) const; + bool has_flag_match_exception(const StackMapFrame* target) const; + // Debugging void print() const PRODUCT_RETURN; };
--- a/src/share/vm/classfile/stackMapTable.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/stackMapTable.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -98,10 +98,13 @@ bool result = true; StackMapFrame *stackmap_frame = _frame_array[frame_index]; if (match) { + // when checking handler target, match == true && update == false + bool is_exception_handler = !update; // Has direct control flow from last instruction, need to match the two // frames. result = frame->is_assignable_to( - stackmap_frame, CHECK_VERIFY_(frame->verifier(), false)); + stackmap_frame, is_exception_handler, + CHECK_VERIFY_(frame->verifier(), false)); } if (update) { // Use the frame in stackmap table as current frame
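The only caller that passes match == true together with update == false is the exception-handler check, so stackMapTable.cpp can derive is_exception_handler directly from the update flag. A minimal self-contained sketch of that control flow, with hypothetical names and a stubbed-out assignability test:

    struct Frame { /* locals, stack, flags elided */ };

    // Full per-slot comparison lives elsewhere (see the stackMapFrame sketch);
    // stubbed here so the sketch stands alone.
    bool is_assignable_to(const Frame&, const Frame&, bool /*is_handler*/) {
      return true;
    }

    bool match_stackmap(Frame& cur, Frame& recorded, bool match, bool update) {
      bool result = true;
      if (match) {
        // Handler targets are checked (match == true) but never become the
        // current frame (update == false), so !update identifies them.
        bool is_exception_handler = !update;
        result = is_assignable_to(cur, recorded, is_exception_handler);
      }
      if (update) {
        cur = recorded;  // branch/fall-through: recorded frame becomes current
      }
      return result;
    }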
--- a/src/share/vm/classfile/stackMapTable.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/stackMapTable.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/symbolTable.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/symbolTable.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -88,7 +88,7 @@ void SymbolTable::unlink() { int removed = 0; int total = 0; - int memory_total = 0; + size_t memory_total = 0; for (int i = 0; i < the_table()->table_size(); ++i) { for (HashtableEntry<Symbol*>** p = the_table()->bucket_addr(i); *p != NULL; ) { HashtableEntry<Symbol*>* entry = *p; @@ -112,8 +112,10 @@ } symbols_removed += removed; symbols_counted += total; - if (PrintGCDetails) { - gclog_or_tty->print(" [Symbols=%d size=%dK] ", total, + // Exclude printing for normal PrintGCDetails because people parse + // this output. + if (PrintGCDetails && Verbose && WizardMode) { + gclog_or_tty->print(" [Symbols=%d size=" SIZE_FORMAT "K] ", total, (memory_total*HeapWordSize)/1024); } } @@ -478,33 +480,6 @@ // -------------------------------------------------------------------------- - - -// Compute the hash value for a java.lang.String object which would -// contain the characters passed in. This hash value is used for at -// least two purposes. -// -// (a) As the hash value used by the StringTable for bucket selection -// and comparison (stored in the HashtableEntry structures). This -// is used in the String.intern() method. -// -// (b) As the hash value used by the String object itself, in -// String.hashCode(). This value is normally calculate in Java code -// in the String.hashCode method(), but is precomputed for String -// objects in the shared archive file. -// -// For this reason, THIS ALGORITHM MUST MATCH String.hashCode(). 
- -int StringTable::hash_string(jchar* s, int len) { - unsigned h = 0; - while (len-- > 0) { - h = 31*h + (unsigned) *s; - s++; - } - return h; -} - - StringTable* StringTable::_the_table = NULL; oop StringTable::lookup(int index, jchar* name, @@ -528,7 +503,7 @@ Handle string; // try to reuse the string if possible - if (!string_or_null.is_null() && string_or_null()->is_perm()) { + if (!string_or_null.is_null() && (!JavaObjectsInPerm || string_or_null()->is_perm())) { string = string_or_null; } else { string = java_lang_String::create_tenured_from_unicode(name, len, CHECK_NULL); @@ -559,7 +534,7 @@ ResourceMark rm; int length; jchar* chars = symbol->as_unicode(length); - unsigned int hashValue = hash_string(chars, length); + unsigned int hashValue = java_lang_String::hash_string(chars, length); int index = the_table()->hash_to_index(hashValue); return the_table()->lookup(index, chars, length, hashValue); } @@ -567,7 +542,7 @@ oop StringTable::intern(Handle string_or_null, jchar* name, int len, TRAPS) { - unsigned int hashValue = hash_string(name, len); + unsigned int hashValue = java_lang_String::hash_string(name, len); int index = the_table()->hash_to_index(hashValue); oop string = the_table()->lookup(index, name, len, hashValue); @@ -660,11 +635,8 @@ for ( ; p != NULL; p = p->next()) { oop s = p->literal(); guarantee(s != NULL, "interned string is NULL"); - guarantee(s->is_perm(), "interned string not in permspace"); - - int length; - jchar* chars = java_lang_String::as_unicode_string(s, length); - unsigned int h = hash_string(chars, length); + guarantee(s->is_perm() || !JavaObjectsInPerm, "interned string not in permspace"); + unsigned int h = java_lang_String::hash_string(s); guarantee(p->hash() == h, "broken hash in string table entry"); guarantee(the_table()->hash_to_index(h) == i, "wrong index in string table");
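StringTable::hash_string() is removed in favour of java_lang_String::hash_string(), so a single routine now serves both bucket selection in the interned-string table and the String.hashCode() values precomputed for the shared archive; as the deleted comment stresses, the polynomial must stay identical to java.lang.String.hashCode(). A standalone restatement of that hash (jchar assumed to be a 16-bit UTF-16 code unit):

    #include <cstdint>

    typedef uint16_t jchar;

    // Must produce exactly the same value as java.lang.String.hashCode():
    // h is accumulated as h = 31*h + s[i] over all UTF-16 code units.
    unsigned int hash_string(const jchar* s, int len) {
      unsigned int h = 0;
      while (len-- > 0) {
        h = 31 * h + (unsigned int) *s;
        s++;
      }
      return h;
    }

The table then maps this hash to a bucket (hash_to_index in the real code), so intern() and lookup() both start from the same value.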
--- a/src/share/vm/classfile/symbolTable.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/symbolTable.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -216,18 +216,14 @@ oop basic_add(int index, Handle string_or_null, jchar* name, int len, unsigned int hashValue, TRAPS); - // Table size - enum { - string_table_size = 1009 - }; - oop lookup(int index, jchar* chars, int length, unsigned int hashValue); - StringTable() : Hashtable<oop>(string_table_size, sizeof (HashtableEntry<oop>)) {} + StringTable() : Hashtable<oop>((int)StringTableSize, + sizeof (HashtableEntry<oop>)) {} StringTable(HashtableBucket* t, int number_of_entries) - : Hashtable<oop>(string_table_size, sizeof (HashtableEntry<oop>), t, - number_of_entries) {} + : Hashtable<oop>((int)StringTableSize, sizeof (HashtableEntry<oop>), t, + number_of_entries) {} public: // The string table @@ -241,13 +237,11 @@ static void create_table(HashtableBucket* t, int length, int number_of_entries) { assert(_the_table == NULL, "One string table allowed."); - assert(length == string_table_size * sizeof(HashtableBucket), + assert((size_t)length == StringTableSize * sizeof(HashtableBucket), "bad shared string size."); _the_table = new StringTable(t, number_of_entries); } - static int hash_string(jchar* s, int len); - // GC support // Delete pointers to otherwise-unreachable objects. static void unlink(BoolObjectClosure* cl);
--- a/src/share/vm/classfile/systemDictionary.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/systemDictionary.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1018,7 +1018,7 @@ } if (host_klass.not_null() && k.not_null()) { - assert(AnonymousClasses, ""); + assert(EnableInvokeDynamic, ""); // If it's anonymous, initialize it now, since nobody else will. k->set_host_klass(host_klass()); @@ -1967,25 +1967,15 @@ instanceKlass::cast(WK_KLASS(FinalReference_klass))->set_reference_type(REF_FINAL); instanceKlass::cast(WK_KLASS(PhantomReference_klass))->set_reference_type(REF_PHANTOM); - WKID meth_group_start = WK_KLASS_ENUM_NAME(MethodHandle_klass); - WKID meth_group_end = WK_KLASS_ENUM_NAME(WrongMethodTypeException_klass); - initialize_wk_klasses_until(meth_group_start, scan, CHECK); - if (EnableMethodHandles) { - initialize_wk_klasses_through(meth_group_end, scan, CHECK); - } - if (_well_known_klasses[meth_group_start] == NULL) { - // Skip the rest of the method handle classes, if MethodHandle is not loaded. - scan = WKID(meth_group_end+1); - } - WKID indy_group_start = WK_KLASS_ENUM_NAME(Linkage_klass); - WKID indy_group_end = WK_KLASS_ENUM_NAME(CallSite_klass); - initialize_wk_klasses_until(indy_group_start, scan, CHECK); + // JSR 292 classes + WKID jsr292_group_start = WK_KLASS_ENUM_NAME(MethodHandle_klass); + WKID jsr292_group_end = WK_KLASS_ENUM_NAME(CallSite_klass); + initialize_wk_klasses_until(jsr292_group_start, scan, CHECK); if (EnableInvokeDynamic) { - initialize_wk_klasses_through(indy_group_end, scan, CHECK); - } - if (_well_known_klasses[indy_group_start] == NULL) { - // Skip the rest of the dynamic typing classes, if Linkage is not loaded. - scan = WKID(indy_group_end+1); + initialize_wk_klasses_through(jsr292_group_end, scan, CHECK); + } else { + // Skip the JSR 292 classes, if not enabled. + scan = WKID(jsr292_group_end + 1); } initialize_wk_klasses_until(WKID_LIMIT, scan, CHECK); @@ -2336,7 +2326,7 @@ Symbol* signature, KlassHandle accessing_klass, TRAPS) { - if (!EnableMethodHandles) return NULL; + if (!EnableInvokeDynamic) return NULL; vmSymbols::SID name_id = vmSymbols::find_sid(name); assert(name_id != vmSymbols::NO_SID, "must be a known name"); unsigned int hash = invoke_method_table()->compute_hash(signature, name_id); @@ -2349,7 +2339,7 @@ // (tw) May we do this? //if (THREAD->is_Compiler_thread()) // return NULL; // do not attempt from within compiler - bool for_invokeGeneric = (name_id == vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name)); + bool for_invokeGeneric = (name_id != vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name)); bool found_on_bcp = false; Handle mt = find_method_handle_type(signature, accessing_klass, for_invokeGeneric, @@ -2378,7 +2368,7 @@ } } -// Ask Java code to find or construct a java.dyn.MethodType for the given +// Ask Java code to find or construct a java.lang.invoke.MethodType for the given // signature, as interpreted relative to the given class loader. // Because of class loader constraints, all method handle usage must be // consistent with this loader. 
@@ -2432,7 +2422,7 @@ } assert(arg == npts, ""); - // call sun.dyn.MethodHandleNatives::findMethodType(Class rt, Class[] pts) -> MethodType + // call java.lang.invoke.MethodHandleNatives::findMethodType(Class rt, Class[] pts) -> MethodType JavaCallArguments args(Handle(THREAD, rt())); args.push_oop(pts()); JavaValue result(T_OBJECT); @@ -2444,7 +2434,7 @@ Handle method_type(THREAD, (oop) result.get_jobject()); if (for_invokeGeneric) { - // call sun.dyn.MethodHandleNatives::notifyGenericMethodType(MethodType) -> void + // call java.lang.invoke.MethodHandleNatives::notifyGenericMethodType(MethodType) -> void JavaCallArguments args(Handle(THREAD, method_type())); JavaValue no_result(T_VOID); JavaCalls::call_static(&no_result, @@ -2491,7 +2481,7 @@ THROW_MSG_(vmSymbols::java_lang_LinkageError(), "bad signature", empty); } - // call sun.dyn.MethodHandleNatives::linkMethodHandleConstant(Class caller, int refKind, Class callee, String name, Object type) -> MethodHandle + // call java.lang.invoke.MethodHandleNatives::linkMethodHandleConstant(Class caller, int refKind, Class callee, String name, Object type) -> MethodHandle JavaCallArguments args; args.push_oop(caller->java_mirror()); // the referring class args.push_int(ref_kind); @@ -2507,7 +2497,7 @@ return Handle(THREAD, (oop) result.get_jobject()); } -// Ask Java code to find or construct a java.dyn.CallSite for the given +// Ask Java code to find or construct a java.lang.invoke.CallSite for the given // name and signature, as interpreted relative to the given class loader. Handle SystemDictionary::make_dynamic_call_site(Handle bootstrap_method, Symbol* name, @@ -2518,13 +2508,13 @@ TRAPS) { Handle empty; guarantee(bootstrap_method.not_null() && - java_dyn_MethodHandle::is_instance(bootstrap_method()), + java_lang_invoke_MethodHandle::is_instance(bootstrap_method()), "caller must supply a valid BSM"); Handle caller_mname = MethodHandles::new_MemberName(CHECK_(empty)); MethodHandles::init_MemberName(caller_mname(), caller_method()); - // call sun.dyn.MethodHandleNatives::makeDynamicCallSite(bootm, name, mtype, info, caller_mname, caller_pos) + // call java.lang.invoke.MethodHandleNatives::makeDynamicCallSite(bootm, name, mtype, info, caller_mname, caller_pos) oop name_str_oop = StringTable::intern(name, CHECK_(empty)); // not a handle! JavaCallArguments args(Handle(THREAD, bootstrap_method())); args.push_oop(name_str_oop); @@ -2540,7 +2530,7 @@ &args, CHECK_(empty)); oop call_site_oop = (oop) result.get_jobject(); assert(call_site_oop->is_oop() - /*&& java_dyn_CallSite::is_instance(call_site_oop)*/, "must be sane"); + /*&& java_lang_invoke_CallSite::is_instance(call_site_oop)*/, "must be sane"); if (TraceMethodHandles) { #ifndef PRODUCT tty->print_cr("Linked invokedynamic bci=%d site="INTPTR_FORMAT":", caller_bci, call_site_oop); @@ -2617,28 +2607,10 @@ argument_info_result = argument_info; // return argument_info to caller return bsm; } - // else null BSM; fall through - } else if (tag.is_name_and_type()) { - // JSR 292 EDR does not have JVM_CONSTANT_InvokeDynamic - // a bare name&type defaults its BSM to null, so fall through... } else { ShouldNotReachHere(); // verifier does not allow this } - // Fall through to pick up the per-class bootstrap method. - // This mechanism may go away in the PFD. 
- assert(AllowTransitionalJSR292, "else the verifier should have stopped us already"); - argument_info_result = empty; // return no argument_info to caller - oop bsm_oop = instanceKlass::cast(caller_method->method_holder())->bootstrap_method(); - if (bsm_oop != NULL) { - if (TraceMethodHandles) { - tty->print_cr("bootstrap method for "PTR_FORMAT" registered as "PTR_FORMAT":", - (intptr_t) caller_method(), (intptr_t) bsm_oop); - } - assert(bsm_oop->is_oop(), "must be sane"); - return Handle(THREAD, bsm_oop); - } - return empty; }
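With the java.dyn classes renamed to java.lang.invoke, the two separate method-handle and invokedynamic groups collapse into one contiguous JSR 292 block (MethodHandle first, CallSite last) that is either resolved as a whole when EnableInvokeDynamic is set or skipped as a whole otherwise. A simplified, self-contained sketch of that scan-cursor pattern, with made-up class IDs:

    #include <cstdio>

    // Hypothetical well-known-class IDs; the JSR 292 entries form one
    // contiguous block with MethodHandle first and CallSite last.
    enum WKID { Object_id, String_id,
                MethodHandle_id, MethodType_id, CallSite_id,  // JSR 292 group
                StringBuffer_id, WKID_LIMIT };

    static bool EnableInvokeDynamic = true;  // stand-in for the VM flag

    static void initialize_until(WKID limit, WKID& scan) {
      for (; scan < limit; scan = WKID(scan + 1)) {
        std::printf("resolving well-known class %d\n", (int) scan);
      }
    }

    void initialize_preloaded_classes() {
      WKID scan = Object_id;
      WKID jsr292_start = MethodHandle_id;
      WKID jsr292_end   = CallSite_id;
      initialize_until(jsr292_start, scan);            // classes before the group
      if (EnableInvokeDynamic) {
        initialize_until(WKID(jsr292_end + 1), scan);  // resolve the whole group
      } else {
        scan = WKID(jsr292_end + 1);                   // skip the JSR 292 classes
      }
      initialize_until(WKID_LIMIT, scan);              // the rest
    }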
--- a/src/share/vm/classfile/systemDictionary.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/systemDictionary.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -144,18 +144,16 @@ template(reflect_UnsafeStaticFieldAccessorImpl_klass, sun_reflect_UnsafeStaticFieldAccessorImpl, Opt_Only_JDK15) \ \ /* support for dynamic typing; it's OK if these are NULL in earlier JDKs */ \ - template(MethodHandle_klass, java_dyn_MethodHandle, Opt) \ - template(MemberName_klass, sun_dyn_MemberName, Opt) \ - template(MethodHandleImpl_klass, sun_dyn_MethodHandleImpl, Opt) \ - template(MethodHandleNatives_klass, sun_dyn_MethodHandleNatives, Opt) \ - template(AdapterMethodHandle_klass, sun_dyn_AdapterMethodHandle, Opt) \ - template(BoundMethodHandle_klass, sun_dyn_BoundMethodHandle, Opt) \ - template(DirectMethodHandle_klass, sun_dyn_DirectMethodHandle, Opt) \ - template(MethodType_klass, java_dyn_MethodType, Opt) \ - template(MethodTypeForm_klass, java_dyn_MethodTypeForm, Opt) \ - template(WrongMethodTypeException_klass, java_dyn_WrongMethodTypeException, Opt) \ - template(Linkage_klass, java_dyn_Linkage, Opt) \ - template(CallSite_klass, java_dyn_CallSite, Opt) \ + template(MethodHandle_klass, java_lang_invoke_MethodHandle, Pre_JSR292) \ + template(MemberName_klass, java_lang_invoke_MemberName, Pre_JSR292) \ + template(MethodHandleNatives_klass, java_lang_invoke_MethodHandleNatives, Pre_JSR292) \ + template(AdapterMethodHandle_klass, java_lang_invoke_AdapterMethodHandle, Pre_JSR292) \ + template(BoundMethodHandle_klass, java_lang_invoke_BoundMethodHandle, Pre_JSR292) \ + template(DirectMethodHandle_klass, java_lang_invoke_DirectMethodHandle, Pre_JSR292) \ + template(MethodType_klass, java_lang_invoke_MethodType, Pre_JSR292) \ + template(MethodTypeForm_klass, java_lang_invoke_MethodTypeForm, Pre_JSR292) \ + template(WrongMethodTypeException_klass, java_lang_invoke_WrongMethodTypeException, Pre_JSR292) \ + template(CallSite_klass, java_lang_invoke_CallSite, Pre_JSR292) \ /* Note: MethodHandle must be first, and CallSite last in group */ \ \ template(StringBuffer_klass, java_lang_StringBuffer, Pre) \ @@ -240,6 +238,7 @@ enum InitOption { Pre, // preloaded; error if not present + Pre_JSR292, // preloaded if EnableInvokeDynamic // Order is significant. Options before this point require resolve_or_fail. // Options after this point will use resolve_or_null instead. @@ -434,6 +433,7 @@ } static klassOop check_klass_Pre(klassOop k) { return check_klass(k); } + static klassOop check_klass_Pre_JSR292(klassOop k) { return EnableInvokeDynamic ? 
check_klass(k) : k; } static klassOop check_klass_Opt(klassOop k) { return k; } static klassOop check_klass_Opt_Kernel(klassOop k) { return k; } //== Opt static klassOop check_klass_Opt_Only_JDK15(klassOop k) { @@ -508,18 +508,18 @@ Handle loader2, bool is_method, TRAPS); // JSR 292 - // find the java.dyn.MethodHandles::invoke method for a given signature + // find the java.lang.invoke.MethodHandles::invoke method for a given signature static methodOop find_method_handle_invoke(Symbol* name, Symbol* signature, KlassHandle accessing_klass, TRAPS); - // ask Java to compute a java.dyn.MethodType object for a given signature + // ask Java to compute a java.lang.invoke.MethodType object for a given signature static Handle find_method_handle_type(Symbol* signature, KlassHandle accessing_klass, bool for_invokeGeneric, bool& return_bcp_flag, TRAPS); - // ask Java to compute a java.dyn.MethodHandle object for a given CP entry + // ask Java to compute a java.lang.invoke.MethodHandle object for a given CP entry static Handle link_method_handle_constant(KlassHandle caller, int ref_kind, //e.g., JVM_REF_invokeVirtual KlassHandle callee,
--- a/src/share/vm/classfile/verificationType.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/verificationType.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/verificationType.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/verificationType.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -128,6 +128,7 @@ // Create verification types static VerificationType bogus_type() { return VerificationType(Bogus); } + static VerificationType top_type() { return bogus_type(); } // alias static VerificationType null_type() { return VerificationType(Null); } static VerificationType integer_type() { return VerificationType(Integer); } static VerificationType float_type() { return VerificationType(Float); }
--- a/src/share/vm/classfile/verifier.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/verifier.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1673,11 +1673,11 @@ } else if (tag.is_method_handle()) { current_frame->push_stack( VerificationType::reference_type( - vmSymbols::java_dyn_MethodHandle()), CHECK_VERIFY(this)); + vmSymbols::java_lang_invoke_MethodHandle()), CHECK_VERIFY(this)); } else if (tag.is_method_type()) { current_frame->push_stack( VerificationType::reference_type( - vmSymbols::java_dyn_MethodType()), CHECK_VERIFY(this)); + vmSymbols::java_lang_invoke_MethodType()), CHECK_VERIFY(this)); } else { verify_error(bci, "Invalid index in ldc"); return; @@ -1944,8 +1944,7 @@ unsigned int types = (opcode == Bytecodes::_invokeinterface ? 1 << JVM_CONSTANT_InterfaceMethodref : opcode == Bytecodes::_invokedynamic - ? ((AllowTransitionalJSR292 ? 1 << JVM_CONSTANT_NameAndType : 0) - |1 << JVM_CONSTANT_InvokeDynamic) + ? 1 << JVM_CONSTANT_InvokeDynamic : 1 << JVM_CONSTANT_Methodref); verify_cp_type(index, cp, types, CHECK_VERIFY(this));
--- a/src/share/vm/classfile/verifier.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/verifier.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/vmSymbols.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/vmSymbols.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -277,6 +277,12 @@ return sid; } +vmSymbols::SID vmSymbols::find_sid(const char* symbol_name) { + Symbol* symbol = SymbolTable::probe(symbol_name, (int) strlen(symbol_name)); + if (symbol == NULL) return NO_SID; + return find_sid(symbol); +} + static vmIntrinsics::ID wrapper_intrinsic(BasicType type, bool unboxing) { #define TYPE2(type, unboxing) ((int)(type)*2 + ((unboxing) ? 1 : 0)) switch (TYPE2(type, unboxing)) {
--- a/src/share/vm/classfile/vmSymbols.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/classfile/vmSymbols.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -229,33 +229,31 @@ template(base_name, "base") \ \ /* Support for JSR 292 & invokedynamic (JDK 1.7 and above) */ \ - template(java_dyn_InvokeDynamic, "java/dyn/InvokeDynamic") \ - template(java_dyn_Linkage, "java/dyn/Linkage") \ - template(java_dyn_CallSite, "java/dyn/CallSite") \ - template(java_dyn_MethodHandle, "java/dyn/MethodHandle") \ - template(java_dyn_MethodType, "java/dyn/MethodType") \ - template(java_dyn_WrongMethodTypeException, "java/dyn/WrongMethodTypeException") \ - template(java_dyn_MethodType_signature, "Ljava/dyn/MethodType;") \ - template(java_dyn_MethodHandle_signature, "Ljava/dyn/MethodHandle;") \ + template(java_lang_invoke_InvokeDynamic, "java/lang/invoke/InvokeDynamic") \ + template(java_lang_invoke_Linkage, "java/lang/invoke/Linkage") \ + template(java_lang_invoke_CallSite, "java/lang/invoke/CallSite") \ + template(java_lang_invoke_MethodHandle, "java/lang/invoke/MethodHandle") \ + template(java_lang_invoke_MethodType, "java/lang/invoke/MethodType") \ + template(java_lang_invoke_WrongMethodTypeException, "java/lang/invoke/WrongMethodTypeException") \ + template(java_lang_invoke_MethodType_signature, "Ljava/lang/invoke/MethodType;") \ + template(java_lang_invoke_MethodHandle_signature, "Ljava/lang/invoke/MethodHandle;") \ /* internal classes known only to the JVM: */ \ - template(java_dyn_MethodTypeForm, "java/dyn/MethodTypeForm") \ - template(java_dyn_MethodTypeForm_signature, "Ljava/dyn/MethodTypeForm;") \ - template(sun_dyn_MemberName, "sun/dyn/MemberName") \ - template(sun_dyn_MemberName_signature, "Lsun/dyn/MemberName;") \ - template(sun_dyn_MethodHandleImpl, "sun/dyn/MethodHandleImpl") \ - template(sun_dyn_MethodHandleNatives, "sun/dyn/MethodHandleNatives") \ - template(sun_dyn_AdapterMethodHandle, "sun/dyn/AdapterMethodHandle") \ - template(sun_dyn_BoundMethodHandle, "sun/dyn/BoundMethodHandle") \ - template(sun_dyn_DirectMethodHandle, "sun/dyn/DirectMethodHandle") \ - /* internal up-calls made only by the JVM, via class sun.dyn.MethodHandleNatives: */ \ + template(java_lang_invoke_MethodTypeForm, "java/lang/invoke/MethodTypeForm") \ + template(java_lang_invoke_MethodTypeForm_signature, "Ljava/lang/invoke/MethodTypeForm;") \ + template(java_lang_invoke_MemberName, "java/lang/invoke/MemberName") \ + template(java_lang_invoke_MethodHandleNatives, "java/lang/invoke/MethodHandleNatives") \ + template(java_lang_invoke_AdapterMethodHandle, "java/lang/invoke/AdapterMethodHandle") \ + template(java_lang_invoke_BoundMethodHandle, "java/lang/invoke/BoundMethodHandle") \ + template(java_lang_invoke_DirectMethodHandle, "java/lang/invoke/DirectMethodHandle") \ + /* internal up-calls made only by the JVM, via class sun.invoke.MethodHandleNatives: */ \ template(findMethodHandleType_name, "findMethodHandleType") \ - template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/dyn/MethodType;") \ + template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/lang/invoke/MethodType;") \ template(notifyGenericMethodType_name, "notifyGenericMethodType") \ - template(notifyGenericMethodType_signature, "(Ljava/dyn/MethodType;)V") \ + template(notifyGenericMethodType_signature, "(Ljava/lang/invoke/MethodType;)V") \ template(linkMethodHandleConstant_name, "linkMethodHandleConstant") \ - template(linkMethodHandleConstant_signature, 
"(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/dyn/MethodHandle;") \ + template(linkMethodHandleConstant_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/lang/invoke/MethodHandle;") \ template(makeDynamicCallSite_name, "makeDynamicCallSite") \ - template(makeDynamicCallSite_signature, "(Ljava/dyn/MethodHandle;Ljava/lang/String;Ljava/dyn/MethodType;Ljava/lang/Object;Lsun/dyn/MemberName;I)Ljava/dyn/CallSite;") \ + template(makeDynamicCallSite_signature, "(Ljava/lang/invoke/MethodHandle;Ljava/lang/String;Ljava/lang/invoke/MethodType;Ljava/lang/Object;Ljava/lang/invoke/MemberName;I)Ljava/lang/invoke/CallSite;") \ NOT_LP64( do_alias(machine_word_signature, int_signature) ) \ LP64_ONLY( do_alias(machine_word_signature, long_signature) ) \ \ @@ -378,6 +376,7 @@ template(dispatch_name, "dispatch") \ template(getSystemClassLoader_name, "getSystemClassLoader") \ template(fillInStackTrace_name, "fillInStackTrace") \ + template(fillInStackTrace0_name, "fillInStackTrace0") \ template(getCause_name, "getCause") \ template(initCause_name, "initCause") \ template(setProperty_name, "setProperty") \ @@ -957,13 +956,13 @@ \ do_intrinsic(_invoke, java_lang_reflect_Method, invoke_name, object_object_array_object_signature, F_R) \ /* (symbols invoke_name and invoke_signature defined above) */ \ - do_intrinsic(_checkSpreadArgument, sun_dyn_MethodHandleImpl, checkSpreadArgument_name, checkSpreadArgument_signature, F_S) \ + do_intrinsic(_checkSpreadArgument, java_lang_invoke_MethodHandleNatives, checkSpreadArgument_name, checkSpreadArgument_signature, F_S) \ do_name( checkSpreadArgument_name, "checkSpreadArgument") \ do_name( checkSpreadArgument_signature, "(Ljava/lang/Object;I)V") \ - do_intrinsic(_invokeExact, java_dyn_MethodHandle, invokeExact_name, object_array_object_signature, F_RN) \ - do_intrinsic(_invokeGeneric, java_dyn_MethodHandle, invokeGeneric_name, object_array_object_signature, F_RN) \ - do_intrinsic(_invokeVarargs, java_dyn_MethodHandle, invokeVarargs_name, object_array_object_signature, F_R) \ - do_intrinsic(_invokeDynamic, java_dyn_InvokeDynamic, star_name, object_array_object_signature, F_SN) \ + do_intrinsic(_invokeExact, java_lang_invoke_MethodHandle, invokeExact_name, object_array_object_signature, F_RN) \ + do_intrinsic(_invokeGeneric, java_lang_invoke_MethodHandle, invokeGeneric_name, object_array_object_signature, F_RN) \ + do_intrinsic(_invokeVarargs, java_lang_invoke_MethodHandle, invokeVarargs_name, object_array_object_signature, F_R) \ + do_intrinsic(_invokeDynamic, java_lang_invoke_InvokeDynamic, star_name, object_array_object_signature, F_SN) \ \ /* unboxing methods: */ \ do_intrinsic(_booleanValue, java_lang_Boolean, booleanValue_name, void_boolean_signature, F_R) \ @@ -1070,6 +1069,7 @@ // Returns symbol's SID if one is assigned, else NO_SID. static SID find_sid(Symbol* symbol); + static SID find_sid(const char* symbol_name); #ifndef PRODUCT // No need for this in the product:
--- a/src/share/vm/code/codeBlob.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/code/codeBlob.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/code/codeCache.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/code/codeCache.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -337,7 +337,6 @@ if (is_live) { // Perform cur->oops_do(f), maybe just once per nmethod. f->do_code_blob(cur); - cur->fix_oop_relocations(); } } @@ -552,6 +551,19 @@ } +void CodeCache::verify_oops() { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + VerifyOopClosure voc; + FOR_ALL_ALIVE_BLOBS(cb) { + if (cb->is_nmethod()) { + nmethod *nm = (nmethod*)cb; + nm->oops_do(&voc); + nm->verify_oop_relocations(); + } + } +} + + address CodeCache::first_address() { assert_locked_or_safepoint(CodeCache_lock); return (address)_heap->begin(); @@ -939,9 +951,27 @@ _heap->high(), _heap->high_boundary()); st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT - " adapters=" UINT32_FORMAT " free_code_cache=" SIZE_FORMAT + " adapters=" UINT32_FORMAT " free_code_cache=" SIZE_FORMAT "Kb" " largest_free_block=" SIZE_FORMAT, - CodeCache::nof_blobs(), CodeCache::nof_nmethods(), - CodeCache::nof_adapters(), CodeCache::unallocated_capacity(), - CodeCache::largest_free_block()); + nof_blobs(), nof_nmethods(), nof_adapters(), + unallocated_capacity()/K, largest_free_block()); +} + +void CodeCache::log_state(outputStream* st) { + st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'" + " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'" + " largest_free_block='" SIZE_FORMAT "'", + nof_blobs(), nof_nmethods(), nof_adapters(), + unallocated_capacity(), largest_free_block()); } + +size_t CodeCache::largest_free_block() { + // This is called both with and without CodeCache_lock held so + // handle both cases. + if (CodeCache_lock->owned_by_self()) { + return _heap->largest_free_block(); + } else { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + return _heap->largest_free_block(); + } +}
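largest_free_block() moves out of the header because it may now be reached both with and without CodeCache_lock held; the lock is re-acquired only when the calling thread does not already own it. A standalone sketch of that "lock unless already owned by self" pattern, using a std::mutex plus an explicit owner record (HotSpot's Mutex tracks its owner itself; the names below are stand-ins):

    #include <cstddef>
    #include <mutex>
    #include <thread>

    class OwnedMutex {
      std::mutex      _mu;
      std::thread::id _owner;  // valid only while the mutex is held
     public:
      void lock()   { _mu.lock(); _owner = std::this_thread::get_id(); }
      void unlock() { _owner = std::thread::id(); _mu.unlock(); }
      bool owned_by_self() const { return _owner == std::this_thread::get_id(); }
    };

    static OwnedMutex CodeCache_lock;
    static size_t heap_largest_free_block() { return 64 * 1024; }  // stand-in

    size_t largest_free_block() {
      if (CodeCache_lock.owned_by_self()) {
        return heap_largest_free_block();      // caller already holds the lock
      } else {
        std::lock_guard<OwnedMutex> guard(CodeCache_lock);
        return heap_largest_free_block();
      }
    }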
--- a/src/share/vm/code/codeCache.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/code/codeCache.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -122,6 +122,7 @@ // GC support static void gc_epilogue(); static void gc_prologue(); + static void verify_oops(); // If "unloading_occurred" is true, then unloads (i.e., breaks root links // to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading" // to "true" iff some code got unloaded. @@ -147,6 +148,7 @@ static void verify(); // verifies the code cache static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN; static void print_bounds(outputStream* st); // Prints a summary of the bounds of the code cache + static void log_state(outputStream* st); // The full limits of the codeCache static address low_bound() { return (address) _heap->low_boundary(); } @@ -158,8 +160,8 @@ static size_t capacity() { return _heap->capacity(); } static size_t max_capacity() { return _heap->max_capacity(); } static size_t unallocated_capacity() { return _heap->unallocated_capacity(); } - static size_t largest_free_block() { return _heap->largest_free_block(); } - static bool needs_flushing() { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; } + static size_t largest_free_block(); + static bool needs_flushing() { return largest_free_block() < CodeCacheFlushingMinimumFreeSpace; } static bool needs_cache_clean() { return _needs_cache_clean; } static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
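needs_flushing() now compares the largest contiguous free block, rather than total unallocated capacity, against CodeCacheFlushingMinimumFreeSpace; the two can diverge badly once the code cache is fragmented. A worked illustration with made-up numbers:

    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t unallocated_capacity = 10 * 1024 * 1024;  // 10 MB free in total
      size_t largest_free_block   = 200 * 1024;        // but badly fragmented
      size_t minimum_free_space   = 500 * 1024;        // hypothetical threshold

      bool old_rule = unallocated_capacity < minimum_free_space;  // false
      bool new_rule = largest_free_block   < minimum_free_space;  // true
      std::printf("flush? old rule: %d, new rule: %d\n",
                  (int) old_rule, (int) new_rule);
      return 0;
    }

Under the old rule this cache looks healthy even though no single block is large enough for a new nmethod, which is also why nmethod::operator new in nmethod.cpp now checks largest_free_block() against CodeCacheMinimumFreeSpace.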
--- a/src/share/vm/code/compiledIC.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/code/compiledIC.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/code/compiledIC.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/code/compiledIC.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/code/dependencies.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/code/dependencies.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/code/icBuffer.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/code/icBuffer.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/code/nmethod.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/code/nmethod.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -28,6 +28,7 @@ #include "code/nmethod.hpp" #include "code/scopeDesc.hpp" #include "compiler/abstractCompiler.hpp" +#include "compiler/compileBroker.hpp" #include "compiler/compileLog.hpp" #include "compiler/compilerOracle.hpp" #include "compiler/disassembler.hpp" @@ -170,7 +171,7 @@ int pc_desc_resets; // number of resets (= number of caches) int pc_desc_queries; // queries to nmethod::find_pc_desc int pc_desc_approx; // number of those which have approximate true - int pc_desc_repeats; // number of _last_pc_desc hits + int pc_desc_repeats; // number of _pc_descs[0] hits int pc_desc_hits; // number of LRU cache hits int pc_desc_tests; // total number of PcDesc examinations int pc_desc_searches; // total number of quasi-binary search steps @@ -190,15 +191,10 @@ } nmethod_stats; #endif //PRODUCT + //--------------------------------------------------------------------------------- -// The _unwind_handler is a special marker address, which says that -// for given exception oop and address, the frame should be removed -// as the tuple cannot be caught in the nmethod -address ExceptionCache::_unwind_handler = (address) -1; - - ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) { assert(pc != NULL, "Must be non null"); assert(exception.not_null(), "Must be non null"); @@ -283,40 +279,44 @@ void PcDescCache::reset_to(PcDesc* initial_pc_desc) { if (initial_pc_desc == NULL) { - _last_pc_desc = NULL; // native method + _pc_descs[0] = NULL; // native method; no PcDescs at all return; } NOT_PRODUCT(++nmethod_stats.pc_desc_resets); // reset the cache by filling it with benign (non-null) values assert(initial_pc_desc->pc_offset() < 0, "must be sentinel"); - _last_pc_desc = initial_pc_desc + 1; // first valid one is after sentinel for (int i = 0; i < cache_size; i++) _pc_descs[i] = initial_pc_desc; } PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) { NOT_PRODUCT(++nmethod_stats.pc_desc_queries); - NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx); + NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx); + + // Note: one might think that caching the most recently + // read value separately would be a win, but one would be + // wrong. When many threads are updating it, the cache + // line it's in would bounce between caches, negating + // any benefit. // In order to prevent race conditions do not load cache elements // repeatedly, but use a local copy: PcDesc* res; - // Step one: Check the most recently returned value. - res = _last_pc_desc; - if (res == NULL) return NULL; // native method; no PcDescs at all + // Step one: Check the most recently added value. + res = _pc_descs[0]; + if (res == NULL) return NULL; // native method; no PcDescs at all if (match_desc(res, pc_offset, approximate)) { NOT_PRODUCT(++nmethod_stats.pc_desc_repeats); return res; } - // Step two: Check the LRU cache. - for (int i = 0; i < cache_size; i++) { + // Step two: Check the rest of the LRU cache. 
+ for (int i = 1; i < cache_size; ++i) { res = _pc_descs[i]; - if (res->pc_offset() < 0) break; // optimization: skip empty cache + if (res->pc_offset() < 0) break; // optimization: skip empty cache if (match_desc(res, pc_offset, approximate)) { NOT_PRODUCT(++nmethod_stats.pc_desc_hits); - _last_pc_desc = res; // record this cache hit in case of repeat return res; } } @@ -327,24 +327,23 @@ void PcDescCache::add_pc_desc(PcDesc* pc_desc) { NOT_PRODUCT(++nmethod_stats.pc_desc_adds); - // Update the LRU cache by shifting pc_desc forward: + // Update the LRU cache by shifting pc_desc forward. for (int i = 0; i < cache_size; i++) { PcDesc* next = _pc_descs[i]; _pc_descs[i] = pc_desc; pc_desc = next; } - // Note: Do not update _last_pc_desc. It fronts for the LRU cache. } // adjust pcs_size so that it is a multiple of both oopSize and // sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple // of oopSize, then 2*sizeof(PcDesc) is) -static int adjust_pcs_size(int pcs_size) { +static int adjust_pcs_size(int pcs_size) { int nsize = round_to(pcs_size, oopSize); if ((nsize % sizeof(PcDesc)) != 0) { nsize = pcs_size + sizeof(PcDesc); } - assert((nsize % oopSize) == 0, "correct alignment"); + assert((nsize % oopSize) == 0, "correct alignment"); return nsize; } @@ -471,6 +470,7 @@ nmethod* nmethod::new_native_nmethod(methodHandle method, + int compile_id, CodeBuffer *code_buffer, int vep_offset, int frame_complete, @@ -487,7 +487,7 @@ offsets.set_value(CodeOffsets::Verified_Entry, vep_offset); offsets.set_value(CodeOffsets::Frame_Complete, frame_complete); nm = new (native_nmethod_size) - nmethod(method(), native_nmethod_size, &offsets, + nmethod(method(), native_nmethod_size, compile_id, &offsets, code_buffer, frame_size, basic_lock_owner_sp_offset, basic_lock_sp_offset, oop_maps); @@ -612,6 +612,7 @@ nmethod::nmethod( methodOop method, int nmethod_size, + int compile_id, CodeOffsets* offsets, CodeBuffer* code_buffer, int frame_size, @@ -646,7 +647,7 @@ _handler_table_offset = _dependencies_offset; _nul_chk_table_offset = _handler_table_offset; _nmethod_end_offset = _nul_chk_table_offset; - _compile_id = 0; // default + _compile_id = compile_id; _comp_level = CompLevel_none; _entry_point = code_begin() + offsets->value(CodeOffsets::Entry); _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry); @@ -655,6 +656,9 @@ _pc_desc_cache.reset_to(NULL); code_buffer->copy_oops_to(this); + if (ScavengeRootsInCode && detect_scavenge_root_oops()) { + CodeCache::add_scavenge_root_nmethod(this); + } debug_only(verify_scavenge_root_oops()); CodeCache::commit(this); } @@ -767,7 +771,7 @@ void* nmethod::operator new(size_t size, int nmethod_size) { // Always leave some room in the CodeCache for I2C/C2I adapters - if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) return NULL; + if (CodeCache::largest_free_block() < CodeCacheMinimumFreeSpace) return NULL; return CodeCache::allocate(nmethod_size); } @@ -940,72 +944,11 @@ #undef LOG_OFFSET -void nmethod::print_compilation(outputStream *st, const char *method_name, const char *title, - methodOop method, bool is_blocking, int compile_id, int bci, int comp_level) { - bool is_synchronized = false, has_xhandler = false, is_native = false; - int code_size = -1; - if (method != NULL) { - is_synchronized = method->is_synchronized(); - has_xhandler = method->has_exception_handler(); - is_native = method->is_native(); - code_size = method->code_size(); - } - // print compilation number - st->print("%7d %3d", 
(int)tty->time_stamp().milliseconds(), compile_id); - - // print method attributes - const bool is_osr = bci != InvocationEntryBci; - const char blocking_char = is_blocking ? 'b' : ' '; - const char compile_type = is_osr ? '%' : ' '; - const char sync_char = is_synchronized ? 's' : ' '; - const char exception_char = has_xhandler ? '!' : ' '; - const char native_char = is_native ? 'n' : ' '; - st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, native_char); - if (TieredCompilation) { - st->print("%d ", comp_level); - } - - // print optional title - bool do_nl = false; - if (title != NULL) { - int tlen = (int) strlen(title); - bool do_nl = false; - if (tlen > 0 && title[tlen-1] == '\n') { tlen--; do_nl = true; } - st->print("%.*s", tlen, title); - } else { - do_nl = true; - } - - // print method name string if given - if (method_name != NULL) { - st->print(method_name); - } else { - // otherwise as the method to print itself - if (method != NULL && !Universe::heap()->is_gc_active()) { - method->print_short_name(st); - } else { - st->print("(method)"); - } - } - - if (method != NULL) { - // print osr_bci if any - if (is_osr) st->print(" @ %d", bci); - // print method size - st->print(" (%d bytes)", code_size); - } - if (do_nl) st->cr(); -} - // Print out more verbose output usually for a newly created nmethod. -void nmethod::print_on(outputStream* st, const char* title) const { +void nmethod::print_on(outputStream* st, const char* msg) const { if (st != NULL) { ttyLocker ttyl; - print_compilation(st, /*method_name*/NULL, title, - method(), /*is_blocking*/false, - compile_id(), - is_osr_method() ? osr_entry_bci() : InvocationEntryBci, - comp_level()); + CompileTask::print_compilation(st, this, msg); if (WizardMode) st->print(" (" INTPTR_FORMAT ")", this); } } @@ -1118,14 +1061,22 @@ } +void nmethod::verify_oop_relocations() { + // Ensure sure that the code matches the current oop values + RelocIterator iter(this, NULL, NULL); + while (iter.next()) { + if (iter.type() == relocInfo::oop_type) { + oop_Relocation* reloc = iter.oop_reloc(); + if (!reloc->oop_is_immediate()) { + reloc->verify_oop_relocation(); + } + } + } +} + + ScopeDesc* nmethod::scope_desc_at(address pc) { PcDesc* pd = pc_desc_at(pc); -#ifdef ASSERT - if (pd == NULL) { - tty->print_cr(err_msg("Missing scope at relative pc %d of method %s", pc - code_begin(), this->method()->name()->as_C_string())); - print_pcs(); - } -#endif guarantee(pd != NULL, "scope must be present"); return new ScopeDesc(this, pd->scope_decode_offset(), pd->obj_decode_offset(), pd->should_reexecute(), @@ -1202,14 +1153,17 @@ set_stack_traversal_mark(NMethodSweeper::traversal_count()); } -// Tell if a non-entrant method can be converted to a zombie (i.e., there is no activations on the stack) +// Tell if a non-entrant method can be converted to a zombie (i.e., +// there are no activations on the stack, not in use by the VM, +// and not in use by the ServiceThread) bool nmethod::can_not_entrant_be_converted() { assert(is_not_entrant(), "must be a non-entrant method"); // Since the nmethod sweeper only does partial sweep the sweeper's traversal // count can be greater than the stack traversal count before it hits the // nmethod for the second time. 
- return stack_traversal_mark()+1 < NMethodSweeper::traversal_count(); + return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() && + !is_locked_by_vm(); } void nmethod::inc_decompile_count() { @@ -1308,14 +1262,14 @@ } } if (PrintCompilation && _state != unloaded) { - print_on(tty, _state == zombie ? "made zombie " : "made not entrant "); - tty->cr(); + print_on(tty, _state == zombie ? "made zombie" : "made not entrant"); } } // Common functionality for both make_not_entrant and make_zombie bool nmethod::make_not_entrant_or_zombie(unsigned int state) { assert(state == zombie || state == not_entrant, "must be zombie or not_entrant"); + assert(!is_zombie(), "should not already be a zombie"); // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below. nmethodLocker nml(this); @@ -1323,11 +1277,6 @@ No_Safepoint_Verifier nsv; { - // If the method is already zombie there is nothing to do - if (is_zombie()) { - return false; - } - // invalidate osr nmethod before acquiring the patching lock since // they both acquire leaf locks and we don't want a deadlock. // This logic is equivalent to the logic below for patching the @@ -1397,13 +1346,12 @@ flush_dependencies(NULL); } - { - // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event - // and it hasn't already been reported for this nmethod then report it now. - // (the event may have been reported earilier if the GC marked it for unloading). - Pause_No_Safepoint_Verifier pnsv(&nsv); - post_compiled_method_unload(); - } + // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload + // event and it hasn't already been reported for this nmethod then + // report it now. The event may have been reported earilier if the GC + // marked it for unloading). JvmtiDeferredEventQueue support means + // we no longer go to a safepoint here. + post_compiled_method_unload(); #ifdef ASSERT // It's no longer safe to access the oops section since zombie @@ -1588,7 +1536,7 @@ if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) { assert(!unload_reported(), "already unloaded"); JvmtiDeferredEvent event = - JvmtiDeferredEvent::compiled_method_unload_event( + JvmtiDeferredEvent::compiled_method_unload_event(this, _jmethod_id, insts_begin()); if (SafepointSynchronize::is_at_safepoint()) { // Don't want to take the queueing lock. Add it as pending and @@ -1820,7 +1768,7 @@ break; } // Mark was clear when we first saw this guy. - NOT_PRODUCT(if (TraceScavenge) print_on(tty, "oops_do, mark\n")); + NOT_PRODUCT(if (TraceScavenge) print_on(tty, "oops_do, mark")); return false; } } @@ -1844,7 +1792,8 @@ assert(cur != NULL, "not NULL-terminated"); nmethod* next = cur->_oops_do_mark_link; cur->_oops_do_mark_link = NULL; - NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark\n")); + cur->fix_oop_relocations(); + NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark")); cur = next; } void* required = _oops_do_mark_nmethods; @@ -1903,7 +1852,7 @@ oop nmethod::embeddedOop_at(u_char* p) { - RelocIterator iter(this, p, MIN2(p + oopSize, code_end())); + RelocIterator iter(this, p, p + 1); while (iter.next()) if (iter.type() == relocInfo::oop_type) { return iter.oop_reloc()->oop_value(); @@ -2193,10 +2142,12 @@ lock_nmethod(_nm); } -void nmethodLocker::lock_nmethod(nmethod* nm) { +// Only JvmtiDeferredEvent::compiled_method_unload_event() +// should pass zombie_ok == true. 
+void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) { if (nm == NULL) return; Atomic::inc(&nm->_lock_count); - guarantee(!nm->is_zombie(), "cannot lock a zombie method"); + guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method"); } void nmethodLocker::unlock_nmethod(nmethod* nm) { @@ -2397,7 +2348,7 @@ ResourceMark rm; ttyLocker ttyl; // keep the following output all in one block - tty->print("Compiled "); + tty->print("Compiled method "); if (is_compiled_by_c1()) { tty->print("(c1) "); @@ -2409,8 +2360,8 @@ tty->print("(nm) "); } - print_on(tty, "nmethod"); - tty->cr(); + print_on(tty, NULL); + if (WizardMode) { tty->print("((nmethod*) "INTPTR_FORMAT ") ", this); tty->print(" for method " INTPTR_FORMAT , (address)method()); @@ -2797,7 +2748,8 @@ #ifndef PRODUCT void nmethod::print_value_on(outputStream* st) const { - print_on(st, "nmethod"); + st->print("nmethod"); + print_on(st, NULL); } void nmethod::print_calls(outputStream* st) {
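PcDescCache no longer keeps a separate _last_pc_desc pointer: the most recently added entry always sits in _pc_descs[0], so find_pc_desc() checks slot 0 first and then the remaining slots, and add_pc_desc() shifts every entry down by one. A self-contained sketch of that small shift-in cache with a simplified entry type:

    #include <cstddef>

    struct Entry { int pc_offset; /* payload elided */ };

    class SmallPcCache {
      static const int cache_size = 4;
      Entry* _entries[cache_size];
     public:
      void reset_to(Entry* sentinel) {
        // Fill with a benign non-NULL value, or NULL for "no entries at all".
        for (int i = 0; i < cache_size; i++) _entries[i] = sentinel;
      }
      Entry* find(int pc_offset) {
        // Slot 0 is the most recently added entry, so check it first; a shared
        // "last hit" word written by many threads would only bounce its cache
        // line around, which is why no such pointer is kept any more.
        Entry* res = _entries[0];
        if (res == NULL) return NULL;            // native method: no entries
        if (res->pc_offset == pc_offset) return res;
        for (int i = 1; i < cache_size; ++i) {
          res = _entries[i];
          if (res->pc_offset < 0) break;         // sentinel: rest is empty
          if (res->pc_offset == pc_offset) return res;
        }
        return NULL;                             // caller falls back to a search
      }
      void add(Entry* e) {
        // Shift entries down one slot; the new entry becomes _entries[0].
        for (int i = 0; i < cache_size; i++) {
          Entry* next = _entries[i];
          _entries[i] = e;
          e = next;
        }
      }
    };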
--- a/src/share/vm/code/nmethod.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/code/nmethod.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -34,7 +34,6 @@ class ExceptionCache : public CHeapObj { friend class VMStructs; private: - static address _unwind_handler; enum { cache_size = 16 }; klassOop _exception_type; address _pc[cache_size]; @@ -62,8 +61,6 @@ bool match_exception_with_space(Handle exception) ; address test_address(address addr); bool add_address_and_handler(address addr, address handler) ; - - static address unwind_handler() { return _unwind_handler; } }; @@ -72,14 +69,13 @@ friend class VMStructs; private: enum { cache_size = 4 }; - PcDesc* _last_pc_desc; // most recent pc_desc found PcDesc* _pc_descs[cache_size]; // last cache_size pc_descs found public: - PcDescCache() { debug_only(_last_pc_desc = NULL); } + PcDescCache() { debug_only(_pc_descs[0] = NULL); } void reset_to(PcDesc* initial_pc_desc); PcDesc* find_pc_desc(int pc_offset, bool approximate); void add_pc_desc(PcDesc* pc_desc); - PcDesc* last_pc_desc() { return _last_pc_desc; } + PcDesc* last_pc_desc() { return _pc_descs[0]; } }; @@ -181,7 +177,7 @@ unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes? // Protected by Patching_lock - unsigned char _state; // {alive, not_entrant, zombie, unloaded) + unsigned char _state; // {alive, not_entrant, zombie, unloaded} #ifdef ASSERT bool _oops_are_stale; // indicates that it's no longer safe to access oops section @@ -197,7 +193,10 @@ NOT_PRODUCT(bool _has_debug_info; ) - // Nmethod Flushing lock (if non-zero, then the nmethod is not removed) + // Nmethod Flushing lock. If non-zero, then the nmethod is not removed + // and is not made into a zombie. However, once the nmethod is made into + // a zombie, it will be locked one final time if CompiledMethodUnload + // event processing needs to be done. jint _lock_count; // not_entrant method removal. Each mark_sweep pass will update @@ -230,6 +229,7 @@ // For native wrappers nmethod(methodOop method, int nmethod_size, + int compile_id, CodeOffsets* offsets, CodeBuffer *code_buffer, int frame_size, @@ -300,6 +300,7 @@ int comp_level); static nmethod* new_native_nmethod(methodHandle method, + int compile_id, CodeBuffer *code_buffer, int vep_offset, int frame_complete, @@ -460,6 +461,7 @@ public: void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); } void fix_oop_relocations() { fix_oop_relocations(NULL, NULL, false); } + void verify_oop_relocations(); bool is_at_poll_return(address pc); bool is_at_poll_or_poll_return(address pc); @@ -500,8 +502,8 @@ address continuation_for_implicit_exception(address pc); // On-stack replacement support - int osr_entry_bci() const { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _entry_bci; } - address osr_entry() const { assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod"); return _osr_entry_point; } + int osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; } + address osr_entry() const { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; } void invalidate_osr_method(); nmethod* osr_link() const { return _osr_link; } void set_osr_link(nmethod *n) { _osr_link = n; } @@ -525,8 +527,9 @@ void flush(); public: - // If returning true, it is unsafe to remove this nmethod even though it is a zombie - // nmethod, since the VM might have a reference to it. Should only be called from a safepoint. 
+ // When true is returned, it is unsafe to remove this nmethod even if + // it is a zombie, since the VM or the ServiceThread might still be + // using it. bool is_locked_by_vm() const { return _lock_count >0; } // See comment at definition of _last_seen_on_stack @@ -610,10 +613,6 @@ void verify_scopes(); void verify_interrupt_point(address interrupt_point); - // print compilation helper - static void print_compilation(outputStream *st, const char *method_name, const char *title, - methodOop method, bool is_blocking, int compile_id, int bci, int comp_level); - // printing support void print() const; void print_code(); @@ -629,7 +628,7 @@ // need to re-define this from CodeBlob else the overload hides it virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); } - void print_on(outputStream* st, const char* title) const; + void print_on(outputStream* st, const char* msg) const; // Logging void log_identity(xmlStream* log) const; @@ -695,13 +694,20 @@ }; -// Locks an nmethod so its code will not get removed, even if it is a zombie/not_entrant method +// Locks an nmethod so its code will not get removed and it will not +// be made into a zombie, even if it is a not_entrant method. After the +// nmethod becomes a zombie, if CompiledMethodUnload event processing +// needs to be done, then lock_nmethod() is used directly to keep the +// generated code from being reused too early. class nmethodLocker : public StackObj { nmethod* _nm; public: - static void lock_nmethod(nmethod* nm); // note: nm can be NULL + // note: nm can be NULL + // Only JvmtiDeferredEvent::compiled_method_unload_event() + // should pass zombie_ok == true. + static void lock_nmethod(nmethod* nm, bool zombie_ok = false); static void unlock_nmethod(nmethod* nm); // (ditto) nmethodLocker(address pc); // derive nm from pc
--- a/src/share/vm/code/relocInfo.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/code/relocInfo.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -472,20 +472,14 @@ return itr._rh; } - -static inline bool is_index(intptr_t index) { - return 0 < index && index < os::vm_page_size(); -} - - int32_t Relocation::runtime_address_to_index(address runtime_address) { - assert(!is_index((intptr_t)runtime_address), "must not look like an index"); + assert(!is_reloc_index((intptr_t)runtime_address), "must not look like an index"); if (runtime_address == NULL) return 0; StubCodeDesc* p = StubCodeDesc::desc_for(runtime_address); if (p != NULL && p->begin() == runtime_address) { - assert(is_index(p->index()), "there must not be too many stubs"); + assert(is_reloc_index(p->index()), "there must not be too many stubs"); return (int32_t)p->index(); } else { // Known "miscellaneous" non-stub pointers: @@ -506,7 +500,7 @@ address Relocation::index_to_runtime_address(int32_t index) { if (index == 0) return NULL; - if (is_index(index)) { + if (is_reloc_index(index)) { StubCodeDesc* p = StubCodeDesc::desc_for_index(index); assert(p != NULL, "there must be a stub for this index"); return p->begin(); @@ -634,7 +628,7 @@ #ifndef _LP64 p = pack_1_int_to(p, index); #else - if (is_index(index)) { + if (is_reloc_index(index)) { p = pack_2_ints_to(p, index, 0); } else { jlong t = (jlong) _target; @@ -642,7 +636,7 @@ int32_t hi = high(t); p = pack_2_ints_to(p, lo, hi); DEBUG_ONLY(jlong t1 = jlong_from(hi, lo)); - assert(!is_index(t1) && (address) t1 == _target, "not symmetric"); + assert(!is_reloc_index(t1) && (address) t1 == _target, "not symmetric"); } #endif /* _LP64 */ dest->set_locs_end((relocInfo*) p); @@ -656,7 +650,7 @@ int32_t lo, hi; unpack_2_ints(lo, hi); jlong t = jlong_from(hi, lo);; - if (is_index(t)) { + if (is_reloc_index(t)) { _target = index_to_runtime_address(t); } else { _target = (address) t; @@ -798,6 +792,14 @@ } +void oop_Relocation::verify_oop_relocation() { + if (!oop_is_immediate()) { + // get the oop from the pool, and re-insert it into the instruction: + verify_value(value()); + } +} + + RelocIterator virtual_call_Relocation::parse_ic(nmethod* &nm, address &ic_call, address &first_oop, oop* &oop_addr, bool *is_optimized) { assert(ic_call != NULL, "ic_call address must be set");
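The old file-local is_index() helper is promoted to relocInfo::is_reloc_index() (see relocInfo.hpp in the next hunk): values in the open interval (0, vm_page_size) are taken to be stub indices, everything else a real address, which is also what the new can_be_relocated() test guards against. A standalone sketch of that encoding, with a made-up page size and stub table:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    static const intptr_t vm_page_size = 4096;  // stand-in

    // Small positive values are reserved for stub indices; anything else is
    // treated as a raw address.
    static bool is_reloc_index(intptr_t value) {
      return 0 < value && value < vm_page_size;
    }

    static const char* stub_table[vm_page_size] = { NULL };  // index -> stub

    static intptr_t encode_runtime_address(const char* addr, int stub_index) {
      if (addr == NULL) return 0;
      if (stub_index > 0) {
        assert(is_reloc_index(stub_index) && "there must not be too many stubs");
        return stub_index;                       // compact form
      }
      intptr_t raw = (intptr_t) addr;
      assert(!is_reloc_index(raw) && "must not look like an index");
      return raw;                                // full address
    }

    static const char* decode_runtime_address(intptr_t value) {
      if (value == 0) return NULL;
      if (is_reloc_index(value)) return stub_table[value];
      return (const char*) value;
    }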
--- a/src/share/vm/code/relocInfo.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/code/relocInfo.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -703,6 +703,10 @@ assert(datalen()==0 || type()==relocInfo::none, "no data here"); } + static bool is_reloc_index(intptr_t index) { + return 0 < index && index < os::vm_page_size(); + } + protected: // Helper functions for pack_data_to() and unpack_data(). @@ -765,7 +769,8 @@ protected: // platform-dependent utilities for decoding and patching instructions - void pd_set_data_value (address x, intptr_t off); // a set or mem-ref + void pd_set_data_value (address x, intptr_t off, bool verify_only = false); // a set or mem-ref + void pd_verify_data_value (address x, intptr_t off) { pd_set_data_value(x, off, true); } address pd_call_destination (address orig_addr = NULL); void pd_set_call_destination (address x); void pd_swap_in_breakpoint (address x, short* instrs, int instrlen); @@ -880,6 +885,12 @@ else pd_set_data_value(x, o); } + void verify_value(address x) { + if (addr_in_const()) + assert(*(address*)addr() == x, "must agree"); + else + pd_verify_data_value(x, offset()); + } // The "o" (displacement) argument is relevant only to split relocations // on RISC machines. In some CPUs (SPARC), the set-hi and set-lo ins'ns @@ -950,6 +961,8 @@ void fix_oop_relocation(); // reasserts oop value + void verify_oop_relocation(); + address value() { return (address) *oop_addr(); } bool oop_is_immediate() { return oop_index() == 0; } @@ -1118,6 +1131,12 @@ return rh; } + // Some address looking values aren't safe to treat as relocations + // and should just be treated as constants. + static bool can_be_relocated(address target) { + return target != NULL && !is_reloc_index((intptr_t)target); + } + private: address _target; // address in runtime
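Both relocInfo hunks rely on the same convention: a stub index is always a small positive value (less than one VM page) and no valid runtime address falls in that range, so one word can carry either an index or an address and is_reloc_index() tells them apart. A minimal standalone sketch of that convention follows; the 4K page size is an assumption standing in for os::vm_page_size(), and decode() is an invented helper, not a HotSpot function.

#include <cstdint>
#include <cstdio>

static const intptr_t kPageSize = 4096;        // assumption: os::vm_page_size() == 4K

static bool is_reloc_index(intptr_t v) {
  return 0 < v && v < kPageSize;               // small positive values are indexes
}

// Decode a packed word the way index_to_runtime_address()/unpack_data() do:
// a small value names a stub by index, anything else is the address itself.
static void decode(intptr_t word) {
  if (word == 0) {
    std::printf("null target\n");
  } else if (is_reloc_index(word)) {
    std::printf("stub index %d\n", (int) word);
  } else {
    std::printf("runtime address %p\n", (void*) word);
  }
}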
--- a/src/share/vm/code/vmreg.hpp Fri Apr 22 15:22:45 2011 +0200
+++ b/src/share/vm/code/vmreg.hpp Fri Apr 22 15:30:53 2011 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

--- a/src/share/vm/compiler/compileBroker.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/compiler/compileBroker.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -269,11 +269,6 @@ } -void CompileTask::print_compilation(outputStream *st, methodOop method, char* method_name) { - nmethod::print_compilation(st, method_name,/*title*/ NULL, method, - is_blocking(), compile_id(), osr_bci(), comp_level()); -} - // ------------------------------------------------------------------ // CompileTask::print_line_on_error // @@ -285,31 +280,115 @@ // Otherwise it's the same as CompileTask::print_line() // void CompileTask::print_line_on_error(outputStream* st, char* buf, int buflen) { - methodOop method = (methodOop)JNIHandles::resolve(_method); // print compiler name st->print("%s:", CompileBroker::compiler(comp_level())->name()); - char* method_name = NULL; - if (method != NULL) { - method_name = method->name_and_sig_as_C_string(buf, buflen); - } - print_compilation(st, method, method_name); + print_compilation(st); } // ------------------------------------------------------------------ // CompileTask::print_line void CompileTask::print_line() { - Thread *thread = Thread::current(); - methodHandle method(thread, - (methodOop)JNIHandles::resolve(method_handle())); - ResourceMark rm(thread); - ttyLocker ttyl; // keep the following output all in one block - // print compiler name if requested if (CIPrintCompilerName) tty->print("%s:", CompileBroker::compiler(comp_level())->name()); - print_compilation(tty, method(), NULL); + print_compilation(); +} + + +// ------------------------------------------------------------------ +// CompileTask::print_compilation_impl +void CompileTask::print_compilation_impl(outputStream* st, methodOop method, int compile_id, int comp_level, bool is_osr_method, int osr_bci, bool is_blocking, const char* msg) { + st->print("%7d ", (int) st->time_stamp().milliseconds()); // print timestamp + st->print("%4d ", compile_id); // print compilation number + + // method attributes + const char compile_type = is_osr_method ? '%' : ' '; + const char sync_char = method->is_synchronized() ? 's' : ' '; + const char exception_char = method->has_exception_handler() ? '!' : ' '; + const char blocking_char = is_blocking ? 'b' : ' '; + const char native_char = method->is_native() ? 'n' : ' '; + + // print method attributes + st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, native_char); + + if (TieredCompilation) { + if (comp_level != -1) st->print("%d ", comp_level); + else st->print("- "); + } + st->print(" "); // more indent + + method->print_short_name(st); + if (is_osr_method) { + st->print(" @ %d", osr_bci); + } + st->print(" (%d bytes)", method->code_size()); + + if (msg != NULL) { + st->print(" %s", msg); + } + st->cr(); } +// ------------------------------------------------------------------ +// CompileTask::print_inlining +void CompileTask::print_inlining(outputStream* st, ciMethod* method, int inline_level, int bci, const char* msg) { + // 1234567 + st->print(" "); // print timestamp + // 1234 + st->print(" "); // print compilation number + + // method attributes + const char sync_char = method->is_synchronized() ? 's' : ' '; + const char exception_char = method->has_exception_handlers() ? '!' : ' '; + const char monitors_char = method->has_monitor_bytecodes() ? 
'm' : ' '; + + // print method attributes + st->print(" %c%c%c ", sync_char, exception_char, monitors_char); + + if (TieredCompilation) { + st->print(" "); + } + st->print(" "); // more indent + st->print(" "); // initial inlining indent + + for (int i = 0; i < inline_level; i++) st->print(" "); + + st->print("@ %d ", bci); // print bci + method->print_short_name(st); + st->print(" (%d bytes)", method->code_size()); + + if (msg != NULL) { + st->print(" %s", msg); + } + st->cr(); +} + +// ------------------------------------------------------------------ +// CompileTask::print_inline_indent +void CompileTask::print_inline_indent(int inline_level, outputStream* st) { + // 1234567 + st->print(" "); // print timestamp + // 1234 + st->print(" "); // print compilation number + // %s!bn + st->print(" "); // print method attributes + if (TieredCompilation) { + st->print(" "); + } + st->print(" "); // more indent + st->print(" "); // initial inlining indent + for (int i = 0; i < inline_level; i++) st->print(" "); +} + +// ------------------------------------------------------------------ +// CompileTask::print_compilation +void CompileTask::print_compilation(outputStream* st) { + oop rem = JNIHandles::resolve(method_handle()); + assert(rem != NULL && rem->is_method(), "must be"); + methodOop method = (methodOop) rem; + bool is_osr_method = osr_bci() != InvocationEntryBci; + print_compilation_impl(st, method, compile_id(), comp_level(), is_osr_method, osr_bci(), is_blocking()); +} // ------------------------------------------------------------------ // CompileTask::log_task @@ -845,9 +924,9 @@ // Initialize the compilation queue void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler_count) { EXCEPTION_MARK; -#ifndef ZERO +#if !defined(ZERO) && !defined(SHARK) assert(c2_compiler_count > 0 || c1_compiler_count > 0, "No compilers?"); -#endif // !ZERO +#endif // !ZERO && !SHARK if (c2_compiler_count > 0) { _c2_method_queue = new CompileQueue("C2MethodQueue", MethodCompileQueue_lock); } @@ -951,6 +1030,14 @@ return; } +#ifndef PRODUCT + if (osr_bci != -1 && !FLAG_IS_DEFAULT(OSROnlyBCI)) { + if ((OSROnlyBCI > 0) ? (OSROnlyBCI != osr_bci) : (-OSROnlyBCI == osr_bci)) { + // Positive OSROnlyBCI means only compile that bci. Negative means don't compile that BCI. + return; + } + } +#endif // If this method is already in the compile queue, then // we do not block the current thread. @@ -1115,7 +1202,7 @@ assert(!HAS_PENDING_EXCEPTION, "No exception should be present"); // some prerequisites that are compiler specific - if (compiler(comp_level)->is_c2()) { + if (compiler(comp_level)->is_c2() || compiler(comp_level)->is_shark()) { method->constants()->resolve_string_constants(CHECK_0); // Resolve all classes seen in the signature of the method // we are compiling. @@ -1162,7 +1249,13 @@ // do the compilation if (method->is_native()) { if (!PreferInterpreterNativeStubs) { - (void) AdapterHandlerLibrary::create_native_wrapper(method); + // Acquire our lock. 
+ int compile_id; + { + MutexLocker locker(MethodCompileQueue_lock, THREAD); + compile_id = assign_compile_id(method, standard_entry_bci); + } + (void) AdapterHandlerLibrary::create_native_wrapper(method, compile_id); } else { return NULL; } @@ -1270,7 +1363,6 @@ assert(MethodCompileQueue_lock->owner() == Thread::current(), "must hold the compilation queue lock"); bool is_osr = (osr_bci != standard_entry_bci); - assert(!method->is_native(), "no longer compile natives"); uint id; if (CICountOSR && is_osr) { id = ++_osr_compilation_id; @@ -1451,7 +1543,7 @@ // We need this HandleMark to avoid leaking VM handles. HandleMark hm(thread); - if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) { + if (CodeCache::largest_free_block() < CodeCacheMinimumFreeSpace) { // the code cache is really full handle_full_code_cache(); } else if (UseCodeCacheFlushing && CodeCache::needs_flushing()) { @@ -1734,12 +1826,20 @@ UseInterpreter = true; if (UseCompiler || AlwaysCompileLoopMethods ) { if (xtty != NULL) { + stringStream s; + // Dump code cache state into a buffer before locking the tty, + // because log_state() will use locks causing lock conflicts. + CodeCache::log_state(&s); + // Lock to prevent tearing + ttyLocker ttyl; xtty->begin_elem("code_cache_full"); + xtty->print(s.as_string()); xtty->stamp(); xtty->end_elem(); } warning("CodeCache is full. Compiler has been disabled."); warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize="); + CodeCache::print_bounds(tty); #ifndef PRODUCT if (CompileTheWorld || ExitOnFullCodeCache) { before_exit(JavaThread::current());
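The new CompileTask::print_compilation_impl() above produces the familiar one-line-per-compile log: timestamp, compile id, a block of attribute characters ('%' OSR, 's' synchronized, '!' has handlers, 'b' blocking, 'n' native), an optional tier level, then the method name, OSR bci and bytecode size. The standalone sketch below reproduces that column layout with printf for illustration; it always prints the level column (the real code gates it on TieredCompilation) and every name in it is a placeholder.

#include <cstdio>

static void print_compilation_line(int stamp_ms, int id, bool osr, bool sync,
                                   bool handlers, bool blocking, bool native,
                                   int level, const char* method, int bci,
                                   int code_size) {
  std::printf("%7d %4d ", stamp_ms, id);                        // timestamp, compile id
  std::printf("%c%c%c%c%c ",                                    // attribute flags
              osr ? '%' : ' ', sync ? 's' : ' ', handlers ? '!' : ' ',
              blocking ? 'b' : ' ', native ? 'n' : ' ');
  if (level != -1) std::printf("%d  ", level); else std::printf("-  ");
  std::printf("%s", method);
  if (osr) std::printf(" @ %d", bci);
  std::printf(" (%d bytes)\n", code_size);
}

// e.g. print_compilation_line(5123, 42, true, false, true, false, false,
//                             4, "java.lang.String::hashCode", 8, 55);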
--- a/src/share/vm/compiler/compileBroker.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/compiler/compileBroker.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -56,7 +56,6 @@ int _hot_count; // information about its invocation counter const char* _comment; // more info about the task - void print_compilation(outputStream *st, methodOop method, char* method_name); public: CompileTask() { _lock = new Monitor(Mutex::nonleaf+2, "CompileTaskLock"); @@ -96,10 +95,26 @@ CompileTask* prev() const { return _prev; } void set_prev(CompileTask* prev) { _prev = prev; } +private: + static void print_compilation_impl(outputStream* st, methodOop method, int compile_id, int comp_level, bool is_osr_method = false, int osr_bci = -1, bool is_blocking = false, const char* msg = NULL); + +public: + void print_compilation(outputStream* st = tty); + static void print_compilation(outputStream* st, const nmethod* nm, const char* msg = NULL) { + print_compilation_impl(st, nm->method(), nm->compile_id(), nm->comp_level(), nm->is_osr_method(), nm->is_osr_method() ? nm->osr_entry_bci() : -1, /*is_blocking*/ false, msg); + } + + static void print_inlining(outputStream* st, ciMethod* method, int inline_level, int bci, const char* msg = NULL); + static void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = NULL) { + print_inlining(tty, method, inline_level, bci, msg); + } + + static void print_inline_indent(int inline_level, outputStream* st = tty); + void print(); void print_line(); + void print_line_on_error(outputStream* st, char* buf, int buflen); - void print_line_on_error(outputStream* st, char* buf, int buflen); void log_task(xmlStream* log); void log_task_queued(); void log_task_start(CompileLog* log);
--- a/src/share/vm/compiler/compileLog.hpp Fri Apr 22 15:22:45 2011 +0200
+++ b/src/share/vm/compiler/compileLog.hpp Fri Apr 22 15:30:53 2011 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/compiler/compilerOracle.cpp Fri Apr 22 15:22:45 2011 +0200
+++ b/src/share/vm/compiler/compilerOracle.cpp Fri Apr 22 15:30:53 2011 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/compiler/compilerOracle.hpp Fri Apr 22 15:22:45 2011 +0200
+++ b/src/share/vm/compiler/compilerOracle.hpp Fri Apr 22 15:30:53 2011 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/compiler/disassembler.cpp Fri Apr 22 15:22:45 2011 +0200
+++ b/src/share/vm/compiler/disassembler.cpp Fri Apr 22 15:30:53 2011 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/compiler/disassembler.hpp Fri Apr 22 15:22:45 2011 +0200
+++ b/src/share/vm/compiler/disassembler.hpp Fri Apr 22 15:30:53 2011 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Fri Apr 22 15:22:45 2011 +0200
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Fri Apr 22 15:30:53 2011 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -292,13 +292,15 @@ void CMSCollector::ref_processor_init() { if (_ref_processor == NULL) { // Allocate and initialize a reference processor - _ref_processor = ReferenceProcessor::create_ref_processor( - _span, // span - _cmsGen->refs_discovery_is_atomic(), // atomic_discovery - _cmsGen->refs_discovery_is_mt(), // mt_discovery - &_is_alive_closure, - ParallelGCThreads, - ParallelRefProcEnabled); + _ref_processor = + new ReferenceProcessor(_span, // span + (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing + (int) ParallelGCThreads, // mt processing degree + _cmsGen->refs_discovery_is_mt(), // mt discovery + (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree + _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic + &_is_alive_closure, // closure for liveness info + false); // next field updates do not need write barrier // Initialize the _ref_processor field of CMSGen _cmsGen->set_ref_processor(_ref_processor); @@ -641,7 +643,7 @@ } // Support for multi-threaded concurrent phases - if (CollectedHeap::use_parallel_gc_threads() && CMSConcurrentMTEnabled) { + if (CMSConcurrentMTEnabled) { if (FLAG_IS_DEFAULT(ConcGCThreads)) { // just for now FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4); @@ -1689,6 +1691,8 @@ MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag); _full_gc_requested = true; CGC_lock->notify(); // nudge CMS thread + } else { + assert(gc_count > full_gc_count, "Error: causal loop"); } } @@ -1988,17 +1992,16 @@ // Temporarily widen the span of the weak reference processing to // the entire heap. MemRegion new_span(GenCollectedHeap::heap()->reserved_region()); - ReferenceProcessorSpanMutator x(ref_processor(), new_span); - + ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span); // Temporarily, clear the "is_alive_non_header" field of the // reference processor. - ReferenceProcessorIsAliveMutator y(ref_processor(), NULL); - + ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL); // Temporarily make reference _processing_ single threaded (non-MT). - ReferenceProcessorMTProcMutator z(ref_processor(), false); - + ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false); // Temporarily make refs discovery atomic - ReferenceProcessorAtomicMutator w(ref_processor(), true); + ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true); + // Temporarily make reference _discovery_ single threaded (non-MT) + ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false); ref_processor()->set_enqueuing_is_done(false); ref_processor()->enable_discovery(); @@ -4263,9 +4266,7 @@ // Refs discovery is already non-atomic. assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic"); - // Mutate the Refs discovery so it is MT during the - // multi-threaded marking phase. 
- ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1); + assert(ref_processor()->discovery_is_mt(), "Discovery should be MT"); DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());) conc_workers()->start_task(&tsk); while (tsk.yielded()) { @@ -4318,6 +4319,8 @@ ResourceMark rm; HandleMark hm; + // Temporarily make refs discovery single threaded (non-MT) + ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false); MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap, &_markStack, &_revisitStack, CMSYield && asynch); // the last argument to iterate indicates whether the iteration @@ -4356,10 +4359,6 @@ verify_overflow_empty(); _abort_preclean = false; if (CMSPrecleaningEnabled) { - // Precleaning is currently not MT but the reference processor - // may be set for MT. Disable it temporarily here. - ReferenceProcessor* rp = ref_processor(); - ReferenceProcessorMTProcMutator z(rp, false); _eden_chunk_index = 0; size_t used = get_eden_used(); size_t capacity = get_eden_capacity(); @@ -4502,11 +4501,16 @@ _collectorState == AbortablePreclean, "incorrect state"); ResourceMark rm; HandleMark hm; + + // Precleaning is currently not MT but the reference processor + // may be set for MT. Disable it temporarily here. + ReferenceProcessor* rp = ref_processor(); + ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false); + // Do one pass of scrubbing the discovered reference lists // to remove any reference objects with strongly-reachable // referents. if (clean_refs) { - ReferenceProcessor* rp = ref_processor(); CMSPrecleanRefsYieldClosure yield_cl(this); assert(rp->span().equals(_span), "Spans should be equal"); CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap, @@ -5576,8 +5580,10 @@ // in the multi-threaded case, but we special-case n=1 here to get // repeatable measurements of the 1-thread overhead of the parallel code. if (n_workers > 1) { - // Make refs discovery MT-safe - ReferenceProcessorMTMutator mt(ref_processor(), true); + // Make refs discovery MT-safe, if it isn't already: it may not + // necessarily be so, since it's possible that we are doing + // ST marking. + ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true); GenCollectedHeap::StrongRootsScope srs(gch); workers->run_task(&tsk); } else { @@ -5703,14 +5709,19 @@ CMSBitMap* mark_bit_map, AbstractWorkGang* workers, OopTaskQueueSet* task_queues): + // XXX Should superclass AGTWOQ also know about AWG since it knows + // about the task_queues used by the AWG? Then it could initialize + // the terminator() object. See 6984287. The set_for_termination() + // below is a temporary band-aid for the regression in 6984287. AbstractGangTaskWOopQueues("Process referents by policy in parallel", task_queues), _task(task), _collector(collector), _span(span), _mark_bit_map(mark_bit_map) - { - assert(_collector->_span.equals(_span) && !_span.is_empty(), - "Inconsistency in _span"); - } + { + assert(_collector->_span.equals(_span) && !_span.is_empty(), + "Inconsistency in _span"); + set_for_termination(workers->active_workers()); + } OopTaskQueueSet* task_queues() { return queues(); } @@ -5872,8 +5883,7 @@ // That is OK as long as the Reference lists are balanced (see // balance_all_queues() and balance_queues()). 
- - rp->set_mt_degree(ParallelGCThreads); + rp->set_active_mt_degree(ParallelGCThreads); CMSRefProcTaskExecutor task_executor(*this); rp->process_discovered_references(&_is_alive_closure, &cmsKeepAliveClosure, @@ -5920,14 +5930,18 @@ } { - TraceTime t("scrub symbol & string tables", PrintGCDetails, false, gclog_or_tty); - // Now clean up stale oops in StringTable - StringTable::unlink(&_is_alive_closure); + TraceTime t("scrub symbol table", PrintGCDetails, false, gclog_or_tty); // Clean up unreferenced symbols in symbol table. SymbolTable::unlink(); } } + if (should_unload_classes() || !JavaObjectsInPerm) { + TraceTime t("scrub string table", PrintGCDetails, false, gclog_or_tty); + // Now clean up stale oops in StringTable + StringTable::unlink(&_is_alive_closure); + } + verify_work_stacks_empty(); // Restore any preserved marks as a result of mark stack or // work queue overflow
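The various ReferenceProcessor*Mutator objects introduced and shuffled above (span, is-alive closure, MT processing, MT discovery, atomicity) all follow the same save/override/restore-on-scope-exit pattern. A generic standalone version of that pattern is sketched below for illustration only; ScopedOverride is an invented name, not a HotSpot class.

template <typename T>
class ScopedOverride {
  T* _slot;
  T  _saved;
public:
  ScopedOverride(T* slot, const T& value) : _slot(slot), _saved(*slot) {
    *_slot = value;                        // install the temporary setting
  }
  ~ScopedOverride() { *_slot = _saved; }   // restore the original on scope exit
};

// e.g. force single-threaded discovery for the duration of one phase:
//   bool discovery_is_mt = true;
//   {
//     ScopedOverride<bool> st_discovery(&discovery_is_mt, false);
//     // ... precleaning work runs with discovery_is_mt == false ...
//   }   // original value restored here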
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1133,7 +1133,7 @@ // rare that the cost of the CAS's involved is in the // noise. That's a measurement that should be done, and // the code simplified if that turns out to be the case. - return false; + return ConcGCThreads > 1; } // Override
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -51,7 +51,7 @@ volatile jint ConcurrentMarkSweepThread::_pending_yields = 0; volatile jint ConcurrentMarkSweepThread::_pending_decrements = 0; -volatile bool ConcurrentMarkSweepThread::_icms_enabled = false; +volatile jint ConcurrentMarkSweepThread::_icms_disabled = 0; volatile bool ConcurrentMarkSweepThread::_should_run = false; // When icms is enabled, the icms thread is stopped until explicitly // started. @@ -84,7 +84,7 @@ } } _sltMonitor = SLT_lock; - set_icms_enabled(CMSIncrementalMode); + assert(!CMSIncrementalMode || icms_is_enabled(), "Error"); } void ConcurrentMarkSweepThread::run() { @@ -341,11 +341,11 @@ void ConcurrentMarkSweepThread::icms_wait() { assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking"); - if (_should_stop && icms_enabled()) { + if (_should_stop && icms_is_enabled()) { MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag); trace_state("pause_icms"); _collector->stats().stop_cms_timer(); - while(!_should_run && icms_enabled()) { + while(!_should_run && icms_is_enabled()) { iCMS_lock->wait(Mutex::_no_safepoint_check_flag); } _collector->stats().start_cms_timer();
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,7 +40,7 @@ class ConcurrentMarkSweepGeneration; class CMSCollector; -// The Concurrent Mark Sweep GC Thread (could be several in the future). +// The Concurrent Mark Sweep GC Thread class ConcurrentMarkSweepThread: public ConcurrentGCThread { friend class VMStructs; friend class ConcurrentMarkSweepGeneration; // XXX should remove friendship @@ -55,8 +55,6 @@ static SurrogateLockerThread::SLT_msg_type _sltBuffer; static Monitor* _sltMonitor; - ConcurrentMarkSweepThread* _next; - static bool _should_terminate; enum CMS_flag_type { @@ -84,7 +82,7 @@ // Tracing messages, enabled by CMSTraceThreadState. static inline void trace_state(const char* desc); - static volatile bool _icms_enabled; // iCMS enabled? + static volatile int _icms_disabled; // a counter to track #iCMS disable & enable static volatile bool _should_run; // iCMS may run static volatile bool _should_stop; // iCMS should stop @@ -214,10 +212,25 @@ // Incremental mode is enabled globally by the flag CMSIncrementalMode. It // must also be enabled/disabled dynamically to allow foreground collections. - static inline void enable_icms() { _icms_enabled = true; } - static inline void disable_icms() { _icms_enabled = false; } - static inline void set_icms_enabled(bool val) { _icms_enabled = val; } - static inline bool icms_enabled() { return _icms_enabled; } +#define ICMS_ENABLING_ASSERT \ + assert((CMSIncrementalMode && _icms_disabled >= 0) || \ + (!CMSIncrementalMode && _icms_disabled <= 0), "Error") + + static inline void enable_icms() { + ICMS_ENABLING_ASSERT; + Atomic::dec(&_icms_disabled); + } + static inline void disable_icms() { + ICMS_ENABLING_ASSERT; + Atomic::inc(&_icms_disabled); + } + static inline bool icms_is_disabled() { + ICMS_ENABLING_ASSERT; + return _icms_disabled > 0; + } + static inline bool icms_is_enabled() { + return !icms_is_disabled(); + } }; inline void ConcurrentMarkSweepThread::trace_state(const char* desc) {
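Replacing the _icms_enabled boolean with the _icms_disabled counter above lets independent callers nest disable/enable pairs without stepping on each other: the mode counts as enabled only while the counter is at or below zero. A standalone model of that counter, using std::atomic rather than the VM's Atomic class and invented function names:

#include <atomic>

static std::atomic<int> g_disable_count{0};   // stand-in for _icms_disabled

static void disable_incremental_mode() { g_disable_count.fetch_add(1); }
static void enable_incremental_mode()  { g_disable_count.fetch_sub(1); }
static bool incremental_mode_enabled() { return g_disable_count.load() <= 0; }

// Unlike a single boolean, two foreground collections can each disable and
// later re-enable the mode; it only becomes enabled again once both have
// called enable_incremental_mode().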
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -192,14 +192,18 @@ "total_collections() should be monotonically increasing"); MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag); + assert(_full_gc_count_before <= gch->total_full_collections(), "Error"); if (gch->total_full_collections() == _full_gc_count_before) { - // Disable iCMS until the full collection is done. + // Disable iCMS until the full collection is done, and + // remember that we did so. CMSCollector::disable_icms(); + _disabled_icms = true; // In case CMS thread was in icms_wait(), wake it up. CMSCollector::start_icms(); // Nudge the CMS thread to start a concurrent collection. CMSCollector::request_full_gc(_full_gc_count_before); } else { + assert(_full_gc_count_before < gch->total_full_collections(), "Error"); FullGCCount_lock->notify_all(); // Inform the Java thread its work is done } } @@ -259,6 +263,8 @@ FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag); } } - // Enable iCMS back. - CMSCollector::enable_icms(); + // Enable iCMS back if we disabled it earlier. + if (_disabled_icms) { + CMSCollector::enable_icms(); + } }
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -128,11 +128,14 @@ // VM operation to invoke a concurrent collection of the heap as a // GenCollectedHeap heap. class VM_GenCollectFullConcurrent: public VM_GC_Operation { + bool _disabled_icms; public: VM_GenCollectFullConcurrent(unsigned int gc_count_before, unsigned int full_gc_count_before, GCCause::Cause gc_cause) - : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */) { + : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */), + _disabled_icms(false) + { assert(FullGCCount_lock != NULL, "Error"); assert(UseAsyncConcMarkSweepGC, "Else will hang caller"); }
--- a/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -262,39 +262,18 @@ for (int i = 0; i < _numMarkedRegions; i++) { assert(_markedRegions.at(i) != NULL, "Should be true by sorting!"); _markedRegions.at(i)->set_sort_index(i); - if (G1PrintRegionLivenessInfo > 0) { - if (i == 0) gclog_or_tty->print_cr("Sorted marked regions:"); - if (i < G1PrintRegionLivenessInfo || - (_numMarkedRegions-i) < G1PrintRegionLivenessInfo) { - HeapRegion* hr = _markedRegions.at(i); - size_t u = hr->used(); - gclog_or_tty->print_cr(" Region %d: %d used, %d max live, %5.2f%%.", - i, u, hr->max_live_bytes(), - 100.0*(float)hr->max_live_bytes()/(float)u); - } + } + if (G1PrintRegionLivenessInfo) { + G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting"); + for (int i = 0; i < _numMarkedRegions; ++i) { + HeapRegion* r = _markedRegions.at(i); + cl.doHeapRegion(r); } } - if (G1PolicyVerbose > 1) - printSortedHeapRegions(); assert(verify(), "should now be sorted"); } void -printHeapRegion(HeapRegion *hr) { - if (hr->isHumongous()) - gclog_or_tty->print("H: "); - if (hr->in_collection_set()) - gclog_or_tty->print("CS: "); - gclog_or_tty->print_cr("Region " PTR_FORMAT " (%s%s) " - "[" PTR_FORMAT ", " PTR_FORMAT"] " - "Used: " SIZE_FORMAT "K, garbage: " SIZE_FORMAT "K.", - hr, hr->is_young() ? "Y " : " ", - hr->is_marked()? "M1" : "M0", - hr->bottom(), hr->end(), - hr->used()/K, hr->garbage_bytes()/K); -} - -void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) { assert(!hr->isHumongous(), "Humongous regions shouldn't be added to the collection set"); @@ -351,27 +330,9 @@ void CollectionSetChooser::updateAfterFullCollection() { - G1CollectedHeap* g1h = G1CollectedHeap::heap(); clearMarkedHeapRegions(); } -void -CollectionSetChooser::printSortedHeapRegions() { - gclog_or_tty->print_cr("Printing %d Heap Regions sorted by amount of known garbage", - _numMarkedRegions); - - DEBUG_ONLY(int marked_count = 0;) - for (int i = 0; i < _markedRegions.length(); i++) { - HeapRegion* r = _markedRegions.at(i); - if (r != NULL) { - printHeapRegion(r); - DEBUG_ONLY(marked_count++;) - } - } - assert(marked_count == _numMarkedRegions, "must be"); - gclog_or_tty->print_cr("Done sorted heap region print"); -} - void CollectionSetChooser::removeRegion(HeapRegion *hr) { int si = hr->sort_index(); assert(si == -1 || hr->is_marked(), "Sort index not valid.");
--- a/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -100,8 +100,6 @@ CollectionSetChooser(); - void printSortedHeapRegions(); - void sortMarkedHeapRegions(); void fillCache(); bool addRegionToCache(void);
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -31,23 +31,31 @@ #include "gc_implementation/g1/heapRegionSeq.inline.hpp" #include "memory/space.inline.hpp" #include "runtime/atomic.hpp" +#include "runtime/java.hpp" #include "utilities/copy.hpp" // Possible sizes for the card counts cache: odd primes that roughly double in size. // (See jvmtiTagMap.cpp). -int ConcurrentG1Refine::_cc_cache_sizes[] = { - 16381, 32771, 76831, 150001, 307261, - 614563, 1228891, 2457733, 4915219, 9830479, - 19660831, 39321619, 78643219, 157286461, -1 + +#define MAX_SIZE ((size_t) -1) + +size_t ConcurrentG1Refine::_cc_cache_sizes[] = { + 16381, 32771, 76831, 150001, 307261, + 614563, 1228891, 2457733, 4915219, 9830479, + 19660831, 39321619, 78643219, 157286461, MAX_SIZE }; ConcurrentG1Refine::ConcurrentG1Refine() : _card_counts(NULL), _card_epochs(NULL), - _n_card_counts(0), _max_n_card_counts(0), + _n_card_counts(0), _max_cards(0), _max_n_card_counts(0), _cache_size_index(0), _expand_card_counts(false), _hot_cache(NULL), _def_use_cache(false), _use_cache(false), - _n_periods(0), + // We initialize the epochs of the array to 0. By initializing + // _n_periods to 1 and not 0 we automatically invalidate all the + // entries on the array. Otherwise we might accidentally think that + // we claimed a card that was in fact never set (see CR7033292). + _n_periods(1), _threads(NULL), _n_threads(0) { @@ -98,27 +106,44 @@ void ConcurrentG1Refine::init() { if (G1ConcRSLogCacheSize > 0) { _g1h = G1CollectedHeap::heap(); - _max_n_card_counts = - (unsigned) (_g1h->max_capacity() >> CardTableModRefBS::card_shift); + + _max_cards = _g1h->max_capacity() >> CardTableModRefBS::card_shift; + _max_n_card_counts = _max_cards * G1MaxHotCardCountSizePercent / 100; size_t max_card_num = ((size_t)1 << (sizeof(unsigned)*BitsPerByte-1)) - 1; - guarantee(_max_n_card_counts < max_card_num, "card_num representation"); + guarantee(_max_cards < max_card_num, "card_num representation"); - int desired = _max_n_card_counts / InitialCacheFraction; - for (_cache_size_index = 0; - _cc_cache_sizes[_cache_size_index] >= 0; _cache_size_index++) { - if (_cc_cache_sizes[_cache_size_index] >= desired) break; - } - _cache_size_index = MAX2(0, (_cache_size_index - 1)); + // We need _n_card_counts to be less than _max_n_card_counts here + // so that the expansion call (below) actually allocates the + // _counts and _epochs arrays. + assert(_n_card_counts == 0, "pre-condition"); + assert(_max_n_card_counts > 0, "pre-condition"); - int initial_size = _cc_cache_sizes[_cache_size_index]; - if (initial_size < 0) initial_size = _max_n_card_counts; + // Find the index into cache size array that is of a size that's + // large enough to hold desired_sz. + size_t desired_sz = _max_cards / InitialCacheFraction; + int desired_sz_index = 0; + while (_cc_cache_sizes[desired_sz_index] < desired_sz) { + desired_sz_index += 1; + assert(desired_sz_index < MAX_CC_CACHE_INDEX, "invariant"); + } + assert(desired_sz_index < MAX_CC_CACHE_INDEX, "invariant"); - // Make sure we don't go bigger than we will ever need - _n_card_counts = MIN2((unsigned) initial_size, _max_n_card_counts); + // If the desired_sz value is between two sizes then + // _cc_cache_sizes[desired_sz_index-1] < desired_sz <= _cc_cache_sizes[desired_sz_index] + // we will start with the lower size in the optimistic expectation that + // we will not need to expand up. 
Note desired_sz_index could also be 0. + if (desired_sz_index > 0 && + _cc_cache_sizes[desired_sz_index] > desired_sz) { + desired_sz_index -= 1; + } - _card_counts = NEW_C_HEAP_ARRAY(CardCountCacheEntry, _n_card_counts); - _card_epochs = NEW_C_HEAP_ARRAY(CardEpochCacheEntry, _n_card_counts); + if (!expand_card_count_cache(desired_sz_index)) { + // Allocation was unsuccessful - exit + vm_exit_during_initialization("Could not reserve enough space for card count cache"); + } + assert(_n_card_counts > 0, "post-condition"); + assert(_cache_size_index == desired_sz_index, "post-condition"); Copy::fill_to_bytes(&_card_counts[0], _n_card_counts * sizeof(CardCountCacheEntry)); @@ -163,10 +188,13 @@ ConcurrentG1Refine::~ConcurrentG1Refine() { if (G1ConcRSLogCacheSize > 0) { + // Please see the comment in allocate_card_count_cache + // for why we call os::malloc() and os::free() directly. assert(_card_counts != NULL, "Logic"); - FREE_C_HEAP_ARRAY(CardCountCacheEntry, _card_counts); + os::free(_card_counts); assert(_card_epochs != NULL, "Logic"); - FREE_C_HEAP_ARRAY(CardEpochCacheEntry, _card_epochs); + os::free(_card_epochs); + assert(_hot_cache != NULL, "Logic"); FREE_C_HEAP_ARRAY(jbyte*, _hot_cache); } @@ -373,7 +401,7 @@ // RSet updating while within an evacuation pause. // In this case worker_i should be the id of a GC worker thread assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause"); - assert(worker_i < (int) DirtyCardQueueSet::num_par_ids(), "incorrect worker id"); + assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "incorrect worker id"); into_cset_dcq->enqueue(entry); } } @@ -382,29 +410,93 @@ } } -void ConcurrentG1Refine::expand_card_count_cache() { - if (_n_card_counts < _max_n_card_counts) { - int new_idx = _cache_size_index+1; - int new_size = _cc_cache_sizes[new_idx]; - if (new_size < 0) new_size = _max_n_card_counts; +// The arrays used to hold the card counts and the epochs must have +// a 1:1 correspondence. Hence they are allocated and freed together +// Returns true if the allocations of both the counts and epochs +// were successful; false otherwise. +bool ConcurrentG1Refine::allocate_card_count_cache(size_t n, + CardCountCacheEntry** counts, + CardEpochCacheEntry** epochs) { + // We call the allocation/free routines directly for the counts + // and epochs arrays. The NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY + // macros call AllocateHeap and FreeHeap respectively. + // AllocateHeap will call vm_exit_out_of_memory in the event + // of an allocation failure and abort the JVM. With the + // _counts/epochs arrays we only need to abort the JVM if the + // initial allocation of these arrays fails. + // + // Additionally AllocateHeap/FreeHeap do some tracing of + // allocate/free calls so calling one without calling the + // other can cause inconsistencies in the tracing. So we + // call neither. 
- // Make sure we don't go bigger than we will ever need - new_size = MIN2((unsigned) new_size, _max_n_card_counts); + assert(*counts == NULL, "out param"); + assert(*epochs == NULL, "out param"); + + size_t counts_size = n * sizeof(CardCountCacheEntry); + size_t epochs_size = n * sizeof(CardEpochCacheEntry); + + *counts = (CardCountCacheEntry*) os::malloc(counts_size); + if (*counts == NULL) { + // allocation was unsuccessful + return false; + } + + *epochs = (CardEpochCacheEntry*) os::malloc(epochs_size); + if (*epochs == NULL) { + // allocation was unsuccessful - free counts array + assert(*counts != NULL, "must be"); + os::free(*counts); + *counts = NULL; + return false; + } - // Expand the card count and card epoch tables - if (new_size > (int)_n_card_counts) { - // We can just free and allocate a new array as we're - // not interested in preserving the contents - assert(_card_counts != NULL, "Logic!"); - assert(_card_epochs != NULL, "Logic!"); - FREE_C_HEAP_ARRAY(CardCountCacheEntry, _card_counts); - FREE_C_HEAP_ARRAY(CardEpochCacheEntry, _card_epochs); - _n_card_counts = new_size; - _card_counts = NEW_C_HEAP_ARRAY(CardCountCacheEntry, _n_card_counts); - _card_epochs = NEW_C_HEAP_ARRAY(CardEpochCacheEntry, _n_card_counts); - _cache_size_index = new_idx; + // We successfully allocated both counts and epochs + return true; +} + +// Returns true if the card counts/epochs cache was +// successfully expanded; false otherwise. +bool ConcurrentG1Refine::expand_card_count_cache(int cache_size_idx) { + // Can we expand the card count and epoch tables? + if (_n_card_counts < _max_n_card_counts) { + assert(cache_size_idx >= 0 && cache_size_idx < MAX_CC_CACHE_INDEX, "oob"); + + size_t cache_size = _cc_cache_sizes[cache_size_idx]; + // Make sure we don't go bigger than we will ever need + cache_size = MIN2(cache_size, _max_n_card_counts); + + // Should we expand the card count and card epoch tables? + if (cache_size > _n_card_counts) { + // We have been asked to allocate new, larger, arrays for + // the card counts and the epochs. Attempt the allocation + // of both before we free the existing arrays in case + // the allocation is unsuccessful... + CardCountCacheEntry* counts = NULL; + CardEpochCacheEntry* epochs = NULL; + + if (allocate_card_count_cache(cache_size, &counts, &epochs)) { + // Allocation was successful. + // We can just free the old arrays; we're + // not interested in preserving the contents + if (_card_counts != NULL) os::free(_card_counts); + if (_card_epochs != NULL) os::free(_card_epochs); + + // Cache the size of the arrays and the index that got us there. + _n_card_counts = cache_size; + _cache_size_index = cache_size_idx; + + _card_counts = counts; + _card_epochs = epochs; + + // We successfully allocated/expanded the caches. + return true; + } } } + + // We did not successfully expand the caches. + return false; } void ConcurrentG1Refine::clear_and_record_card_counts() { @@ -415,10 +507,16 @@ #endif if (_expand_card_counts) { - expand_card_count_cache(); + int new_idx = _cache_size_index + 1; + + if (expand_card_count_cache(new_idx)) { + // Allocation was successful and _n_card_counts has + // been updated to the new size. We only need to clear + // the epochs so we don't read a bogus epoch value + // when inserting a card into the hot card cache. + Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry)); + } _expand_card_counts = false; - // Only need to clear the epochs. 
- Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry)); } int this_epoch = (int) _n_periods;
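allocate_card_count_cache() above must keep two parallel arrays (counts and epochs) in 1:1 correspondence, so it allocates both before freeing anything and reports failure to the caller instead of aborting the VM. The shape of that all-or-nothing allocation is sketched below with plain malloc/free and simplified element types; the names are placeholders, not HotSpot's.

#include <cstdlib>

struct CountEntry { unsigned count; };
struct EpochEntry { unsigned long long epoch; };

// Returns true only if BOTH arrays were allocated; on failure nothing leaks
// and *counts/*epochs stay NULL so the caller can keep using its old arrays.
static bool allocate_paired_arrays(size_t n, CountEntry** counts, EpochEntry** epochs) {
  *counts = (CountEntry*) std::malloc(n * sizeof(CountEntry));
  if (*counts == NULL) return false;

  *epochs = (EpochEntry*) std::malloc(n * sizeof(EpochEntry));
  if (*epochs == NULL) {
    std::free(*counts);          // undo the first allocation
    *counts = NULL;
    return false;
  }
  return true;
}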
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -94,7 +94,7 @@ } CardEpochCacheEntry; julong make_epoch_entry(unsigned int card_num, unsigned int epoch) { - assert(0 <= card_num && card_num < _max_n_card_counts, "Bounds"); + assert(0 <= card_num && card_num < _max_cards, "Bounds"); assert(0 <= epoch && epoch <= _n_periods, "must be"); return ((julong) card_num << card_num_shift) | epoch; @@ -117,15 +117,24 @@ CardEpochCacheEntry* _card_epochs; // The current number of buckets in the card count cache - unsigned _n_card_counts; + size_t _n_card_counts; + + // The number of cards for the entire reserved heap + size_t _max_cards; - // The max number of buckets required for the number of - // cards for the entire reserved heap - unsigned _max_n_card_counts; + // The max number of buckets for the card counts and epochs caches. + // This is the maximum that the counts and epochs will grow to. + // It is specified as a fraction or percentage of _max_cards using + // G1MaxHotCardCountSizePercent. + size_t _max_n_card_counts; // Possible sizes of the cache: odd primes that roughly double in size. // (See jvmtiTagMap.cpp). - static int _cc_cache_sizes[]; + enum { + MAX_CC_CACHE_INDEX = 15 // maximum index into the cache size array. + }; + + static size_t _cc_cache_sizes[MAX_CC_CACHE_INDEX]; // The index in _cc_cache_sizes corresponding to the size of // _card_counts. @@ -147,11 +156,22 @@ CardTableModRefBS* _ct_bs; G1CollectedHeap* _g1h; - // Expands the array that holds the card counts to the next size up - void expand_card_count_cache(); + // Helper routine for expand_card_count_cache(). + // The arrays used to hold the card counts and the epochs must have + // a 1:1 correspondence. Hence they are allocated and freed together. + // Returns true if the allocations of both the counts and epochs + // were successful; false otherwise. + bool allocate_card_count_cache(size_t n, + CardCountCacheEntry** counts, + CardEpochCacheEntry** epochs); + + // Expands the arrays that hold the card counts and epochs + // to the cache size at index. Returns true if the expansion/ + // allocation was successful; false otherwise. + bool expand_card_count_cache(int index); // hash a given key (index of card_ptr) with the specified size - static unsigned int hash(size_t key, int size) { + static unsigned int hash(size_t key, size_t size) { return (unsigned int) key % size; }
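The cache sizes in _cc_cache_sizes are odd primes that roughly double, a card is mapped to a bucket with a simple key % size hash, and the init code picks the smallest table entry that covers the desired size, dropping back one step when the desired size falls strictly between two entries. A small standalone sketch of that selection logic, with a shortened, invented size table:

#include <cstddef>

static const size_t kCacheSizes[] = { 16381, 32771, 76831, 150001, 307261 };
static const int    kNumSizes     = sizeof(kCacheSizes) / sizeof(kCacheSizes[0]);

// Pick the index whose size covers 'desired', preferring the next size down
// when 'desired' falls strictly between two table entries (the optimistic
// "start low, expand later if needed" choice made in init()).
static int pick_cache_size_index(size_t desired) {
  int idx = 0;
  while (idx < kNumSizes - 1 && kCacheSizes[idx] < desired) idx += 1;
  if (idx > 0 && kCacheSizes[idx] > desired) idx -= 1;
  return idx;
}

static unsigned bucket_for(size_t key, size_t table_size) {
  return (unsigned) (key % table_size);    // same hash shape as ConcurrentG1Refine::hash
}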
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1204,7 +1204,6 @@ g1p->record_concurrent_mark_remark_end(); } - #define CARD_BM_TEST_MODE 0 class CalcLiveObjectsClosure: public HeapRegionClosure { @@ -1726,6 +1725,11 @@ } _total_counting_time += this_final_counting_time; + if (G1PrintRegionLivenessInfo) { + G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking"); + _g1h->heap_region_iterate(&cl); + } + // Install newly created mark bitMap as "prev". swapMarkBitMaps(); @@ -1828,7 +1832,7 @@ G1CollectedHeap* g1h = G1CollectedHeap::heap(); _cleanup_list.verify_optional(); - FreeRegionList local_free_list("Local Cleanup List"); + FreeRegionList tmp_free_list("Tmp Free List"); if (G1ConcRegionFreeingVerbose) { gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " @@ -1842,7 +1846,7 @@ HeapRegion* hr = _cleanup_list.remove_head(); assert(hr != NULL, "the list was not empty"); hr->rem_set()->clear(); - local_free_list.add_as_tail(hr); + tmp_free_list.add_as_tail(hr); // Instead of adding one region at a time to the secondary_free_list, // we accumulate them in the local list and move them a few at a @@ -1850,20 +1854,20 @@ // we do during this process. We'll also append the local list when // _cleanup_list is empty (which means we just removed the last // region from the _cleanup_list). - if ((local_free_list.length() % G1SecondaryFreeListAppendLength == 0) || + if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) || _cleanup_list.is_empty()) { if (G1ConcRegionFreeingVerbose) { gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " "appending "SIZE_FORMAT" entries to the " "secondary_free_list, clean list still has " SIZE_FORMAT" entries", - local_free_list.length(), + tmp_free_list.length(), _cleanup_list.length()); } { MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); - g1h->secondary_free_list_add_as_tail(&local_free_list); + g1h->secondary_free_list_add_as_tail(&tmp_free_list); SecondaryFreeList_lock->notify_all(); } @@ -1874,7 +1878,7 @@ } } } - assert(local_free_list.is_empty(), "post-condition"); + assert(tmp_free_list.is_empty(), "post-condition"); } // Support closures for reference procssing in G1 @@ -2141,21 +2145,22 @@ G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap()); G1CMDrainMarkingStackClosure g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive); - // We use the work gang from the G1CollectedHeap and we utilize all // the worker threads. - int active_workers = MAX2(MIN2(g1h->workers()->total_workers(), (int)_max_task_num), 1); + int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1; + active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1); G1RefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(), g1h->workers(), active_workers); + if (rp->processing_is_mt()) { // Set the degree of MT here. If the discovery is done MT, there // may have been a different number of threads doing the discovery // and a different number of discovered lists may have Ref objects. // That is OK as long as the Reference lists are balanced (see // balance_all_queues() and balance_queues()). 
- rp->set_mt_degree(active_workers); + rp->set_active_mt_degree(active_workers); rp->process_discovered_references(&g1_is_alive, &g1_keep_alive, @@ -3182,7 +3187,7 @@ template <class T> void do_oop_work(T* p) { assert( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant"); - assert(!_g1h->is_on_free_list( + assert(!_g1h->is_on_master_free_list( _g1h->heap_region_containing((HeapWord*) p)), "invariant"); oop obj = oopDesc::load_decode_heap_oop(p); @@ -3198,8 +3203,12 @@ CMTask* task) : _g1h(g1h), _cm(cm), _task(task) { - _ref_processor = g1h->ref_processor(); - assert(_ref_processor != NULL, "should not be NULL"); + assert(_ref_processor == NULL, "should be initialized to NULL"); + + if (G1UseConcMarkReferenceProcessing) { + _ref_processor = g1h->ref_processor(); + assert(_ref_processor != NULL, "should not be NULL"); + } } }; @@ -3403,7 +3412,7 @@ void CMTask::push(oop obj) { HeapWord* objAddr = (HeapWord*) obj; assert(_g1h->is_in_g1_reserved(objAddr), "invariant"); - assert(!_g1h->is_on_free_list( + assert(!_g1h->is_on_master_free_list( _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant"); assert(!_g1h->is_obj_ill(obj), "invariant"); assert(_nextMarkBitMap->isMarked(objAddr), "invariant"); @@ -3649,7 +3658,7 @@ (void*) obj); assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" ); - assert(!_g1h->is_on_free_list( + assert(!_g1h->is_on_master_free_list( _g1h->heap_region_containing((HeapWord*) obj)), "invariant"); scan_object(obj); @@ -4422,3 +4431,175 @@ _marking_step_diffs_ms.add(0.5); } + +// These are formatting macros that are used below to ensure +// consistent formatting. The *_H_* versions are used to format the +// header for a particular value and they should be kept consistent +// with the corresponding macro. Also note that most of the macros add +// the necessary white space (as a prefix) which makes them a bit +// easier to compose. + +// All the output lines are prefixed with this string to be able to +// identify them easily in a large log file. +#define G1PPRL_LINE_PREFIX "###" + +#define G1PPRL_ADDR_BASE_FORMAT " "PTR_FORMAT"-"PTR_FORMAT +#ifdef _LP64 +#define G1PPRL_ADDR_BASE_H_FORMAT " %37s" +#else // _LP64 +#define G1PPRL_ADDR_BASE_H_FORMAT " %21s" +#endif // _LP64 + +// For per-region info +#define G1PPRL_TYPE_FORMAT " %-4s" +#define G1PPRL_TYPE_H_FORMAT " %4s" +#define G1PPRL_BYTE_FORMAT " "SIZE_FORMAT_W(9) +#define G1PPRL_BYTE_H_FORMAT " %9s" +#define G1PPRL_DOUBLE_FORMAT " %14.1f" +#define G1PPRL_DOUBLE_H_FORMAT " %14s" + +// For summary info +#define G1PPRL_SUM_ADDR_FORMAT(tag) " "tag":"G1PPRL_ADDR_BASE_FORMAT +#define G1PPRL_SUM_BYTE_FORMAT(tag) " "tag": "SIZE_FORMAT +#define G1PPRL_SUM_MB_FORMAT(tag) " "tag": %1.2f MB" +#define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag)" / %1.2f %%" + +G1PrintRegionLivenessInfoClosure:: +G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name) + : _out(out), + _total_used_bytes(0), _total_capacity_bytes(0), + _total_prev_live_bytes(0), _total_next_live_bytes(0), + _hum_used_bytes(0), _hum_capacity_bytes(0), + _hum_prev_live_bytes(0), _hum_next_live_bytes(0) { + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + MemRegion g1_committed = g1h->g1_committed(); + MemRegion g1_reserved = g1h->g1_reserved(); + double now = os::elapsedTime(); + + // Print the header of the output. 
+ _out->cr(); + _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); + _out->print_cr(G1PPRL_LINE_PREFIX" HEAP" + G1PPRL_SUM_ADDR_FORMAT("committed") + G1PPRL_SUM_ADDR_FORMAT("reserved") + G1PPRL_SUM_BYTE_FORMAT("region-size"), + g1_committed.start(), g1_committed.end(), + g1_reserved.start(), g1_reserved.end(), + HeapRegion::GrainBytes); + _out->print_cr(G1PPRL_LINE_PREFIX); + _out->print_cr(G1PPRL_LINE_PREFIX + G1PPRL_TYPE_H_FORMAT + G1PPRL_ADDR_BASE_H_FORMAT + G1PPRL_BYTE_H_FORMAT + G1PPRL_BYTE_H_FORMAT + G1PPRL_BYTE_H_FORMAT + G1PPRL_DOUBLE_H_FORMAT, + "type", "address-range", + "used", "prev-live", "next-live", "gc-eff"); +} + +// It takes as a parameter a reference to one of the _hum_* fields, it +// deduces the corresponding value for a region in a humongous region +// series (either the region size, or what's left if the _hum_* field +// is < the region size), and updates the _hum_* field accordingly. +size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) { + size_t bytes = 0; + // The > 0 check is to deal with the prev and next live bytes which + // could be 0. + if (*hum_bytes > 0) { + bytes = MIN2((size_t) HeapRegion::GrainBytes, *hum_bytes); + *hum_bytes -= bytes; + } + return bytes; +} + +// It deduces the values for a region in a humongous region series +// from the _hum_* fields and updates those accordingly. It assumes +// that that _hum_* fields have already been set up from the "starts +// humongous" region and we visit the regions in address order. +void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes, + size_t* capacity_bytes, + size_t* prev_live_bytes, + size_t* next_live_bytes) { + assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition"); + *used_bytes = get_hum_bytes(&_hum_used_bytes); + *capacity_bytes = get_hum_bytes(&_hum_capacity_bytes); + *prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes); + *next_live_bytes = get_hum_bytes(&_hum_next_live_bytes); +} + +bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { + const char* type = ""; + HeapWord* bottom = r->bottom(); + HeapWord* end = r->end(); + size_t capacity_bytes = r->capacity(); + size_t used_bytes = r->used(); + size_t prev_live_bytes = r->live_bytes(); + size_t next_live_bytes = r->next_live_bytes(); + double gc_eff = r->gc_efficiency(); + if (r->used() == 0) { + type = "FREE"; + } else if (r->is_survivor()) { + type = "SURV"; + } else if (r->is_young()) { + type = "EDEN"; + } else if (r->startsHumongous()) { + type = "HUMS"; + + assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 && + _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0, + "they should have been zeroed after the last time we used them"); + // Set up the _hum_* fields. + _hum_capacity_bytes = capacity_bytes; + _hum_used_bytes = used_bytes; + _hum_prev_live_bytes = prev_live_bytes; + _hum_next_live_bytes = next_live_bytes; + get_hum_bytes(&used_bytes, &capacity_bytes, + &prev_live_bytes, &next_live_bytes); + end = bottom + HeapRegion::GrainWords; + } else if (r->continuesHumongous()) { + type = "HUMC"; + get_hum_bytes(&used_bytes, &capacity_bytes, + &prev_live_bytes, &next_live_bytes); + assert(end == bottom + HeapRegion::GrainWords, "invariant"); + } else { + type = "OLD"; + } + + _total_used_bytes += used_bytes; + _total_capacity_bytes += capacity_bytes; + _total_prev_live_bytes += prev_live_bytes; + _total_next_live_bytes += next_live_bytes; + + // Print a line for this particular region. 
+ _out->print_cr(G1PPRL_LINE_PREFIX + G1PPRL_TYPE_FORMAT + G1PPRL_ADDR_BASE_FORMAT + G1PPRL_BYTE_FORMAT + G1PPRL_BYTE_FORMAT + G1PPRL_BYTE_FORMAT + G1PPRL_DOUBLE_FORMAT, + type, bottom, end, + used_bytes, prev_live_bytes, next_live_bytes, gc_eff); + + return false; +} + +G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() { + // Print the footer of the output. + _out->print_cr(G1PPRL_LINE_PREFIX); + _out->print_cr(G1PPRL_LINE_PREFIX + " SUMMARY" + G1PPRL_SUM_MB_FORMAT("capacity") + G1PPRL_SUM_MB_PERC_FORMAT("used") + G1PPRL_SUM_MB_PERC_FORMAT("prev-live") + G1PPRL_SUM_MB_PERC_FORMAT("next-live"), + bytes_to_mb(_total_capacity_bytes), + bytes_to_mb(_total_used_bytes), + perc(_total_used_bytes, _total_capacity_bytes), + bytes_to_mb(_total_prev_live_bytes), + perc(_total_prev_live_bytes, _total_capacity_bytes), + bytes_to_mb(_total_next_live_bytes), + perc(_total_next_live_bytes, _total_capacity_bytes)); + _out->cr(); +}
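G1PrintRegionLivenessInfoClosure handles humongous object series by recording the totals at the "starts humongous" region and then handing out at most one region's worth (GrainBytes) to each region in the series, decrementing what remains. That deduction step is shown below in isolation; the 1 MB region size is an assumption and take_hum_bytes is an invented stand-in for get_hum_bytes().

#include <cstddef>

static const size_t kGrainBytes = 1024 * 1024;   // assumption: 1 MB regions

// Take up to one region's worth out of the running humongous total,
// mirroring G1PrintRegionLivenessInfoClosure::get_hum_bytes().
static size_t take_hum_bytes(size_t* remaining) {
  size_t bytes = 0;
  if (*remaining > 0) {
    bytes = (*remaining < kGrainBytes) ? *remaining : kGrainBytes;
    *remaining -= bytes;
  }
  return bytes;
}

// e.g. a 2.5 MB humongous object spread over three regions yields
// 1 MB, 1 MB and 0.5 MB from successive calls, ending at zero.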
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1149,4 +1149,54 @@ #endif // _MARKING_STATS_ }; +// Class that's used to print out per-region liveness +// information. It's currently used at the end of marking and also +// after we sort the old regions at the end of the cleanup operation. +class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure { +private: + outputStream* _out; + + // Accumulators for these values. + size_t _total_used_bytes; + size_t _total_capacity_bytes; + size_t _total_prev_live_bytes; + size_t _total_next_live_bytes; + + // These are set up when we come across a "starts humongous" region + // (as this is where most of this information is stored, not in the + // subsequent "continues humongous" regions). After that, for every + // region in a given humongous region series we deduce the right + // values for it by simply subtracting the appropriate amount from + // these fields. All these values should reach 0 after we've visited + // the last region in the series. + size_t _hum_used_bytes; + size_t _hum_capacity_bytes; + size_t _hum_prev_live_bytes; + size_t _hum_next_live_bytes; + + static double perc(size_t val, size_t total) { + if (total == 0) { + return 0.0; + } else { + return 100.0 * ((double) val / (double) total); + } + } + + static double bytes_to_mb(size_t val) { + return (double) val / (double) M; + } + + // See the .cpp file. + size_t get_hum_bytes(size_t* hum_bytes); + void get_hum_bytes(size_t* used_bytes, size_t* capacity_bytes, + size_t* prev_live_bytes, size_t* next_live_bytes); + +public: + // The header and footer are printed in the constructor and + // destructor respectively. + G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name); + virtual bool doHeapRegion(HeapRegion* r); + ~G1PrintRegionLivenessInfoClosure(); +}; + #endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
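Given the note above that the header comes from the constructor, one table row from each doHeapRegion() call and the footer from the destructor, a call site would look roughly like the sketch below. This is a usage sketch, not code from this changeset; it assumes the usual G1CollectedHeap::heap()->heap_region_iterate() entry point and the gclog_or_tty stream, and the phase label is arbitrary.

{
  // Header is printed while constructing the closure.
  G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
  // One output line per region.
  G1CollectedHeap::heap()->heap_region_iterate(&cl);
  // Footer (the SUMMARY line) is printed when cl goes out of scope.
}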
--- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -237,9 +237,9 @@ // The following will finish freeing up any regions that we // found to be empty during cleanup. We'll do this part // without joining the suspendible set. If an evacuation pause - // takes places, then we would carry on freeing regions in + // takes place, then we would carry on freeing regions in // case they are needed by the pause. If a Full GC takes - // places, it would wait for us to process the regions + // place, it would wait for us to process the regions // reclaimed by cleanup. double cleanup_start_sec = os::elapsedTime();
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_implementation/g1/g1AllocRegion.inline.hpp" +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" + +G1CollectedHeap* G1AllocRegion::_g1h = NULL; +HeapRegion* G1AllocRegion::_dummy_region = NULL; + +void G1AllocRegion::setup(G1CollectedHeap* g1h, HeapRegion* dummy_region) { + assert(_dummy_region == NULL, "should be set once"); + assert(dummy_region != NULL, "pre-condition"); + assert(dummy_region->free() == 0, "pre-condition"); + + // Make sure that any allocation attempt on this region will fail + // and will not trigger any asserts. + assert(allocate(dummy_region, 1, false) == NULL, "should fail"); + assert(par_allocate(dummy_region, 1, false) == NULL, "should fail"); + assert(allocate(dummy_region, 1, true) == NULL, "should fail"); + assert(par_allocate(dummy_region, 1, true) == NULL, "should fail"); + + _g1h = g1h; + _dummy_region = dummy_region; +} + +void G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region, + bool bot_updates) { + assert(alloc_region != NULL && alloc_region != _dummy_region, + "pre-condition"); + + // Other threads might still be trying to allocate using a CAS out + // of the region we are trying to retire, as they can do so without + // holding the lock. So, we first have to make sure that noone else + // can allocate out of it by doing a maximal allocation. Even if our + // CAS attempt fails a few times, we'll succeed sooner or later + // given that failed CAS attempts mean that the region is getting + // closed to being full. + size_t free_word_size = alloc_region->free() / HeapWordSize; + + // This is the minimum free chunk we can turn into a dummy + // object. If the free space falls below this, then noone can + // allocate in this region anyway (all allocation requests will be + // of a size larger than this) so we won't have to perform the dummy + // allocation. + size_t min_word_size_to_fill = CollectedHeap::min_fill_size(); + + while (free_word_size >= min_word_size_to_fill) { + HeapWord* dummy = par_allocate(alloc_region, free_word_size, bot_updates); + if (dummy != NULL) { + // If the allocation was successful we should fill in the space. 
+ CollectedHeap::fill_with_object(dummy, free_word_size); + alloc_region->set_pre_dummy_top(dummy); + break; + } + + free_word_size = alloc_region->free() / HeapWordSize; + // It's also possible that someone else beats us to the + // allocation and they fill up the region. In that case, we can + // just get out of the loop. + } + assert(alloc_region->free() / HeapWordSize < min_word_size_to_fill, + "post-condition"); +} + +void G1AllocRegion::retire(bool fill_up) { + assert(_alloc_region != NULL, ar_ext_msg(this, "not initialized properly")); + + trace("retiring"); + HeapRegion* alloc_region = _alloc_region; + if (alloc_region != _dummy_region) { + // We never have to check whether the active region is empty or not, + // and potentially free it if it is, given that it's guaranteed that + // it will never be empty. + assert(!alloc_region->is_empty(), + ar_ext_msg(this, "the alloc region should never be empty")); + + if (fill_up) { + fill_up_remaining_space(alloc_region, _bot_updates); + } + + assert(alloc_region->used() >= _used_bytes_before, + ar_ext_msg(this, "invariant")); + size_t allocated_bytes = alloc_region->used() - _used_bytes_before; + retire_region(alloc_region, allocated_bytes); + _used_bytes_before = 0; + _alloc_region = _dummy_region; + } + trace("retired"); +} + +HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size, + bool force) { + assert(_alloc_region == _dummy_region, ar_ext_msg(this, "pre-condition")); + assert(_used_bytes_before == 0, ar_ext_msg(this, "pre-condition")); + + trace("attempting region allocation"); + HeapRegion* new_alloc_region = allocate_new_region(word_size, force); + if (new_alloc_region != NULL) { + new_alloc_region->reset_pre_dummy_top(); + // Need to do this before the allocation + _used_bytes_before = new_alloc_region->used(); + HeapWord* result = allocate(new_alloc_region, word_size, _bot_updates); + assert(result != NULL, ar_ext_msg(this, "the allocation should succeeded")); + + OrderAccess::storestore(); + // Note that we first perform the allocation and then we store the + // region in _alloc_region. This is the reason why an active region + // can never be empty. + _alloc_region = new_alloc_region; + trace("region allocation successful"); + return result; + } else { + trace("region allocation failed"); + return NULL; + } + ShouldNotReachHere(); +} + +void G1AllocRegion::fill_in_ext_msg(ar_ext_msg* msg, const char* message) { + msg->append("[%s] %s b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT, + _name, message, BOOL_TO_STR(_bot_updates), + _alloc_region, _used_bytes_before); +} + +void G1AllocRegion::init() { + trace("initializing"); + assert(_alloc_region == NULL && _used_bytes_before == 0, + ar_ext_msg(this, "pre-condition")); + assert(_dummy_region != NULL, "should have been set"); + _alloc_region = _dummy_region; + trace("initialized"); +} + +HeapRegion* G1AllocRegion::release() { + trace("releasing"); + HeapRegion* alloc_region = _alloc_region; + retire(false /* fill_up */); + assert(_alloc_region == _dummy_region, "post-condition of retire()"); + _alloc_region = NULL; + trace("released"); + return (alloc_region == _dummy_region) ? NULL : alloc_region; +} + +#if G1_ALLOC_REGION_TRACING +void G1AllocRegion::trace(const char* str, size_t word_size, HeapWord* result) { + // All the calls to trace that set either just the size or the size + // and the result are considered part of level 2 tracing and are + // skipped during level 1 tracing. 
+ if ((word_size == 0 && result == NULL) || (G1_ALLOC_REGION_TRACING > 1)) { + const size_t buffer_length = 128; + char hr_buffer[buffer_length]; + char rest_buffer[buffer_length]; + + HeapRegion* alloc_region = _alloc_region; + if (alloc_region == NULL) { + jio_snprintf(hr_buffer, buffer_length, "NULL"); + } else if (alloc_region == _dummy_region) { + jio_snprintf(hr_buffer, buffer_length, "DUMMY"); + } else { + jio_snprintf(hr_buffer, buffer_length, + HR_FORMAT, HR_FORMAT_PARAMS(alloc_region)); + } + + if (G1_ALLOC_REGION_TRACING > 1) { + if (result != NULL) { + jio_snprintf(rest_buffer, buffer_length, SIZE_FORMAT" "PTR_FORMAT, + word_size, result); + } else if (word_size != 0) { + jio_snprintf(rest_buffer, buffer_length, SIZE_FORMAT, word_size); + } else { + jio_snprintf(rest_buffer, buffer_length, ""); + } + } else { + jio_snprintf(rest_buffer, buffer_length, ""); + } + + tty->print_cr("[%s] %s : %s %s", _name, hr_buffer, str, rest_buffer); + } +} +#endif // G1_ALLOC_REGION_TRACING + +G1AllocRegion::G1AllocRegion(const char* name, + bool bot_updates) + : _name(name), _bot_updates(bot_updates), + _alloc_region(NULL), _used_bytes_before(0) { } +
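The fill_up_remaining_space() loop in this file leans on one property of the race it is in: a failed CAS can only mean that somebody else allocated, so the free space only ever shrinks and the loop must terminate. A standalone toy model of that argument (plain C++11 with std::atomic, made-up sizes, no HotSpot types) is sketched below.

// Toy model of the dummy-fill race in fill_up_remaining_space().
#include <atomic>
#include <cstddef>
#include <cstdio>

struct ToyRegion {
  std::atomic<size_t> top;   // words handed out so far
  size_t end;                // capacity in words

  size_t free_words() const { return end - top.load(); }

  // MT-safe bump allocation, standing in for par_allocate(): succeeds only
  // if word_size words are still free at the instant of the CAS.
  bool par_allocate(size_t word_size) {
    size_t cur = top.load();
    while (cur + word_size <= end) {
      if (top.compare_exchange_weak(cur, cur + word_size)) {
        return true;
      }
      // cur has been refreshed; if the region filled up we fall out of the loop
    }
    return false;
  }
};

static const size_t kMinFillWords = 2;  // stand-in for CollectedHeap::min_fill_size()

// Mirrors fill_up_remaining_space(): claim everything that is left in one
// dummy allocation so no one else can allocate here any more.
static void fill_up(ToyRegion* r) {
  size_t free_words = r->free_words();
  while (free_words >= kMinFillWords) {
    if (r->par_allocate(free_words)) {
      break;                            // we own the rest of the region
    }
    free_words = r->free_words();       // someone beat us to it; free space only shrinks
  }
}

int main() {
  ToyRegion r;
  r.top = 0;
  r.end = 128;
  r.par_allocate(100);                  // pretend mutators already used most of it
  fill_up(&r);
  std::printf("free words after fill_up: %zu\n", r.free_words());
  return 0;
}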
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_HPP +#include "gc_implementation/g1/heapRegion.hpp" + +class G1CollectedHeap; + +// 0 -> no tracing, 1 -> basic tracing, 2 -> basic + allocation tracing +#define G1_ALLOC_REGION_TRACING 0 + +class ar_ext_msg; + +// A class that holds a region that is active in satisfying allocation +// requests, potentially issued in parallel. When the active region is +// full it will be retired and replaced with a new one. The +// implementation assumes that fast-path allocations will be lock-free +// and a lock will need to be taken when the active region needs to be +// replaced. + +class G1AllocRegion VALUE_OBJ_CLASS_SPEC { + friend class ar_ext_msg; + +private: + // The active allocating region we are currently allocating out + // of. The invariant is that if this object is initialized (i.e., + // init() has been called and release() has not) then _alloc_region + // is either an active allocating region or the dummy region (i.e., + // it can never be NULL) and this object can be used to satisfy + // allocation requests. If this object is not initialized + // (i.e. init() has not been called or release() has been called) + // then _alloc_region is NULL and this object should not be used to + // satisfy allocation requests (it was done this way to force the + // correct use of init() and release()). + HeapRegion* _alloc_region; + + // When we set up a new active region we save its used bytes in this + // field so that, when we retire it, we can calculate how much space + // we allocated in it. + size_t _used_bytes_before; + + // Specifies whether the allocate calls will do BOT updates or not. + bool _bot_updates; + + // Useful for debugging and tracing. + const char* _name; + + // A dummy region (i.e., it's been allocated specially for this + // purpose and it is not part of the heap) that is full (i.e., top() + // == end()). When we don't have a valid active region we make + // _alloc_region point to this. This allows us to skip checking + // whether the _alloc_region is NULL or not. + static HeapRegion* _dummy_region; + + // Some of the methods below take a bot_updates parameter. Its value + // should be the same as the _bot_updates field.
The idea is that + // the parameter will be a constant for a particular alloc region + // and, given that these methods will be hopefully inlined, the + // compiler should compile out the test. + + // Perform a non-MT-safe allocation out of the given region. + static inline HeapWord* allocate(HeapRegion* alloc_region, + size_t word_size, + bool bot_updates); + + // Perform a MT-safe allocation out of the given region. + static inline HeapWord* par_allocate(HeapRegion* alloc_region, + size_t word_size, + bool bot_updates); + + // Ensure that the region passed as a parameter has been filled up + // so that noone else can allocate out of it any more. + static void fill_up_remaining_space(HeapRegion* alloc_region, + bool bot_updates); + + // Retire the active allocating region. If fill_up is true then make + // sure that the region is full before we retire it so that noone + // else can allocate out of it. + void retire(bool fill_up); + + // Allocate a new active region and use it to perform a word_size + // allocation. The force parameter will be passed on to + // G1CollectedHeap::allocate_new_alloc_region() and tells it to try + // to allocate a new region even if the max has been reached. + HeapWord* new_alloc_region_and_allocate(size_t word_size, bool force); + + void fill_in_ext_msg(ar_ext_msg* msg, const char* message); + +protected: + // For convenience as subclasses use it. + static G1CollectedHeap* _g1h; + + virtual HeapRegion* allocate_new_region(size_t word_size, bool force) = 0; + virtual void retire_region(HeapRegion* alloc_region, + size_t allocated_bytes) = 0; + + G1AllocRegion(const char* name, bool bot_updates); + +public: + static void setup(G1CollectedHeap* g1h, HeapRegion* dummy_region); + + HeapRegion* get() const { + // Make sure that the dummy region does not escape this class. + return (_alloc_region == _dummy_region) ? NULL : _alloc_region; + } + + // The following two are the building blocks for the allocation method. + + // First-level allocation: Should be called without holding a + // lock. It will try to allocate lock-free out of the active region, + // or return NULL if it was unable to. + inline HeapWord* attempt_allocation(size_t word_size, bool bot_updates); + + // Second-level allocation: Should be called while holding a + // lock. It will try to first allocate lock-free out of the active + // region or, if it's unable to, it will try to replace the active + // alloc region with a new one. We require that the caller takes the + // appropriate lock before calling this so that it is easier to make + // it conform to its locking protocol. + inline HeapWord* attempt_allocation_locked(size_t word_size, + bool bot_updates); + + // Should be called to allocate a new region even if the max of this + // type of regions has been reached. Should only be called if other + // allocation attempts have failed and we are not holding a valid + // active region. + inline HeapWord* attempt_allocation_force(size_t word_size, + bool bot_updates); + + // Should be called before we start using this object. + void init(); + + // Should be called when we want to release the active region which + // is returned after it's been retired. 
+ HeapRegion* release(); + +#if G1_ALLOC_REGION_TRACING + void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL); +#else // G1_ALLOC_REGION_TRACING + void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL) { } +#endif // G1_ALLOC_REGION_TRACING +}; + +class ar_ext_msg : public err_msg { +public: + ar_ext_msg(G1AllocRegion* alloc_region, const char *message) : err_msg("") { + alloc_region->fill_in_ext_msg(this, message); + } +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_HPP
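The only heap-specific behaviour a concrete alloc region has to supply is where new regions come from (allocate_new_region) and how a retired one is accounted for (retire_region). The sketch below shows the shape of such a subclass; the two G1CollectedHeap helpers it calls, new_mutator_region() and account_retired_region(), are invented names used for illustration and are not part of this changeset.

// Hypothetical subclass sketch; only the G1AllocRegion API above is real,
// the two _g1h helpers are made-up placeholders.
class MutatorAllocRegionSketch : public G1AllocRegion {
protected:
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force) {
    // Ask the heap for a fresh young region; force can override the
    // young-list limit (e.g., while the GC locker is active).
    return _g1h->new_mutator_region(word_size, force);            // hypothetical helper
  }

  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes) {
    // Report how much of the retired region was actually allocated into.
    _g1h->account_retired_region(alloc_region, allocated_bytes);  // hypothetical helper
  }

public:
  MutatorAllocRegionSketch()
    : G1AllocRegion("Mutator Alloc Region (sketch)",
                    false /* bot_updates: young regions do not update the BOT */) { }
};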
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_INLINE_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_INLINE_HPP + +#include "gc_implementation/g1/g1AllocRegion.hpp" + +inline HeapWord* G1AllocRegion::allocate(HeapRegion* alloc_region, + size_t word_size, + bool bot_updates) { + assert(alloc_region != NULL, err_msg("pre-condition")); + + if (!bot_updates) { + return alloc_region->allocate_no_bot_updates(word_size); + } else { + return alloc_region->allocate(word_size); + } +} + +inline HeapWord* G1AllocRegion::par_allocate(HeapRegion* alloc_region, + size_t word_size, + bool bot_updates) { + assert(alloc_region != NULL, err_msg("pre-condition")); + assert(!alloc_region->is_empty(), err_msg("pre-condition")); + + if (!bot_updates) { + return alloc_region->par_allocate_no_bot_updates(word_size); + } else { + return alloc_region->par_allocate(word_size); + } +} + +inline HeapWord* G1AllocRegion::attempt_allocation(size_t word_size, + bool bot_updates) { + assert(bot_updates == _bot_updates, ar_ext_msg(this, "pre-condition")); + + HeapRegion* alloc_region = _alloc_region; + assert(alloc_region != NULL, ar_ext_msg(this, "not initialized properly")); + + HeapWord* result = par_allocate(alloc_region, word_size, bot_updates); + if (result != NULL) { + trace("alloc", word_size, result); + return result; + } + trace("alloc failed", word_size); + return NULL; +} + +inline HeapWord* G1AllocRegion::attempt_allocation_locked(size_t word_size, + bool bot_updates) { + // First we have to tedo the allocation, assuming we're holding the + // appropriate lock, in case another thread changed the region while + // we were waiting to get the lock. 
+ HeapWord* result = attempt_allocation(word_size, bot_updates); + if (result != NULL) { + return result; + } + + retire(true /* fill_up */); + result = new_alloc_region_and_allocate(word_size, false /* force */); + if (result != NULL) { + trace("alloc locked (second attempt)", word_size, result); + return result; + } + trace("alloc locked failed", word_size); + return NULL; +} + +inline HeapWord* G1AllocRegion::attempt_allocation_force(size_t word_size, + bool bot_updates) { + assert(bot_updates == _bot_updates, ar_ext_msg(this, "pre-condition")); + assert(_alloc_region != NULL, ar_ext_msg(this, "not initialized properly")); + + trace("forcing alloc"); + HeapWord* result = new_alloc_region_and_allocate(word_size, true /* force */); + if (result != NULL) { + trace("alloc forced", word_size, result); + return result; + } + trace("alloc forced failed", word_size); + return NULL; +} + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_INLINE_HPP
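Putting the two levels together, the intended calling protocol is: try attempt_allocation() with no lock held, and only after it fails take the Heap_lock and call attempt_allocation_locked(), which may retire the current region and install a new one. The outline below condenses the caller code that appears in the g1CollectedHeap.cpp changes further down in this changeset; it introduces no new API.

// Condensed caller-side outline of the two-level protocol (see
// G1CollectedHeap::attempt_allocation_slow() below).
HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
                                                            false /* bot_updates */);
if (result == NULL) {
  MutexLockerEx x(Heap_lock);
  // Retry under the lock; this may retire the active region and replace it.
  result = _mutator_alloc_region.attempt_allocation_locked(word_size,
                                                           false /* bot_updates */);
}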
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -28,6 +28,7 @@ #include "gc_implementation/g1/concurrentG1Refine.hpp" #include "gc_implementation/g1/concurrentG1RefineThread.hpp" #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" +#include "gc_implementation/g1/g1AllocRegion.inline.hpp" #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/g1CollectorPolicy.hpp" #include "gc_implementation/g1/g1MarkSweep.hpp" @@ -479,7 +480,7 @@ // Private methods. HeapRegion* -G1CollectedHeap::new_region_try_secondary_free_list(size_t word_size) { +G1CollectedHeap::new_region_try_secondary_free_list() { MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); while (!_secondary_free_list.is_empty() || free_regions_coming()) { if (!_secondary_free_list.is_empty()) { @@ -517,8 +518,7 @@ return NULL; } -HeapRegion* G1CollectedHeap::new_region_work(size_t word_size, - bool do_expand) { +HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) { assert(!isHumongous(word_size) || word_size <= (size_t) HeapRegion::GrainWords, "the only time we use this to allocate a humongous region is " @@ -531,7 +531,7 @@ gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " "forced to look at the secondary_free_list"); } - res = new_region_try_secondary_free_list(word_size); + res = new_region_try_secondary_free_list(); if (res != NULL) { return res; } @@ -543,7 +543,7 @@ gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " "res == NULL, trying the secondary_free_list"); } - res = new_region_try_secondary_free_list(word_size); + res = new_region_try_secondary_free_list(); } if (res == NULL && do_expand) { if (expand(word_size * HeapWordSize)) { @@ -566,7 +566,7 @@ size_t word_size) { HeapRegion* alloc_region = NULL; if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) { - alloc_region = new_region_work(word_size, true /* do_expand */); + alloc_region = new_region(word_size, true /* do_expand */); if (purpose == GCAllocForSurvived && alloc_region != NULL) { alloc_region->set_survivor(); } @@ -579,12 +579,15 @@ int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions, size_t word_size) { + assert(isHumongous(word_size), "word_size should be humongous"); + assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); + int first = -1; if (num_regions == 1) { // Only one region to allocate, no need to go through the slower // path. The caller will attempt the expasion if this fails, so // let's not try to expand here too. - HeapRegion* hr = new_region_work(word_size, false /* do_expand */); + HeapRegion* hr = new_region(word_size, false /* do_expand */); if (hr != NULL) { first = hr->hrs_index(); } else { @@ -600,7 +603,7 @@ // request. If we are only allocating one region we use the common // region allocation code (see above). 
wait_while_free_regions_coming(); - append_secondary_free_list_if_not_empty(); + append_secondary_free_list_if_not_empty_with_lock(); if (free_regions() >= num_regions) { first = _hrs->find_contiguous(num_regions); @@ -608,7 +611,7 @@ for (int i = first; i < first + (int) num_regions; ++i) { HeapRegion* hr = _hrs->at(i); assert(hr->is_empty(), "sanity"); - assert(is_on_free_list(hr), "sanity"); + assert(is_on_master_free_list(hr), "sanity"); hr->set_pending_removal(true); } _free_list.remove_all_pending(num_regions); @@ -618,6 +621,126 @@ return first; } +HeapWord* +G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first, + size_t num_regions, + size_t word_size) { + assert(first != -1, "pre-condition"); + assert(isHumongous(word_size), "word_size should be humongous"); + assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); + + // Index of last region in the series + 1. + int last = first + (int) num_regions; + + // We need to initialize the region(s) we just discovered. This is + // a bit tricky given that it can happen concurrently with + // refinement threads refining cards on these regions and + // potentially wanting to refine the BOT as they are scanning + // those cards (this can happen shortly after a cleanup; see CR + // 6991377). So we have to set up the region(s) carefully and in + // a specific order. + + // The word size sum of all the regions we will allocate. + size_t word_size_sum = num_regions * HeapRegion::GrainWords; + assert(word_size <= word_size_sum, "sanity"); + + // This will be the "starts humongous" region. + HeapRegion* first_hr = _hrs->at(first); + // The header of the new object will be placed at the bottom of + // the first region. + HeapWord* new_obj = first_hr->bottom(); + // This will be the new end of the first region in the series that + // should also match the end of the last region in the seriers. + HeapWord* new_end = new_obj + word_size_sum; + // This will be the new top of the first region that will reflect + // this allocation. + HeapWord* new_top = new_obj + word_size; + + // First, we need to zero the header of the space that we will be + // allocating. When we update top further down, some refinement + // threads might try to scan the region. By zeroing the header we + // ensure that any thread that will try to scan the region will + // come across the zero klass word and bail out. + // + // NOTE: It would not have been correct to have used + // CollectedHeap::fill_with_object() and make the space look like + // an int array. The thread that is doing the allocation will + // later update the object header to a potentially different array + // type and, for a very short period of time, the klass and length + // fields will be inconsistent. This could cause a refinement + // thread to calculate the object size incorrectly. + Copy::fill_to_words(new_obj, oopDesc::header_size(), 0); + + // We will set up the first region as "starts humongous". This + // will also update the BOT covering all the regions to reflect + // that there is a single object that starts at the bottom of the + // first region. + first_hr->set_startsHumongous(new_top, new_end); + + // Then, if there are any, we will set up the "continues + // humongous" regions. + HeapRegion* hr = NULL; + for (int i = first + 1; i < last; ++i) { + hr = _hrs->at(i); + hr->set_continuesHumongous(first_hr); + } + // If we have "continues humongous" regions (hr != NULL), then the + // end of the last one should match new_end. 
+ assert(hr == NULL || hr->end() == new_end, "sanity"); + + // Up to this point no concurrent thread would have been able to + // do any scanning on any region in this series. All the top + // fields still point to bottom, so the intersection between + // [bottom,top] and [card_start,card_end] will be empty. Before we + // update the top fields, we'll do a storestore to make sure that + // no thread sees the update to top before the zeroing of the + // object header and the BOT initialization. + OrderAccess::storestore(); + + // Now that the BOT and the object header have been initialized, + // we can update top of the "starts humongous" region. + assert(first_hr->bottom() < new_top && new_top <= first_hr->end(), + "new_top should be in this region"); + first_hr->set_top(new_top); + + // Now, we will update the top fields of the "continues humongous" + // regions. The reason we need to do this is that, otherwise, + // these regions would look empty and this will confuse parts of + // G1. For example, the code that looks for a consecutive number + // of empty regions will consider them empty and try to + // re-allocate them. We can extend is_empty() to also include + // !continuesHumongous(), but it is easier to just update the top + // fields here. The way we set top for all regions (i.e., top == + // end for all regions but the last one, top == new_top for the + // last one) is actually used when we will free up the humongous + // region in free_humongous_region(). + hr = NULL; + for (int i = first + 1; i < last; ++i) { + hr = _hrs->at(i); + if ((i + 1) == last) { + // last continues humongous region + assert(hr->bottom() < new_top && new_top <= hr->end(), + "new_top should fall on this region"); + hr->set_top(new_top); + } else { + // not last one + assert(new_top > hr->end(), "new_top should be above this region"); + hr->set_top(hr->end()); + } + } + // If we have continues humongous regions (hr != NULL), then the + // end of the last one should match new_end and its top should + // match new_top. + assert(hr == NULL || + (hr->end() == new_end && hr->top() == new_top), "sanity"); + + assert(first_hr->used() == word_size * HeapWordSize, "invariant"); + _summary_bytes_used += first_hr->used(); + _humongous_set.add(first_hr); + + return new_obj; +} + // If could fit into free regions w/o expansion, try. // Otherwise, if can expand, do so. // Otherwise, if using ex regions might help, try with ex given back. @@ -653,524 +776,24 @@ } } + HeapWord* result = NULL; if (first != -1) { - // Index of last region in the series + 1. - int last = first + (int) num_regions; - - // We need to initialize the region(s) we just discovered. This is - // a bit tricky given that it can happen concurrently with - // refinement threads refining cards on these regions and - // potentially wanting to refine the BOT as they are scanning - // those cards (this can happen shortly after a cleanup; see CR - // 6991377). So we have to set up the region(s) carefully and in - // a specific order. - - // The word size sum of all the regions we will allocate. - size_t word_size_sum = num_regions * HeapRegion::GrainWords; - assert(word_size <= word_size_sum, "sanity"); - - // This will be the "starts humongous" region. - HeapRegion* first_hr = _hrs->at(first); - // The header of the new object will be placed at the bottom of - // the first region. - HeapWord* new_obj = first_hr->bottom(); - // This will be the new end of the first region in the series that - // should also match the end of the last region in the seriers. 
- HeapWord* new_end = new_obj + word_size_sum; - // This will be the new top of the first region that will reflect - // this allocation. - HeapWord* new_top = new_obj + word_size; - - // First, we need to zero the header of the space that we will be - // allocating. When we update top further down, some refinement - // threads might try to scan the region. By zeroing the header we - // ensure that any thread that will try to scan the region will - // come across the zero klass word and bail out. - // - // NOTE: It would not have been correct to have used - // CollectedHeap::fill_with_object() and make the space look like - // an int array. The thread that is doing the allocation will - // later update the object header to a potentially different array - // type and, for a very short period of time, the klass and length - // fields will be inconsistent. This could cause a refinement - // thread to calculate the object size incorrectly. - Copy::fill_to_words(new_obj, oopDesc::header_size(), 0); - - // We will set up the first region as "starts humongous". This - // will also update the BOT covering all the regions to reflect - // that there is a single object that starts at the bottom of the - // first region. - first_hr->set_startsHumongous(new_top, new_end); - - // Then, if there are any, we will set up the "continues - // humongous" regions. - HeapRegion* hr = NULL; - for (int i = first + 1; i < last; ++i) { - hr = _hrs->at(i); - hr->set_continuesHumongous(first_hr); - } - // If we have "continues humongous" regions (hr != NULL), then the - // end of the last one should match new_end. - assert(hr == NULL || hr->end() == new_end, "sanity"); - - // Up to this point no concurrent thread would have been able to - // do any scanning on any region in this series. All the top - // fields still point to bottom, so the intersection between - // [bottom,top] and [card_start,card_end] will be empty. Before we - // update the top fields, we'll do a storestore to make sure that - // no thread sees the update to top before the zeroing of the - // object header and the BOT initialization. - OrderAccess::storestore(); - - // Now that the BOT and the object header have been initialized, - // we can update top of the "starts humongous" region. - assert(first_hr->bottom() < new_top && new_top <= first_hr->end(), - "new_top should be in this region"); - first_hr->set_top(new_top); - - // Now, we will update the top fields of the "continues humongous" - // regions. The reason we need to do this is that, otherwise, - // these regions would look empty and this will confuse parts of - // G1. For example, the code that looks for a consecutive number - // of empty regions will consider them empty and try to - // re-allocate them. We can extend is_empty() to also include - // !continuesHumongous(), but it is easier to just update the top - // fields here. The way we set top for all regions (i.e., top == - // end for all regions but the last one, top == new_top for the - // last one) is actually used when we will free up the humongous - // region in free_humongous_region(). 
- hr = NULL; - for (int i = first + 1; i < last; ++i) { - hr = _hrs->at(i); - if ((i + 1) == last) { - // last continues humongous region - assert(hr->bottom() < new_top && new_top <= hr->end(), - "new_top should fall on this region"); - hr->set_top(new_top); - } else { - // not last one - assert(new_top > hr->end(), "new_top should be above this region"); - hr->set_top(hr->end()); - } - } - // If we have continues humongous regions (hr != NULL), then the - // end of the last one should match new_end and its top should - // match new_top. - assert(hr == NULL || - (hr->end() == new_end && hr->top() == new_top), "sanity"); - - assert(first_hr->used() == word_size * HeapWordSize, "invariant"); - _summary_bytes_used += first_hr->used(); - _humongous_set.add(first_hr); - - return new_obj; + result = + humongous_obj_allocate_initialize_regions(first, num_regions, word_size); + assert(result != NULL, "it should always return a valid result"); } verify_region_sets_optional(); - return NULL; -} - -void -G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) { - // Other threads might still be trying to allocate using CASes out - // of the region we are retiring, as they can do so without holding - // the Heap_lock. So we first have to make sure that noone else can - // allocate in it by doing a maximal allocation. Even if our CAS - // attempt fails a few times, we'll succeed sooner or later given - // that a failed CAS attempt mean that the region is getting closed - // to being full (someone else succeeded in allocating into it). - size_t free_word_size = cur_alloc_region->free() / HeapWordSize; - - // This is the minimum free chunk we can turn into a dummy - // object. If the free space falls below this, then noone can - // allocate in this region anyway (all allocation requests will be - // of a size larger than this) so we won't have to perform the dummy - // allocation. - size_t min_word_size_to_fill = CollectedHeap::min_fill_size(); - - while (free_word_size >= min_word_size_to_fill) { - HeapWord* dummy = - cur_alloc_region->par_allocate_no_bot_updates(free_word_size); - if (dummy != NULL) { - // If the allocation was successful we should fill in the space. - CollectedHeap::fill_with_object(dummy, free_word_size); - break; - } - - free_word_size = cur_alloc_region->free() / HeapWordSize; - // It's also possible that someone else beats us to the - // allocation and they fill up the region. In that case, we can - // just get out of the loop - } - assert(cur_alloc_region->free() / HeapWordSize < min_word_size_to_fill, - "sanity"); - - retire_cur_alloc_region_common(cur_alloc_region); - assert(_cur_alloc_region == NULL, "post-condition"); -} - -// See the comment in the .hpp file about the locking protocol and -// assumptions of this method (and other related ones). 
-HeapWord* -G1CollectedHeap::replace_cur_alloc_region_and_allocate(size_t word_size, - bool at_safepoint, - bool do_dirtying, - bool can_expand) { - assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); - assert(_cur_alloc_region == NULL, - "replace_cur_alloc_region_and_allocate() should only be called " - "after retiring the previous current alloc region"); - assert(SafepointSynchronize::is_at_safepoint() == at_safepoint, - "at_safepoint and is_at_safepoint() should be a tautology"); - assert(!can_expand || g1_policy()->can_expand_young_list(), - "we should not call this method with can_expand == true if " - "we are not allowed to expand the young gen"); - - if (can_expand || !g1_policy()->is_young_list_full()) { - HeapRegion* new_cur_alloc_region = new_alloc_region(word_size); - if (new_cur_alloc_region != NULL) { - assert(new_cur_alloc_region->is_empty(), - "the newly-allocated region should be empty, " - "as right now we only allocate new regions out of the free list"); - g1_policy()->update_region_num(true /* next_is_young */); - set_region_short_lived_locked(new_cur_alloc_region); - - assert(!new_cur_alloc_region->isHumongous(), - "Catch a regression of this bug."); - - // We need to ensure that the stores to _cur_alloc_region and, - // subsequently, to top do not float above the setting of the - // young type. - OrderAccess::storestore(); - - // Now, perform the allocation out of the region we just - // allocated. Note that noone else can access that region at - // this point (as _cur_alloc_region has not been updated yet), - // so we can just go ahead and do the allocation without any - // atomics (and we expect this allocation attempt to - // suceeded). Given that other threads can attempt an allocation - // with a CAS and without needing the Heap_lock, if we assigned - // the new region to _cur_alloc_region before first allocating - // into it other threads might have filled up the new region - // before we got a chance to do the allocation ourselves. In - // that case, we would have needed to retire the region, grab a - // new one, and go through all this again. Allocating out of the - // new region before assigning it to _cur_alloc_region avoids - // all this. - HeapWord* result = - new_cur_alloc_region->allocate_no_bot_updates(word_size); - assert(result != NULL, "we just allocate out of an empty region " - "so allocation should have been successful"); - assert(is_in(result), "result should be in the heap"); - - // Now make sure that the store to _cur_alloc_region does not - // float above the store to top. - OrderAccess::storestore(); - _cur_alloc_region = new_cur_alloc_region; - - if (!at_safepoint) { - Heap_lock->unlock(); - } - - // do the dirtying, if necessary, after we release the Heap_lock - if (do_dirtying) { - dirty_young_block(result, word_size); - } - return result; - } - } - - assert(_cur_alloc_region == NULL, "we failed to allocate a new current " - "alloc region, it should still be NULL"); - assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); - return NULL; -} - -// See the comment in the .hpp file about the locking protocol and -// assumptions of this method (and other related ones). -HeapWord* -G1CollectedHeap::attempt_allocation_slow(size_t word_size) { - assert_heap_locked_and_not_at_safepoint(); - assert(!isHumongous(word_size), "attempt_allocation_slow() should not be " - "used for humongous allocations"); - - // We should only reach here when we were unable to allocate - // otherwise. 
So, we should have not active current alloc region. - assert(_cur_alloc_region == NULL, "current alloc region should be NULL"); - - // We will loop while succeeded is false, which means that we tried - // to do a collection, but the VM op did not succeed. So, when we - // exit the loop, either one of the allocation attempts was - // successful, or we succeeded in doing the VM op but which was - // unable to allocate after the collection. - for (int try_count = 1; /* we'll return or break */; try_count += 1) { - bool succeeded = true; - - // Every time we go round the loop we should be holding the Heap_lock. - assert_heap_locked(); - - if (GC_locker::is_active_and_needs_gc()) { - // We are locked out of GC because of the GC locker. We can - // allocate a new region only if we can expand the young gen. - - if (g1_policy()->can_expand_young_list()) { - // Yes, we are allowed to expand the young gen. Let's try to - // allocate a new current alloc region. - HeapWord* result = - replace_cur_alloc_region_and_allocate(word_size, - false, /* at_safepoint */ - true, /* do_dirtying */ - true /* can_expand */); - if (result != NULL) { - assert_heap_not_locked(); - return result; - } - } - // We could not expand the young gen further (or we could but we - // failed to allocate a new region). We'll stall until the GC - // locker forces a GC. - - // If this thread is not in a jni critical section, we stall - // the requestor until the critical section has cleared and - // GC allowed. When the critical section clears, a GC is - // initiated by the last thread exiting the critical section; so - // we retry the allocation sequence from the beginning of the loop, - // rather than causing more, now probably unnecessary, GC attempts. - JavaThread* jthr = JavaThread::current(); - assert(jthr != NULL, "sanity"); - if (jthr->in_critical()) { - if (CheckJNICalls) { - fatal("Possible deadlock due to allocating while" - " in jni critical section"); - } - // We are returning NULL so the protocol is that we're still - // holding the Heap_lock. - assert_heap_locked(); - return NULL; - } - - Heap_lock->unlock(); - GC_locker::stall_until_clear(); - - // No need to relock the Heap_lock. We'll fall off to the code - // below the else-statement which assumes that we are not - // holding the Heap_lock. - } else { - // We are not locked out. So, let's try to do a GC. The VM op - // will retry the allocation before it completes. - - // Read the GC count while holding the Heap_lock - unsigned int gc_count_before = SharedHeap::heap()->total_collections(); - - Heap_lock->unlock(); - - HeapWord* result = - do_collection_pause(word_size, gc_count_before, &succeeded); - assert_heap_not_locked(); - if (result != NULL) { - assert(succeeded, "the VM op should have succeeded"); - - // Allocations that take place on VM operations do not do any - // card dirtying and we have to do it here. - dirty_young_block(result, word_size); - return result; - } - } - - // Both paths that get us here from above unlock the Heap_lock. - assert_heap_not_locked(); - - // We can reach here when we were unsuccessful in doing a GC, - // because another thread beat us to it, or because we were locked - // out of GC due to the GC locker. In either case a new alloc - // region might be available so we will retry the allocation. - HeapWord* result = attempt_allocation(word_size); - if (result != NULL) { - assert_heap_not_locked(); - return result; - } - - // So far our attempts to allocate failed. 
The only time we'll go - // around the loop and try again is if we tried to do a GC and the - // VM op that we tried to schedule was not successful because - // another thread beat us to it. If that happened it's possible - // that by the time we grabbed the Heap_lock again and tried to - // allocate other threads filled up the young generation, which - // means that the allocation attempt after the GC also failed. So, - // it's worth trying to schedule another GC pause. - if (succeeded) { - break; - } - - // Give a warning if we seem to be looping forever. - if ((QueuedAllocationWarningCount > 0) && - (try_count % QueuedAllocationWarningCount == 0)) { - warning("G1CollectedHeap::attempt_allocation_slow() " - "retries %d times", try_count); - } - } - - assert_heap_locked(); - return NULL; -} - -// See the comment in the .hpp file about the locking protocol and -// assumptions of this method (and other related ones). -HeapWord* -G1CollectedHeap::attempt_allocation_humongous(size_t word_size, - bool at_safepoint) { - // This is the method that will allocate a humongous object. All - // allocation paths that attempt to allocate a humongous object - // should eventually reach here. Currently, the only paths are from - // mem_allocate() and attempt_allocation_at_safepoint(). - assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); - assert(isHumongous(word_size), "attempt_allocation_humongous() " - "should only be used for humongous allocations"); - assert(SafepointSynchronize::is_at_safepoint() == at_safepoint, - "at_safepoint and is_at_safepoint() should be a tautology"); - - HeapWord* result = NULL; - - // We will loop while succeeded is false, which means that we tried - // to do a collection, but the VM op did not succeed. So, when we - // exit the loop, either one of the allocation attempts was - // successful, or we succeeded in doing the VM op but which was - // unable to allocate after the collection. - for (int try_count = 1; /* we'll return or break */; try_count += 1) { - bool succeeded = true; - - // Given that humongous objects are not allocated in young - // regions, we'll first try to do the allocation without doing a - // collection hoping that there's enough space in the heap. - result = humongous_obj_allocate(word_size); - assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(), - "catch a regression of this bug."); - if (result != NULL) { - if (!at_safepoint) { - // If we're not at a safepoint, unlock the Heap_lock. - Heap_lock->unlock(); - } - return result; - } - - // If we failed to allocate the humongous object, we should try to - // do a collection pause (if we're allowed) in case it reclaims - // enough space for the allocation to succeed after the pause. - if (!at_safepoint) { - // Read the GC count while holding the Heap_lock - unsigned int gc_count_before = SharedHeap::heap()->total_collections(); - - // If we're allowed to do a collection we're not at a - // safepoint, so it is safe to unlock the Heap_lock. - Heap_lock->unlock(); - - result = do_collection_pause(word_size, gc_count_before, &succeeded); - assert_heap_not_locked(); - if (result != NULL) { - assert(succeeded, "the VM op should have succeeded"); - return result; - } - - // If we get here, the VM operation either did not succeed - // (i.e., another thread beat us to it) or it succeeded but - // failed to allocate the object. - - // If we're allowed to do a collection we're not at a - // safepoint, so it is safe to lock the Heap_lock. 
- Heap_lock->lock(); - } - - assert(result == NULL, "otherwise we should have exited the loop earlier"); - - // So far our attempts to allocate failed. The only time we'll go - // around the loop and try again is if we tried to do a GC and the - // VM op that we tried to schedule was not successful because - // another thread beat us to it. That way it's possible that some - // space was freed up by the thread that successfully scheduled a - // GC. So it's worth trying to allocate again. - if (succeeded) { - break; - } - - // Give a warning if we seem to be looping forever. - if ((QueuedAllocationWarningCount > 0) && - (try_count % QueuedAllocationWarningCount == 0)) { - warning("G1CollectedHeap::attempt_allocation_humongous " - "retries %d times", try_count); - } - } - - assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); - return NULL; -} - -HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size, - bool expect_null_cur_alloc_region) { - assert_at_safepoint(true /* should_be_vm_thread */); - assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region, - err_msg("the current alloc region was unexpectedly found " - "to be non-NULL, cur alloc region: "PTR_FORMAT" " - "expect_null_cur_alloc_region: %d word_size: "SIZE_FORMAT, - _cur_alloc_region, expect_null_cur_alloc_region, word_size)); - - if (!isHumongous(word_size)) { - if (!expect_null_cur_alloc_region) { - HeapRegion* cur_alloc_region = _cur_alloc_region; - if (cur_alloc_region != NULL) { - // We are at a safepoint so no reason to use the MT-safe version. - HeapWord* result = cur_alloc_region->allocate_no_bot_updates(word_size); - if (result != NULL) { - assert(is_in(result), "result should be in the heap"); - - // We will not do any dirtying here. This is guaranteed to be - // called during a safepoint and the thread that scheduled the - // pause will do the dirtying if we return a non-NULL result. - return result; - } - - retire_cur_alloc_region_common(cur_alloc_region); - } - } - - assert(_cur_alloc_region == NULL, - "at this point we should have no cur alloc region"); - return replace_cur_alloc_region_and_allocate(word_size, - true, /* at_safepoint */ - false /* do_dirtying */, - false /* can_expand */); - } else { - return attempt_allocation_humongous(word_size, - true /* at_safepoint */); - } - - ShouldNotReachHere(); + + return result; } HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) { assert_heap_not_locked_and_not_at_safepoint(); - assert(!isHumongous(word_size), "we do not allow TLABs of humongous size"); - - // First attempt: Try allocating out of the current alloc region - // using a CAS. If that fails, take the Heap_lock and retry the - // allocation, potentially replacing the current alloc region. - HeapWord* result = attempt_allocation(word_size); - if (result != NULL) { - assert_heap_not_locked(); - return result; - } - - // Second attempt: Go to the slower path where we might try to - // schedule a collection. - result = attempt_allocation_slow(word_size); - if (result != NULL) { - assert_heap_not_locked(); - return result; - } - - assert_heap_locked(); - // Need to unlock the Heap_lock before returning. 
- Heap_lock->unlock(); - return NULL; + assert(!isHumongous(word_size), "we do not allow humongous TLABs"); + + unsigned int dummy_gc_count_before; + return attempt_allocation(word_size, &dummy_gc_count_before); } HeapWord* @@ -1182,48 +805,18 @@ assert(!is_tlab, "mem_allocate() this should not be called directly " "to allocate TLABs"); - // Loop until the allocation is satisified, - // or unsatisfied after GC. + // Loop until the allocation is satisified, or unsatisfied after GC. for (int try_count = 1; /* we'll return */; try_count += 1) { unsigned int gc_count_before; - { - if (!isHumongous(word_size)) { - // First attempt: Try allocating out of the current alloc region - // using a CAS. If that fails, take the Heap_lock and retry the - // allocation, potentially replacing the current alloc region. - HeapWord* result = attempt_allocation(word_size); - if (result != NULL) { - assert_heap_not_locked(); - return result; - } - - assert_heap_locked(); - - // Second attempt: Go to the slower path where we might try to - // schedule a collection. - result = attempt_allocation_slow(word_size); - if (result != NULL) { - assert_heap_not_locked(); - return result; - } - } else { - // attempt_allocation_humongous() requires the Heap_lock to be held. - Heap_lock->lock(); - - HeapWord* result = attempt_allocation_humongous(word_size, - false /* at_safepoint */); - if (result != NULL) { - assert_heap_not_locked(); - return result; - } - } - - assert_heap_locked(); - // Read the gc count while the heap lock is held. - gc_count_before = SharedHeap::heap()->total_collections(); - - // Release the Heap_lock before attempting the collection. - Heap_lock->unlock(); + + HeapWord* result = NULL; + if (!isHumongous(word_size)) { + result = attempt_allocation(word_size, &gc_count_before); + } else { + result = attempt_allocation_humongous(word_size, &gc_count_before); + } + if (result != NULL) { + return result; } // Create the garbage collection operation... @@ -1231,7 +824,6 @@ // ...and get the VM thread to execute it. VMThread::execute(&op); - assert_heap_not_locked(); if (op.prologue_succeeded() && op.pause_succeeded()) { // If the operation was successful we'll return the result even // if it is NULL. If the allocation attempt failed immediately @@ -1257,21 +849,207 @@ } ShouldNotReachHere(); + return NULL; } -void G1CollectedHeap::abandon_cur_alloc_region() { +HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size, + unsigned int *gc_count_before_ret) { + // Make sure you read the note in attempt_allocation_humongous(). + + assert_heap_not_locked_and_not_at_safepoint(); + assert(!isHumongous(word_size), "attempt_allocation_slow() should not " + "be called for humongous allocation requests"); + + // We should only get here after the first-level allocation attempt + // (attempt_allocation()) failed to allocate. + + // We will loop until a) we manage to successfully perform the + // allocation or b) we successfully schedule a collection which + // fails to perform the allocation. b) is the only case when we'll + // return NULL. + HeapWord* result = NULL; + for (int try_count = 1; /* we'll return */; try_count += 1) { + bool should_try_gc; + unsigned int gc_count_before; + + { + MutexLockerEx x(Heap_lock); + + result = _mutator_alloc_region.attempt_allocation_locked(word_size, + false /* bot_updates */); + if (result != NULL) { + return result; + } + + // If we reach here, attempt_allocation_locked() above failed to + // allocate a new region. So the mutator alloc region should be NULL. 
+ assert(_mutator_alloc_region.get() == NULL, "only way to get here"); + + if (GC_locker::is_active_and_needs_gc()) { + if (g1_policy()->can_expand_young_list()) { + result = _mutator_alloc_region.attempt_allocation_force(word_size, + false /* bot_updates */); + if (result != NULL) { + return result; + } + } + should_try_gc = false; + } else { + // Read the GC count while still holding the Heap_lock. + gc_count_before = SharedHeap::heap()->total_collections(); + should_try_gc = true; + } + } + + if (should_try_gc) { + bool succeeded; + result = do_collection_pause(word_size, gc_count_before, &succeeded); + if (result != NULL) { + assert(succeeded, "only way to get back a non-NULL result"); + return result; + } + + if (succeeded) { + // If we get here we successfully scheduled a collection which + // failed to allocate. No point in trying to allocate + // further. We'll just return NULL. + MutexLockerEx x(Heap_lock); + *gc_count_before_ret = SharedHeap::heap()->total_collections(); + return NULL; + } + } else { + GC_locker::stall_until_clear(); + } + + // We can reach here if we were unsuccessul in scheduling a + // collection (because another thread beat us to it) or if we were + // stalled due to the GC locker. In either can we should retry the + // allocation attempt in case another thread successfully + // performed a collection and reclaimed enough space. We do the + // first attempt (without holding the Heap_lock) here and the + // follow-on attempt will be at the start of the next loop + // iteration (after taking the Heap_lock). + result = _mutator_alloc_region.attempt_allocation(word_size, + false /* bot_updates */); + if (result != NULL ){ + return result; + } + + // Give a warning if we seem to be looping forever. + if ((QueuedAllocationWarningCount > 0) && + (try_count % QueuedAllocationWarningCount == 0)) { + warning("G1CollectedHeap::attempt_allocation_slow() " + "retries %d times", try_count); + } + } + + ShouldNotReachHere(); + return NULL; +} + +HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size, + unsigned int * gc_count_before_ret) { + // The structure of this method has a lot of similarities to + // attempt_allocation_slow(). The reason these two were not merged + // into a single one is that such a method would require several "if + // allocation is not humongous do this, otherwise do that" + // conditional paths which would obscure its flow. In fact, an early + // version of this code did use a unified method which was harder to + // follow and, as a result, it had subtle bugs that were hard to + // track down. So keeping these two methods separate allows each to + // be more readable. It will be good to keep these two in sync as + // much as possible. + + assert_heap_not_locked_and_not_at_safepoint(); + assert(isHumongous(word_size), "attempt_allocation_humongous() " + "should only be called for humongous allocations"); + + // We will loop until a) we manage to successfully perform the + // allocation or b) we successfully schedule a collection which + // fails to perform the allocation. b) is the only case when we'll + // return NULL. + HeapWord* result = NULL; + for (int try_count = 1; /* we'll return */; try_count += 1) { + bool should_try_gc; + unsigned int gc_count_before; + + { + MutexLockerEx x(Heap_lock); + + // Given that humongous objects are not allocated in young + // regions, we'll first try to do the allocation without doing a + // collection hoping that there's enough space in the heap. 
+ result = humongous_obj_allocate(word_size); + if (result != NULL) { + return result; + } + + if (GC_locker::is_active_and_needs_gc()) { + should_try_gc = false; + } else { + // Read the GC count while still holding the Heap_lock. + gc_count_before = SharedHeap::heap()->total_collections(); + should_try_gc = true; + } + } + + if (should_try_gc) { + // If we failed to allocate the humongous object, we should try to + // do a collection pause (if we're allowed) in case it reclaims + // enough space for the allocation to succeed after the pause. + + bool succeeded; + result = do_collection_pause(word_size, gc_count_before, &succeeded); + if (result != NULL) { + assert(succeeded, "only way to get back a non-NULL result"); + return result; + } + + if (succeeded) { + // If we get here we successfully scheduled a collection which + // failed to allocate. No point in trying to allocate + // further. We'll just return NULL. + MutexLockerEx x(Heap_lock); + *gc_count_before_ret = SharedHeap::heap()->total_collections(); + return NULL; + } + } else { + GC_locker::stall_until_clear(); + } + + // We can reach here if we were unsuccessul in scheduling a + // collection (because another thread beat us to it) or if we were + // stalled due to the GC locker. In either can we should retry the + // allocation attempt in case another thread successfully + // performed a collection and reclaimed enough space. Give a + // warning if we seem to be looping forever. + + if ((QueuedAllocationWarningCount > 0) && + (try_count % QueuedAllocationWarningCount == 0)) { + warning("G1CollectedHeap::attempt_allocation_humongous() " + "retries %d times", try_count); + } + } + + ShouldNotReachHere(); + return NULL; +} + +HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size, + bool expect_null_mutator_alloc_region) { assert_at_safepoint(true /* should_be_vm_thread */); - - HeapRegion* cur_alloc_region = _cur_alloc_region; - if (cur_alloc_region != NULL) { - assert(!cur_alloc_region->is_empty(), - "the current alloc region can never be empty"); - assert(cur_alloc_region->is_young(), - "the current alloc region should be young"); - - retire_cur_alloc_region_common(cur_alloc_region); - } - assert(_cur_alloc_region == NULL, "post-condition"); + assert(_mutator_alloc_region.get() == NULL || + !expect_null_mutator_alloc_region, + "the current alloc region was unexpectedly found to be non-NULL"); + + if (!isHumongous(word_size)) { + return _mutator_alloc_region.attempt_allocation_locked(word_size, + false /* bot_updates */); + } else { + return humongous_obj_allocate(word_size); + } + + ShouldNotReachHere(); } void G1CollectedHeap::abandon_gc_alloc_regions() { @@ -1389,7 +1167,7 @@ g1_policy()->record_full_collection_start(); wait_while_free_regions_coming(); - append_secondary_free_list_if_not_empty(); + append_secondary_free_list_if_not_empty_with_lock(); gc_prologue(true); increment_total_collections(true /* full gc */); @@ -1399,8 +1177,8 @@ if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification + gclog_or_tty->print(" VerifyBeforeGC:"); prepare_for_verify(); - gclog_or_tty->print(" VerifyBeforeGC:"); Universe::verify(true); } @@ -1421,9 +1199,8 @@ concurrent_mark()->abort(); // Make sure we'll choose a new allocation region afterwards. 
- abandon_cur_alloc_region(); + release_mutator_alloc_region(); abandon_gc_alloc_regions(); - assert(_cur_alloc_region == NULL, "Invariant."); g1_rem_set()->cleanupHRRS(); tear_down_region_lists(); @@ -1444,7 +1221,7 @@ // how reference processing currently works in G1. // Temporarily make reference _discovery_ single threaded (non-MT). - ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false); + ReferenceProcessorMTDiscoveryMutator rp_disc_ser(ref_processor(), false); // Temporarily make refs discovery atomic ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true); @@ -1529,6 +1306,8 @@ // evacuation pause. clear_cset_fast_test(); + init_mutator_alloc_region(); + double end = os::elapsedTime(); g1_policy()->record_full_collection_end(); @@ -1702,8 +1481,9 @@ *succeeded = true; // Let's attempt the allocation first. - HeapWord* result = attempt_allocation_at_safepoint(word_size, - false /* expect_null_cur_alloc_region */); + HeapWord* result = + attempt_allocation_at_safepoint(word_size, + false /* expect_null_mutator_alloc_region */); if (result != NULL) { assert(*succeeded, "sanity"); return result; @@ -1730,7 +1510,7 @@ // Retry the allocation result = attempt_allocation_at_safepoint(word_size, - true /* expect_null_cur_alloc_region */); + true /* expect_null_mutator_alloc_region */); if (result != NULL) { assert(*succeeded, "sanity"); return result; @@ -1747,7 +1527,7 @@ // Retry the allocation once more result = attempt_allocation_at_safepoint(word_size, - true /* expect_null_cur_alloc_region */); + true /* expect_null_mutator_alloc_region */); if (result != NULL) { assert(*succeeded, "sanity"); return result; @@ -1778,7 +1558,7 @@ if (expand(expand_bytes)) { verify_region_sets_optional(); return attempt_allocation_at_safepoint(word_size, - false /* expect_null_cur_alloc_region */); + false /* expect_null_mutator_alloc_region */); } return NULL; } @@ -1922,7 +1702,6 @@ _evac_failure_scan_stack(NULL) , _mark_in_progress(false), _cg1r(NULL), _summary_bytes_used(0), - _cur_alloc_region(NULL), _refine_cte_cl(NULL), _full_collection(false), _free_list("Master Free List"), @@ -2081,7 +1860,6 @@ _g1_max_committed = _g1_committed; _hrs = new HeapRegionSeq(_expansion_regions); guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq"); - guarantee(_cur_alloc_region == NULL, "from constructor"); // 6843694 - ensure that the maximum region index can fit // in the remembered set structures. @@ -2177,6 +1955,22 @@ // Do later initialization work for concurrent refinement. _cg1r->init(); + // Here we allocate the dummy full region that is required by the + // G1AllocRegion class. If we don't pass an address in the reserved + // space here, lots of asserts fire. + MemRegion mr(_g1_reserved.start(), HeapRegion::GrainWords); + HeapRegion* dummy_region = new HeapRegion(_bot_shared, mr, true); + // We'll re-use the same region whether the alloc region will + // require BOT updates or not and, if it doesn't, then a non-young + // region will complain that it cannot support allocations without + // BOT updates. So we'll tag the dummy region as young to avoid that. + dummy_region->set_young(); + // Make sure it's full. 
+ dummy_region->set_top(dummy_region->end()); + G1AllocRegion::setup(this, dummy_region); + + init_mutator_alloc_region(); + return JNI_OK; } @@ -2201,16 +1995,16 @@ SharedHeap::ref_processing_init(); MemRegion mr = reserved_region(); - _ref_processor = ReferenceProcessor::create_ref_processor( - mr, // span - false, // Reference discovery is not atomic - true, // mt_discovery - &_is_alive_closure, // is alive closure - // for efficiency - ParallelGCThreads, - ParallelRefProcEnabled, - true); // Setting next fields of discovered - // lists requires a barrier. + _ref_processor = + new ReferenceProcessor(mr, // span + ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing + (int) ParallelGCThreads, // degree of mt processing + ParallelGCThreads > 1 || ConcGCThreads > 1, // mt discovery + (int) MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery + false, // Reference discovery is not atomic + &_is_alive_closure, // is alive closure for efficiency + true); // Setting next fields of discovered + // lists requires a barrier. } size_t G1CollectedHeap::capacity() const { @@ -2243,7 +2037,7 @@ "Should be owned on this thread's behalf."); size_t result = _summary_bytes_used; // Read only once in case it is set to NULL concurrently - HeapRegion* hr = _cur_alloc_region; + HeapRegion* hr = _mutator_alloc_region.get(); if (hr != NULL) result += hr->used(); return result; @@ -2306,13 +2100,11 @@ // to free(), resulting in a SIGSEGV. Note that this doesn't appear // to be a problem in the optimized build, since the two loads of the // current allocation region field are optimized away. - HeapRegion* car = _cur_alloc_region; - - // FIXME: should iterate over all regions? - if (car == NULL) { + HeapRegion* hr = _mutator_alloc_region.get(); + if (hr == NULL) { return 0; } - return car->free(); + return hr->free(); } bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { @@ -2763,16 +2555,12 @@ // since we can't allow tlabs to grow big enough to accomodate // humongous objects. - // We need to store the cur alloc region locally, since it might change - // between when we test for NULL and when we use it later. - ContiguousSpace* cur_alloc_space = _cur_alloc_region; + HeapRegion* hr = _mutator_alloc_region.get(); size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize; - - if (cur_alloc_space == NULL) { + if (hr == NULL) { return max_tlab_size; } else { - return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize), - max_tlab_size); + return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size); } } @@ -3346,6 +3134,7 @@ } verify_region_sets_optional(); + verify_dirty_young_regions(); { // This call will decide whether this pause is an initial-mark @@ -3377,15 +3166,14 @@ TraceMemoryManagerStats tms(false /* fullGC */); - // If there are any free regions available on the secondary_free_list - // make sure we append them to the free_list. However, we don't - // have to wait for the rest of the cleanup operation to - // finish. If it's still going on that's OK. If we run out of - // regions, the region allocation code will check the - // secondary_free_list and potentially wait if more free regions - // are coming (see new_region_try_secondary_free_list()). + // If the secondary_free_list is not empty, append it to the + // free_list. No need to wait for the cleanup operation to finish; + // the region allocation code will check the secondary_free_list + // and wait if necessary. 
If the G1StressConcRegionFreeing flag is + // set, skip this step so that the region allocation code has to + // get entries from the secondary_free_list. if (!G1StressConcRegionFreeing) { - append_secondary_free_list_if_not_empty(); + append_secondary_free_list_if_not_empty_with_lock(); } increment_gc_time_stamp(); @@ -3408,8 +3196,8 @@ if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification + gclog_or_tty->print(" VerifyBeforeGC:"); prepare_for_verify(); - gclog_or_tty->print(" VerifyBeforeGC:"); Universe::verify(false); } @@ -3425,7 +3213,7 @@ // Forget the current alloc region (we might even choose it to be part // of the collection set!). - abandon_cur_alloc_region(); + release_mutator_alloc_region(); // The elapsed time induced by the start time below deliberately elides // the possible verification above. @@ -3556,6 +3344,8 @@ g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); #endif // YOUNG_LIST_VERBOSE + init_mutator_alloc_region(); + double end_time_sec = os::elapsedTime(); double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS; g1_policy()->record_pause_time_ms(pause_time_ms); @@ -3638,6 +3428,15 @@ return gclab_word_size; } +void G1CollectedHeap::init_mutator_alloc_region() { + assert(_mutator_alloc_region.get() == NULL, "pre-condition"); + _mutator_alloc_region.init(); +} + +void G1CollectedHeap::release_mutator_alloc_region() { + _mutator_alloc_region.release(); + assert(_mutator_alloc_region.get() == NULL, "post-condition"); +} void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) { assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose"); @@ -3862,7 +3661,7 @@ if (r->is_empty()) { // We didn't actually allocate anything in it; let's just put // it back on the free list. - _free_list.add_as_tail(r); + _free_list.add_as_head(r); } else if (_retain_gc_alloc_region[ap] && !totally) { // retain it so that we can use it at the beginning of the next GC _retained_gc_alloc_regions[ap] = r; @@ -4996,7 +4795,7 @@ *pre_used += hr->used(); hr->hr_clear(par, true /* clear_space */); - free_list->add_as_tail(hr); + free_list->add_as_head(hr); } void G1CollectedHeap::free_humongous_region(HeapRegion* hr, @@ -5048,7 +4847,7 @@ } if (free_list != NULL && !free_list->is_empty()) { MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); - _free_list.add_as_tail(free_list); + _free_list.add_as_head(free_list); } if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) { MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag); @@ -5123,10 +4922,8 @@ CardTableModRefBS* _ct_bs; public: G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs) - : _ct_bs(ct_bs) - { } - virtual bool doHeapRegion(HeapRegion* r) - { + : _ct_bs(ct_bs) { } + virtual bool doHeapRegion(HeapRegion* r) { MemRegion mr(r->bottom(), r->end()); if (r->is_survivor()) { _ct_bs->verify_dirty_region(mr); @@ -5136,6 +4933,29 @@ return false; } }; + +void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) { + CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); + for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) { + // We cannot guarantee that [bottom(),end()] is dirty. Threads + // dirty allocated blocks as they allocate them. 
The thread that + // retires each region and replaces it with a new one will do a + // maximal allocation to fill in [pre_dummy_top(),end()] but will + // not dirty that area (one less thing to have to do while holding + // a lock). So we can only verify that [bottom(),pre_dummy_top()] + // is dirty. Also note that verify_dirty_region() requires + // mr.start() and mr.end() to be card aligned and pre_dummy_top() + // is not guaranteed to be. + MemRegion mr(hr->bottom(), + ct_bs->align_to_card_boundary(hr->pre_dummy_top())); + ct_bs->verify_dirty_region(mr); + } +} + +void G1CollectedHeap::verify_dirty_young_regions() { + verify_dirty_young_list(_young_list->first_region()); + verify_dirty_young_list(_young_list->first_survivor_region()); +} #endif void G1CollectedHeap::cleanUpCardTable() { @@ -5199,7 +5019,7 @@ size_t rs_lengths = 0; while (cur != NULL) { - assert(!is_on_free_list(cur), "sanity"); + assert(!is_on_master_free_list(cur), "sanity"); if (non_young) { if (cur->is_young()) { @@ -5474,8 +5294,6 @@ _refine_cte_cl->set_concurrent(concurrent); } -#ifdef ASSERT - bool G1CollectedHeap::is_in_closed_subset(const void* p) const { HeapRegion* hr = heap_region_containing(p); if (hr == NULL) { @@ -5484,7 +5302,44 @@ return hr->is_in(p); } } -#endif // ASSERT + +HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size, + bool force) { + assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); + assert(!force || g1_policy()->can_expand_young_list(), + "if force is true we should be able to expand the young list"); + if (force || !g1_policy()->is_young_list_full()) { + HeapRegion* new_alloc_region = new_region(word_size, + false /* do_expand */); + if (new_alloc_region != NULL) { + g1_policy()->update_region_num(true /* next_is_young */); + set_region_short_lived_locked(new_alloc_region); + return new_alloc_region; + } + } + return NULL; +} + +void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region, + size_t allocated_bytes) { + assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); + assert(alloc_region->is_young(), "all mutator alloc regions should be young"); + + g1_policy()->add_region_to_incremental_cset_lhs(alloc_region); + _summary_bytes_used += allocated_bytes; +} + +HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size, + bool force) { + return _g1h->new_mutator_alloc_region(word_size, force); +} + +void MutatorAllocRegion::retire_region(HeapRegion* alloc_region, + size_t allocated_bytes) { + _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes); +} + +// Heap region set verification class VerifyRegionListsClosure : public HeapRegionClosure { private: @@ -5546,13 +5401,10 @@ return; } - { - MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); - // Make sure we append the secondary_free_list on the free_list so - // that all free regions we will come across can be safely - // attributed to the free_list. - append_secondary_free_list(); - } + // Make sure we append the secondary_free_list on the free_list so + // that all free regions we will come across can be safely + // attributed to the free_list. + append_secondary_free_list_if_not_empty_with_lock(); // Finally, make sure that the region accounting in the lists is // consistent with what we see in the heap.
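The allocation rework above splits mutator allocation into a lock-free first attempt and a locked slow path (attempt_allocation_slow() / attempt_allocation_humongous()) that may schedule a pause, with the GC count read while the Heap_lock is held so the caller can tell whether a collection already happened. Below is a standalone sketch of that retry protocol; Heap, try_allocate_locked and schedule_collection are purely illustrative stand-ins, not HotSpot APIs.

#include <cstddef>
#include <mutex>

struct Heap {
  std::mutex heap_lock;              // stands in for Heap_lock
  unsigned   total_collections = 0;  // stands in for SharedHeap::total_collections()

  void* try_allocate_locked(std::size_t /*word_size*/) { return nullptr; }
  bool  schedule_collection(std::size_t /*word_size*/,
                            unsigned /*gc_count_before*/,
                            void** result) {
    // Pretend the pause was scheduled but could not satisfy the request.
    *result = nullptr;
    return true;
  }
};

void* allocate_slow(Heap& h, std::size_t word_size,
                    unsigned* gc_count_before_ret) {
  for (;;) {
    unsigned gc_count_before;
    {
      std::lock_guard<std::mutex> x(h.heap_lock);
      void* p = h.try_allocate_locked(word_size);
      if (p != nullptr) return p;
      // Read the GC count while still holding the lock, mirroring how the
      // real code reads total_collections() under the Heap_lock.
      gc_count_before = h.total_collections;
    }
    void* result = nullptr;
    if (h.schedule_collection(word_size, gc_count_before, &result)) {
      if (result != nullptr) return result;
      // A pause ran but still could not allocate: report the GC count and
      // let the caller (mem_allocate() in the real code) decide what to do.
      *gc_count_before_ret = gc_count_before;
      return nullptr;
    }
    // Another thread scheduled the pause first; loop and retry, since it
    // may have reclaimed enough space for this request.
  }
}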
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -26,6 +26,7 @@ #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP #include "gc_implementation/g1/concurrentMark.hpp" +#include "gc_implementation/g1/g1AllocRegion.hpp" #include "gc_implementation/g1/g1RemSet.hpp" #include "gc_implementation/g1/heapRegionSets.hpp" #include "gc_implementation/parNew/parGCAllocBuffer.hpp" @@ -56,7 +57,6 @@ class ConcurrentMark; class ConcurrentMarkThread; class ConcurrentG1Refine; -class ConcurrentZFThread; typedef OverflowTaskQueue<StarTask> RefToScanQueue; typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet; @@ -64,12 +64,6 @@ typedef int RegionIdx_t; // needs to hold [ 0..max_regions() ) typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion ) -enum G1GCThreadGroups { - G1CRGroup = 0, - G1ZFGroup = 1, - G1CMGroup = 2 -}; - enum GCAllocPurpose { GCAllocForTenured, GCAllocForSurvived, @@ -135,6 +129,15 @@ void print(); }; +class MutatorAllocRegion : public G1AllocRegion { +protected: + virtual HeapRegion* allocate_new_region(size_t word_size, bool force); + virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes); +public: + MutatorAllocRegion() + : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { } +}; + class RefineCardTableEntryClosure; class G1CollectedHeap : public SharedHeap { friend class VM_G1CollectForAllocation; @@ -142,6 +145,7 @@ friend class VM_G1CollectFull; friend class VM_G1IncCollectionPause; friend class VMStructs; + friend class MutatorAllocRegion; // Closures used in implementation. friend class G1ParCopyHelper; @@ -204,12 +208,15 @@ // The sequence of all heap regions in the heap. HeapRegionSeq* _hrs; - // The region from which normal-sized objects are currently being - // allocated. May be NULL. - HeapRegion* _cur_alloc_region; + // Alloc region used to satisfy mutator allocation requests. + MutatorAllocRegion _mutator_alloc_region; - // Postcondition: cur_alloc_region == NULL. - void abandon_cur_alloc_region(); + // It resets the mutator alloc region before new allocations can take place. + void init_mutator_alloc_region(); + + // It releases the mutator alloc region. + void release_mutator_alloc_region(); + void abandon_gc_alloc_regions(); // The to-space memory regions into which objects are being copied during @@ -294,9 +301,9 @@ // These are macros so that, if the assert fires, we get the correct // line number, file, etc. 
-#define heap_locking_asserts_err_msg(__extra_message) \ +#define heap_locking_asserts_err_msg(_extra_message_) \ err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s", \ - (__extra_message), \ + (_extra_message_), \ BOOL_TO_STR(Heap_lock->owned_by_self()), \ BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()), \ BOOL_TO_STR(Thread::current()->is_VM_thread())) @@ -307,11 +314,11 @@ heap_locking_asserts_err_msg("should be holding the Heap_lock")); \ } while (0) -#define assert_heap_locked_or_at_safepoint(__should_be_vm_thread) \ +#define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_) \ do { \ assert(Heap_lock->owned_by_self() || \ (SafepointSynchronize::is_at_safepoint() && \ - ((__should_be_vm_thread) == Thread::current()->is_VM_thread())), \ + ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \ heap_locking_asserts_err_msg("should be holding the Heap_lock or " \ "should be at a safepoint")); \ } while (0) @@ -338,10 +345,10 @@ "should not be at a safepoint")); \ } while (0) -#define assert_at_safepoint(__should_be_vm_thread) \ +#define assert_at_safepoint(_should_be_vm_thread_) \ do { \ assert(SafepointSynchronize::is_at_safepoint() && \ - ((__should_be_vm_thread) == Thread::current()->is_VM_thread()), \ + ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \ heap_locking_asserts_err_msg("should be at a safepoint")); \ } while (0) @@ -367,39 +374,38 @@ G1CollectorPolicy* _g1_policy; // This is the second level of trying to allocate a new region. If - // new_region_work didn't find a region in the free_list, this call - // will check whether there's anything available in the - // secondary_free_list and/or wait for more regions to appear in that - // list, if _free_regions_coming is set. - HeapRegion* new_region_try_secondary_free_list(size_t word_size); + // new_region() didn't find a region on the free_list, this call will + // check whether there's anything available on the + // secondary_free_list and/or wait for more regions to appear on + // that list, if _free_regions_coming is set. + HeapRegion* new_region_try_secondary_free_list(); - // It will try to allocate a single non-humongous HeapRegion - // sufficient for an allocation of the given word_size. If - // do_expand is true, it will attempt to expand the heap if - // necessary to satisfy the allocation request. Note that word_size - // is only used to make sure that we expand sufficiently but, given - // that the allocation request is assumed not to be humongous, - // having word_size is not strictly necessary (expanding by a single - // region will always be sufficient). But let's keep that parameter - // in case we need it in the future. - HeapRegion* new_region_work(size_t word_size, bool do_expand); + // Try to allocate a single non-humongous HeapRegion sufficient for + // an allocation of the given word_size. If do_expand is true, + // attempt to expand the heap if necessary to satisfy the allocation + // request. + HeapRegion* new_region(size_t word_size, bool do_expand); - // It will try to allocate a new region to be used for allocation by - // mutator threads. It will not try to expand the heap if not region - // is available. - HeapRegion* new_alloc_region(size_t word_size) { - return new_region_work(word_size, false /* do_expand */); - } - - // It will try to allocate a new region to be used for allocation by + // Try to allocate a new region to be used for allocation by // a GC thread. It will try to expand the heap if no region is // available. 
HeapRegion* new_gc_alloc_region(int purpose, size_t word_size); + // Attempt to satisfy a humongous allocation request of the given + // size by finding a contiguous set of free regions of num_regions + // length and remove them from the master free list. Return the + // index of the first region or -1 if the search was unsuccessful. int humongous_obj_allocate_find_first(size_t num_regions, size_t word_size); - // Attempt to allocate an object of the given (very large) "word_size". - // Returns "NULL" on failure. + // Initialize a contiguous set of free regions of length num_regions + // and starting at index first so that they appear as a single + // humongous region. + HeapWord* humongous_obj_allocate_initialize_regions(int first, + size_t num_regions, + size_t word_size); + + // Attempt to allocate a humongous object of the given size. Return + // NULL if unsuccessful. HeapWord* humongous_obj_allocate(size_t word_size); // The following two methods, allocate_new_tlab() and @@ -417,10 +423,6 @@ // * All non-TLAB allocation requests should go to mem_allocate() // and mem_allocate() should never be called with is_tlab == true. // - // * If the GC locker is active we currently stall until we can - // allocate a new young region. This will be changed in the - // near future (see CR 6994056). - // // * If either call cannot satisfy the allocation request using the // current allocating region, they will try to get a new one. If // this fails, they will attempt to do an evacuation pause and @@ -443,122 +445,38 @@ bool is_tlab, /* expected to be false */ bool* gc_overhead_limit_was_exceeded); - // The following methods, allocate_from_cur_allocation_region(), - // attempt_allocation(), attempt_allocation_locked(), - // replace_cur_alloc_region_and_allocate(), - // attempt_allocation_slow(), and attempt_allocation_humongous() - // have very awkward pre- and post-conditions with respect to - // locking: - // - // If they are called outside a safepoint they assume the caller - // holds the Heap_lock when it calls them. However, on exit they - // will release the Heap_lock if they return a non-NULL result, but - // keep holding the Heap_lock if they return a NULL result. The - // reason for this is that we need to dirty the cards that span - // allocated blocks on young regions to avoid having to take the - // slow path of the write barrier (for performance reasons we don't - // update RSets for references whose source is a young region, so we - // don't need to look at dirty cards on young regions). But, doing - // this card dirtying while holding the Heap_lock can be a - // scalability bottleneck, especially given that some allocation - // requests might be of non-trivial size (and the larger the region - // size is, the fewer allocations requests will be considered - // humongous, as the humongous size limit is a fraction of the - // region size). So, when one of these calls succeeds in allocating - // a block it does the card dirtying after it releases the Heap_lock - // which is why it will return without holding it. - // - // The above assymetry is the reason why locking / unlocking is done - // explicitly (i.e., with Heap_lock->lock() and - // Heap_lock->unlocked()) instead of using MutexLocker and - // MutexUnlocker objects. The latter would ensure that the lock is - // unlocked / re-locked at every possible exit out of the basic - // block. However, we only want that action to happen in selected - // places. 
- // - // Further, if the above methods are called during a safepoint, then - // naturally there's no assumption about the Heap_lock being held or - // there's no attempt to unlock it. The parameter at_safepoint - // indicates whether the call is made during a safepoint or not (as - // an optimization, to avoid reading the global flag with - // SafepointSynchronize::is_at_safepoint()). - // - // The methods share these parameters: - // - // * word_size : the size of the allocation request in words - // * at_safepoint : whether the call is done at a safepoint; this - // also determines whether a GC is permitted - // (at_safepoint == false) or not (at_safepoint == true) - // * do_dirtying : whether the method should dirty the allocated - // block before returning - // - // They all return either the address of the block, if they - // successfully manage to allocate it, or NULL. + // The following three methods take a gc_count_before_ret + // parameter which is used to return the GC count if the method + // returns NULL. Given that we are required to read the GC count + // while holding the Heap_lock, and these paths will take the + // Heap_lock at some point, it's easier to get them to read the GC + // count while holding the Heap_lock before they return NULL instead + // of the caller (namely: mem_allocate()) having to also take the + // Heap_lock just to read the GC count. + + // First-level mutator allocation attempt: try to allocate out of + // the mutator alloc region without taking the Heap_lock. This + // should only be used for non-humongous allocations. + inline HeapWord* attempt_allocation(size_t word_size, + unsigned int* gc_count_before_ret); - // It tries to satisfy an allocation request out of the current - // alloc region, which is passed as a parameter. It assumes that the - // caller has checked that the current alloc region is not NULL. - // Given that the caller has to check the current alloc region for - // at least NULL, it might as well pass it as the first parameter so - // that the method doesn't have to read it from the - // _cur_alloc_region field again. It is called from both - // attempt_allocation() and attempt_allocation_locked() and the - // with_heap_lock parameter indicates whether the caller was holding - // the heap lock when it called it or not. - inline HeapWord* allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region, - size_t word_size, - bool with_heap_lock); - - // First-level of allocation slow path: it attempts to allocate out - // of the current alloc region in a lock-free manner using a CAS. If - // that fails it takes the Heap_lock and calls - // attempt_allocation_locked() for the second-level slow path. - inline HeapWord* attempt_allocation(size_t word_size); - - // Second-level of allocation slow path: while holding the Heap_lock - // it tries to allocate out of the current alloc region and, if that - // fails, tries to allocate out of a new current alloc region. - inline HeapWord* attempt_allocation_locked(size_t word_size); + // Second-level mutator allocation attempt: take the Heap_lock and + // retry the allocation attempt, potentially scheduling a GC + // pause. This should only be used for non-humongous allocations. + HeapWord* attempt_allocation_slow(size_t word_size, + unsigned int* gc_count_before_ret); - // It assumes that the current alloc region has been retired and - // tries to allocate a new one. If it's successful, it performs the - // allocation out of the new current alloc region and updates - // _cur_alloc_region. 
Normally, it would try to allocate a new - // region if the young gen is not full, unless can_expand is true in - // which case it would always try to allocate a new region. - HeapWord* replace_cur_alloc_region_and_allocate(size_t word_size, - bool at_safepoint, - bool do_dirtying, - bool can_expand); - - // Third-level of allocation slow path: when we are unable to - // allocate a new current alloc region to satisfy an allocation - // request (i.e., when attempt_allocation_locked() fails). It will - // try to do an evacuation pause, which might stall due to the GC - // locker, and retry the allocation attempt when appropriate. - HeapWord* attempt_allocation_slow(size_t word_size); + // Takes the Heap_lock and attempts a humongous allocation. It can + // potentially schedule a GC pause. + HeapWord* attempt_allocation_humongous(size_t word_size, + unsigned int* gc_count_before_ret); - // The method that tries to satisfy a humongous allocation - // request. If it cannot satisfy it it will try to do an evacuation - // pause to perhaps reclaim enough space to be able to satisfy the - // allocation request afterwards. - HeapWord* attempt_allocation_humongous(size_t word_size, - bool at_safepoint); - - // It does the common work when we are retiring the current alloc region. - inline void retire_cur_alloc_region_common(HeapRegion* cur_alloc_region); - - // It retires the current alloc region, which is passed as a - // parameter (since, typically, the caller is already holding on to - // it). It sets _cur_alloc_region to NULL. - void retire_cur_alloc_region(HeapRegion* cur_alloc_region); - - // It attempts to do an allocation immediately before or after an - // evacuation pause and can only be called by the VM thread. It has - // slightly different assumptions that the ones before (i.e., - // assumes that the current alloc region has been retired). + // Allocation attempt that should be called during safepoints (e.g., + // at the end of a successful GC). expect_null_mutator_alloc_region + // specifies whether the mutator alloc region is expected to be NULL + // or not. HeapWord* attempt_allocation_at_safepoint(size_t word_size, - bool expect_null_cur_alloc_region); + bool expect_null_mutator_alloc_region); // It dirties the cards that cover the block so that so that the post // write barrier never queues anything when updating objects on this @@ -585,6 +503,12 @@ // GC pause. void retire_alloc_region(HeapRegion* alloc_region, bool par); + // These two methods are the "callbacks" from the G1AllocRegion class. + + HeapRegion* new_mutator_alloc_region(size_t word_size, bool force); + void retire_mutator_alloc_region(HeapRegion* alloc_region, + size_t allocated_bytes); + // - if explicit_gc is true, the GC is for a System.gc() or a heap // inspection request and should collect the entire heap // - if clear_all_soft_refs is true, all soft references should be @@ -776,7 +700,7 @@ // Invoke "save_marks" on all heap regions. void save_marks(); - // It frees a non-humongous region by initializing its contents and + // Frees a non-humongous region by initializing its contents and // adding it to the free list that's passed as a parameter (this is // usually a local list which will be appended to the master free // list later). The used bytes of freed regions are accumulated in @@ -787,13 +711,13 @@ FreeRegionList* free_list, bool par); - // It frees a humongous region by collapsing it into individual - // regions and calling free_region() for each of them. 
The freed - // regions will be added to the free list that's passed as a parameter - // (this is usually a local list which will be appended to the - // master free list later). The used bytes of freed regions are - // accumulated in pre_used. If par is true, the region's RSet will - // not be freed up. The assumption is that this will be done later. + // Frees a humongous region by collapsing it into individual regions + // and calling free_region() for each of them. The freed regions + // will be added to the free list that's passed as a parameter (this + // is usually a local list which will be appended to the master free + // list later). The used bytes of freed regions are accumulated in + // pre_used. If par is true, the region's RSet will not be freed + // up. The assumption is that this will be done later. void free_humongous_region(HeapRegion* hr, size_t* pre_used, FreeRegionList* free_list, @@ -1029,6 +953,9 @@ // The number of regions available for "regular" expansion. size_t expansion_regions() { return _expansion_regions; } + void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN; + void verify_dirty_young_regions() PRODUCT_RETURN; + // verify_region_sets() performs verification over the region // lists. It will be compiled in the product code to be used when // necessary (i.e., during heap verification). @@ -1046,13 +973,13 @@ #endif // HEAP_REGION_SET_FORCE_VERIFY #ifdef ASSERT - bool is_on_free_list(HeapRegion* hr) { + bool is_on_master_free_list(HeapRegion* hr) { return hr->containing_set() == &_free_list; } - bool is_on_humongous_set(HeapRegion* hr) { + bool is_in_humongous_set(HeapRegion* hr) { return hr->containing_set() == &_humongous_set; -} + } #endif // ASSERT // Wrapper for the region list operations that can be called from @@ -1063,10 +990,12 @@ } void append_secondary_free_list() { - _free_list.add_as_tail(&_secondary_free_list); + _free_list.add_as_head(&_secondary_free_list); } - void append_secondary_free_list_if_not_empty() { + void append_secondary_free_list_if_not_empty_with_lock() { + // If the secondary free list looks empty there's no reason to + // take the lock and then try to append it. if (!_secondary_free_list.is_empty()) { MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); append_secondary_free_list(); @@ -1128,13 +1057,19 @@ return _g1_reserved.contains(p); } - // Returns a MemRegion that corresponds to the space that has been + // Returns a MemRegion that corresponds to the space that has been + // reserved for the heap + MemRegion g1_reserved() { + return _g1_reserved; + } + + // Returns a MemRegion that corresponds to the space that has been // committed in the heap MemRegion g1_committed() { return _g1_committed; } - NOT_PRODUCT(bool is_in_closed_subset(const void* p) const;) + virtual bool is_in_closed_subset(const void* p) const; // Dirty card table entries covering a list of young regions. void dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list);
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -27,6 +27,7 @@ #include "gc_implementation/g1/concurrentMark.hpp" #include "gc_implementation/g1/g1CollectedHeap.hpp" +#include "gc_implementation/g1/g1AllocRegion.inline.hpp" #include "gc_implementation/g1/g1CollectorPolicy.hpp" #include "gc_implementation/g1/heapRegionSeq.inline.hpp" #include "utilities/taskqueue.hpp" @@ -59,131 +60,23 @@ return r != NULL && r->in_collection_set(); } -// See the comment in the .hpp file about the locking protocol and -// assumptions of this method (and other related ones). inline HeapWord* -G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region, - size_t word_size, - bool with_heap_lock) { - assert_not_at_safepoint(); - assert(with_heap_lock == Heap_lock->owned_by_self(), - "with_heap_lock and Heap_lock->owned_by_self() should be a tautology"); - assert(cur_alloc_region != NULL, "pre-condition of the method"); - assert(cur_alloc_region->is_young(), - "we only support young current alloc regions"); - assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() " - "should not be used for humongous allocations"); - assert(!cur_alloc_region->isHumongous(), "Catch a regression of this bug."); - - assert(!cur_alloc_region->is_empty(), - err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty", - cur_alloc_region->bottom(), cur_alloc_region->end())); - HeapWord* result = cur_alloc_region->par_allocate_no_bot_updates(word_size); - if (result != NULL) { - assert(is_in(result), "result should be in the heap"); - - if (with_heap_lock) { - Heap_lock->unlock(); - } - assert_heap_not_locked(); - // Do the dirtying after we release the Heap_lock. - dirty_young_block(result, word_size); - return result; - } - - if (with_heap_lock) { - assert_heap_locked(); - } else { - assert_heap_not_locked(); - } - return NULL; -} - -// See the comment in the .hpp file about the locking protocol and -// assumptions of this method (and other related ones). -inline HeapWord* -G1CollectedHeap::attempt_allocation(size_t word_size) { +G1CollectedHeap::attempt_allocation(size_t word_size, + unsigned int* gc_count_before_ret) { assert_heap_not_locked_and_not_at_safepoint(); - assert(!isHumongous(word_size), "attempt_allocation() should not be called " - "for humongous allocation requests"); - - HeapRegion* cur_alloc_region = _cur_alloc_region; - if (cur_alloc_region != NULL) { - HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region, - word_size, - false /* with_heap_lock */); - assert_heap_not_locked(); - if (result != NULL) { - return result; - } - } + assert(!isHumongous(word_size), "attempt_allocation() should not " + "be called for humongous allocation requests"); - // Our attempt to allocate lock-free failed as the current - // allocation region is either NULL or full. So, we'll now take the - // Heap_lock and retry. 
- Heap_lock->lock(); - - HeapWord* result = attempt_allocation_locked(word_size); - if (result != NULL) { - assert_heap_not_locked(); - return result; + HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size, + false /* bot_updates */); + if (result == NULL) { + result = attempt_allocation_slow(word_size, gc_count_before_ret); } - - assert_heap_locked(); - return NULL; -} - -inline void -G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) { - assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); - assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region, - "pre-condition of the call"); - assert(cur_alloc_region->is_young(), - "we only support young current alloc regions"); - - // The region is guaranteed to be young - g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region); - _summary_bytes_used += cur_alloc_region->used(); - _cur_alloc_region = NULL; -} - -inline HeapWord* -G1CollectedHeap::attempt_allocation_locked(size_t word_size) { - assert_heap_locked_and_not_at_safepoint(); - assert(!isHumongous(word_size), "attempt_allocation_locked() " - "should not be called for humongous allocation requests"); - - // First, reread the current alloc region and retry the allocation - // in case somebody replaced it while we were waiting to get the - // Heap_lock. - HeapRegion* cur_alloc_region = _cur_alloc_region; - if (cur_alloc_region != NULL) { - HeapWord* result = allocate_from_cur_alloc_region( - cur_alloc_region, word_size, - true /* with_heap_lock */); - if (result != NULL) { - assert_heap_not_locked(); - return result; - } - - // We failed to allocate out of the current alloc region, so let's - // retire it before getting a new one. - retire_cur_alloc_region(cur_alloc_region); + assert_heap_not_locked(); + if (result != NULL) { + dirty_young_block(result, word_size); } - - assert_heap_locked(); - // Try to get a new region and allocate out of it - HeapWord* result = replace_cur_alloc_region_and_allocate(word_size, - false, /* at_safepoint */ - true, /* do_dirtying */ - false /* can_expand */); - if (result != NULL) { - assert_heap_not_locked(); - return result; - } - - assert_heap_locked(); - return NULL; + return result; } // It dirties the cards that cover the block so that so that the post
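The simplified inline fast path above now just asks the mutator alloc region for a lock-free allocation, falls back to attempt_allocation_slow() on failure, and dirties the young block afterwards. The lock-free step amounts to a CAS bump of the region's top pointer; here is a simplified standalone sketch of that step, not the actual HeapRegion/G1AllocRegion code.

#include <atomic>
#include <cstddef>

struct RegionSketch {
  std::atomic<char*> top;
  char*              end;

  void* par_allocate(std::size_t bytes) {
    char* old_top = top.load(std::memory_order_relaxed);
    do {
      if (old_top + bytes > end) {
        return nullptr;                 // region full: caller takes the slow path
      }
    } while (!top.compare_exchange_weak(old_top, old_top + bytes,
                                        std::memory_order_acq_rel,
                                        std::memory_order_relaxed));
    return old_top;                     // caller dirties the covering cards afterwards
  }
};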
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -81,6 +81,57 @@ // </NEW PREDICTION> +// Help class for avoiding interleaved logging +class LineBuffer: public StackObj { + +private: + static const int BUFFER_LEN = 1024; + static const int INDENT_CHARS = 3; + char _buffer[BUFFER_LEN]; + int _indent_level; + int _cur; + + void vappend(const char* format, va_list ap) { + int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap); + if (res != -1) { + _cur += res; + } else { + DEBUG_ONLY(warning("buffer too small in LineBuffer");) + _buffer[BUFFER_LEN -1] = 0; + _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again + } + } + +public: + explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) { + for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) { + _buffer[_cur] = ' '; + } + } + +#ifndef PRODUCT + ~LineBuffer() { + assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?"); + } +#endif + + void append(const char* format, ...) { + va_list ap; + va_start(ap, format); + vappend(format, ap); + va_end(ap); + } + + void append_and_print_cr(const char* format, ...) { + va_list ap; + va_start(ap, format); + vappend(format, ap); + va_end(ap); + gclog_or_tty->print_cr("%s", _buffer); + _cur = _indent_level * INDENT_CHARS; + } +}; + G1CollectorPolicy::G1CollectorPolicy() : _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads() ? ParallelGCThreads : 1), @@ -256,6 +307,7 @@ _par_last_termination_times_ms = new double[_parallel_gc_threads]; _par_last_termination_attempts = new double[_parallel_gc_threads]; _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads]; + _par_last_gc_worker_times_ms = new double[_parallel_gc_threads]; // start conservatively _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis; @@ -860,6 +912,7 @@ _par_last_termination_times_ms[i] = -1234.0; _par_last_termination_attempts[i] = -1234.0; _par_last_gc_worker_end_times_ms[i] = -1234.0; + _par_last_gc_worker_times_ms[i] = -1234.0; } #endif @@ -1012,14 +1065,11 @@ void G1CollectorPolicy::print_par_stats(int level, const char* str, - double* data, - bool summary) { + double* data) { double min = data[0], max = data[0]; double total = 0.0; - int j; - for (j = 0; j < level; ++j) - gclog_or_tty->print(" "); - gclog_or_tty->print("[%s (ms):", str); + LineBuffer buf(level); + buf.append("[%s (ms):", str); for (uint i = 0; i < ParallelGCThreads; ++i) { double val = data[i]; if (val < min) @@ -1027,30 +1077,21 @@ if (val > max) max = val; total += val; - gclog_or_tty->print(" %3.1lf", val); + buf.append(" %3.1lf", val); } - if (summary) { - gclog_or_tty->print_cr(""); - double avg = total / (double) ParallelGCThreads; - gclog_or_tty->print(" "); - for (j = 0; j < level; ++j) - gclog_or_tty->print(" "); - gclog_or_tty->print("Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf", - avg, min, max); - } - gclog_or_tty->print_cr("]"); + buf.append_and_print_cr(""); + double avg = total / (double) ParallelGCThreads; + buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]", + avg, min, max, max - min); } void G1CollectorPolicy::print_par_sizes(int level, const char* str, - double* data, - bool summary) { + double* data) { double min = data[0], max = data[0]; double total = 0.0; - int j; - for (j = 0; j < level; ++j) - gclog_or_tty->print(" 
"); - gclog_or_tty->print("[%s :", str); + LineBuffer buf(level); + buf.append("[%s :", str); for (uint i = 0; i < ParallelGCThreads; ++i) { double val = data[i]; if (val < min) @@ -1058,34 +1099,24 @@ if (val > max) max = val; total += val; - gclog_or_tty->print(" %d", (int) val); + buf.append(" %d", (int) val); } - if (summary) { - gclog_or_tty->print_cr(""); - double avg = total / (double) ParallelGCThreads; - gclog_or_tty->print(" "); - for (j = 0; j < level; ++j) - gclog_or_tty->print(" "); - gclog_or_tty->print("Sum: %d, Avg: %d, Min: %d, Max: %d", - (int)total, (int)avg, (int)min, (int)max); - } - gclog_or_tty->print_cr("]"); + buf.append_and_print_cr(""); + double avg = total / (double) ParallelGCThreads; + buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]", + (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min); } void G1CollectorPolicy::print_stats (int level, const char* str, double value) { - for (int j = 0; j < level; ++j) - gclog_or_tty->print(" "); - gclog_or_tty->print_cr("[%s: %5.1lf ms]", str, value); + LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value); } void G1CollectorPolicy::print_stats (int level, const char* str, int value) { - for (int j = 0; j < level; ++j) - gclog_or_tty->print(" "); - gclog_or_tty->print_cr("[%s: %d]", str, value); + LineBuffer(level).append_and_print_cr("[%s: %d]", str, value); } double G1CollectorPolicy::avg_value (double* data) { @@ -1382,22 +1413,22 @@ } if (parallel) { print_stats(1, "Parallel Time", _cur_collection_par_time_ms); - print_par_stats(2, "GC Worker Start Time", - _par_last_gc_worker_start_times_ms, false); + print_par_stats(2, "GC Worker Start Time", _par_last_gc_worker_start_times_ms); print_par_stats(2, "Update RS", _par_last_update_rs_times_ms); - print_par_sizes(3, "Processed Buffers", - _par_last_update_rs_processed_buffers, true); - print_par_stats(2, "Ext Root Scanning", - _par_last_ext_root_scan_times_ms); - print_par_stats(2, "Mark Stack Scanning", - _par_last_mark_stack_scan_times_ms); + print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers); + print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms); + print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms); print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms); print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms); print_par_stats(2, "Termination", _par_last_termination_times_ms); - print_par_sizes(3, "Termination Attempts", - _par_last_termination_attempts, true); - print_par_stats(2, "GC Worker End Time", - _par_last_gc_worker_end_times_ms, false); + print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts); + print_par_stats(2, "GC Worker End Time", _par_last_gc_worker_end_times_ms); + + for (int i = 0; i < _parallel_gc_threads; i++) { + _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i]; + } + print_par_stats(2, "GC Worker Times", _par_last_gc_worker_times_ms); + print_stats(2, "Other", parallel_other_time); print_stats(1, "Clear CT", _cur_clear_ct_time_ms); } else { @@ -2060,17 +2091,11 @@ _g1->collection_set_iterate(&cs_closure); } -static void print_indent(int level) { - for (int j = 0; j < level+1; ++j) - gclog_or_tty->print(" "); -} - void G1CollectorPolicy::print_summary (int level, const char* str, NumberSeq* seq) const { double sum = seq->sum(); - print_indent(level); - gclog_or_tty->print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)", + LineBuffer(level + 
1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)", str, sum / 1000.0, seq->avg()); } @@ -2078,8 +2103,7 @@ const char* str, NumberSeq* seq) const { print_summary(level, str, seq); - print_indent(level + 5); - gclog_or_tty->print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)", + LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)", seq->num(), seq->sd(), seq->maximum()); } @@ -2087,6 +2111,7 @@ NumberSeq* other_times_ms, NumberSeq* calc_other_times_ms) const { bool should_print = false; + LineBuffer buf(level + 2); double max_sum = MAX2(fabs(other_times_ms->sum()), fabs(calc_other_times_ms->sum())); @@ -2095,8 +2120,7 @@ double sum_ratio = max_sum / min_sum; if (sum_ratio > 1.1) { should_print = true; - print_indent(level + 1); - gclog_or_tty->print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###"); + buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###"); } double max_avg = MAX2(fabs(other_times_ms->avg()), @@ -2106,30 +2130,25 @@ double avg_ratio = max_avg / min_avg; if (avg_ratio > 1.1) { should_print = true; - print_indent(level + 1); - gclog_or_tty->print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###"); + buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###"); } if (other_times_ms->sum() < -0.01) { - print_indent(level + 1); - gclog_or_tty->print_cr("## RECORDED OTHER SUM IS NEGATIVE ###"); + buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###"); } if (other_times_ms->avg() < -0.01) { - print_indent(level + 1); - gclog_or_tty->print_cr("## RECORDED OTHER AVG IS NEGATIVE ###"); + buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###"); } if (calc_other_times_ms->sum() < -0.01) { should_print = true; - print_indent(level + 1); - gclog_or_tty->print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###"); + buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###"); } if (calc_other_times_ms->avg() < -0.01) { should_print = true; - print_indent(level + 1); - gclog_or_tty->print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###"); + buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###"); } if (should_print) @@ -2210,10 +2229,9 @@ } } } else { - print_indent(0); - gclog_or_tty->print_cr("none"); + LineBuffer(1).append_and_print_cr("none"); } - gclog_or_tty->print_cr(""); + LineBuffer(0).append_and_print_cr(""); } void G1CollectorPolicy::print_tracing_info() const { @@ -2532,7 +2550,7 @@ jint regions_added = parKnownGarbageCl.marked_regions_added(); _hrSorted->incNumMarkedHeapRegions(regions_added); if (G1PrintParCleanupStats) { - gclog_or_tty->print(" Thread %d called %d times, added %d regions to list.\n", + gclog_or_tty->print_cr(" Thread %d called %d times, added %d regions to list.", i, parKnownGarbageCl.invokes(), regions_added); } }
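The logging changes above route each output line through a LineBuffer so lines from parallel workers are not interleaved, and the per-worker summaries now also print Diff (max minus min) plus a derived "GC Worker Times" line (end time minus start time per worker). A small, self-contained approximation of that summary computation follows; it mimics but does not reproduce the GC log format, and the sample numbers are made up.

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

void print_worker_summary(const char* label, const std::vector<double>& data_ms) {
  // Assumes data_ms is non-empty, one entry per GC worker.
  double min = data_ms[0], max = data_ms[0], total = 0.0;
  std::printf("[%s (ms):", label);
  for (double v : data_ms) {
    min = std::min(min, v);
    max = std::max(max, v);
    total += v;
    std::printf(" %3.1lf", v);
  }
  std::printf("\n   Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]\n",
              total / data_ms.size(), min, max, max - min);
}

int main() {
  // The "GC Worker Times" line is simply the per-worker end - start
  // difference, as computed in the hunk above.
  std::vector<double> start = {0.1, 0.2, 0.1, 0.3};
  std::vector<double> end   = {5.0, 5.1, 4.9, 5.2};
  std::vector<double> times(start.size());
  for (std::size_t i = 0; i < start.size(); ++i) {
    times[i] = end[i] - start[i];
  }
  print_worker_summary("GC Worker Times", times);
  return 0;
}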
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -182,6 +182,7 @@ double* _par_last_termination_times_ms; double* _par_last_termination_attempts; double* _par_last_gc_worker_end_times_ms; + double* _par_last_gc_worker_times_ms; // indicates that we are in young GC mode bool _in_young_gc_mode; @@ -569,11 +570,8 @@ void print_stats(int level, const char* str, double value); void print_stats(int level, const char* str, int value); - void print_par_stats(int level, const char* str, double* data) { - print_par_stats(level, str, data, true); - } - void print_par_stats(int level, const char* str, double* data, bool summary); - void print_par_sizes(int level, const char* str, double* data, bool summary); + void print_par_stats(int level, const char* str, double* data); + void print_par_sizes(int level, const char* str, double* data); void check_other_times(int level, NumberSeq* other_times_ms,
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -185,22 +185,22 @@ G1CollectedHeap* _g1h; ModRefBarrierSet* _mrbs; CompactPoint _cp; - size_t _pre_used; - FreeRegionList _free_list; HumongousRegionSet _humongous_proxy_set; void free_humongous_region(HeapRegion* hr) { HeapWord* end = hr->end(); + size_t dummy_pre_used; + FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep"); + assert(hr->startsHumongous(), "Only the start of a humongous region should be freed."); - _g1h->free_humongous_region(hr, &_pre_used, &_free_list, + _g1h->free_humongous_region(hr, &dummy_pre_used, &dummy_free_list, &_humongous_proxy_set, false /* par */); - // Do we also need to do this for the continues humongous regions - // we just collapsed? hr->prepare_for_compaction(&_cp); // Also clear the part of the card table that will be unused after // compaction. _mrbs->clear(MemRegion(hr->compaction_top(), end)); + dummy_free_list.remove_all(); } public: @@ -208,8 +208,6 @@ : _g1h(G1CollectedHeap::heap()), _mrbs(G1CollectedHeap::heap()->mr_bs()), _cp(NULL, cs, cs->initialize_threshold()), - _pre_used(0), - _free_list("Local Free List for G1MarkSweep"), _humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { } void update_sets() { @@ -219,7 +217,6 @@ NULL, /* free_list */ &_humongous_proxy_set, false /* par */); - _free_list.remove_all(); } bool doHeapRegion(HeapRegion* hr) {
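In the G1MarkSweep hunk above, freeing a humongous region during a full GC now goes through a throwaway local free list (emptied with remove_all()) instead of accumulating regions in the closure, since compaction rebuilds region state anyway. A toy illustration of that "scratch list" pattern, with invented stand-in types rather than the real FreeRegionList:

#include <cstddef>
#include <vector>

struct RegionRef { int index; };

// Stand-in for free_humongous_region(): it always appends the freed regions
// to whatever list it is handed and accounts the freed bytes.
void free_humongous_region_sketch(RegionRef hr, std::vector<RegionRef>* free_list,
                                  std::size_t* pre_used) {
  *pre_used += 1;                       // accounting placeholder
  free_list->push_back(hr);
}

void prepare_humongous_for_compaction(RegionRef hr) {
  // During a full GC the caller does not need the freed regions afterwards,
  // so a local throwaway list is used and discarded, like dummy_free_list above.
  std::size_t dummy_pre_used = 0;
  std::vector<RegionRef> dummy_free_list;
  free_humongous_region_sketch(hr, &dummy_free_list, &dummy_pre_used);
  dummy_free_list.clear();              // analogue of remove_all()
}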
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -86,28 +86,6 @@ bool idempotent() { return true; } }; -class IntoCSRegionClosure: public HeapRegionClosure { - IntoCSOopClosure _blk; - G1CollectedHeap* _g1; -public: - IntoCSRegionClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) : - _g1(g1), _blk(g1, blk) {} - bool doHeapRegion(HeapRegion* r) { - if (!r->in_collection_set()) { - _blk.set_region(r); - if (r->isHumongous()) { - if (r->startsHumongous()) { - oop obj = oop(r->bottom()); - obj->oop_iterate(&_blk); - } - } else { - r->oop_before_save_marks_iterate(&_blk); - } - } - return false; - } -}; - class VerifyRSCleanCardOopClosure: public OopClosure { G1CollectedHeap* _g1; public: @@ -329,7 +307,7 @@ // is during RSet updating within an evacuation pause. // In this case worker_i should be the id of a GC worker thread. assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause"); - assert(worker_i < (int) DirtyCardQueueSet::num_par_ids(), "should be a GC worker"); + assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker"); if (_g1rs->concurrentRefineOneCard(card_ptr, worker_i, true)) { // 'card_ptr' contains references that point into the collection
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -83,11 +83,15 @@ } template <class T> void write_ref_array_pre_work(T* dst, int count); - virtual void write_ref_array_pre(oop* dst, int count) { - write_ref_array_pre_work(dst, count); + virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) { + if (!dest_uninitialized) { + write_ref_array_pre_work(dst, count); + } } - virtual void write_ref_array_pre(narrowOop* dst, int count) { - write_ref_array_pre_work(dst, count); + virtual void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) { + if (!dest_uninitialized) { + write_ref_array_pre_work(dst, count); + } } };
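The barrier-set change above threads a dest_uninitialized flag through write_ref_array_pre() so the SATB pre-barrier work is skipped when the destination array holds no previous values worth recording. A rough standalone sketch of that idea; the oop_t type and the enqueue helper are stand-ins, not HotSpot's actual barrier code.

#include <cstddef>

using oop_t = void*;

void enqueue_previous_value(oop_t /*old_value*/) {
  // Would record the overwritten reference for SATB concurrent marking.
}

void write_ref_array_pre_sketch(oop_t* dst, int count, bool dest_uninitialized) {
  if (dest_uninitialized) {
    return;                 // fresh memory: nothing was overwritten, skip the pre-work
  }
  for (int i = 0; i < count; ++i) {
    if (dst[i] != nullptr) {
      enqueue_previous_value(dst[i]);
    }
  }
}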
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -89,6 +89,11 @@ "The number of discovered reference objects to process before " \ "draining concurrent marking work queues.") \ \ + experimental(bool, G1UseConcMarkReferenceProcessing, false, \ + "If true, enable reference discovery during concurrent " \ + "marking and reference processing at the end of remark " \ + "(unsafe).") \ + \ develop(bool, G1SATBBarrierPrintNullPreVals, false, \ "If true, count frac of ptr writes with null pre-vals.") \ \ @@ -138,9 +143,9 @@ develop(bool, G1RSCountHisto, false, \ "If true, print a histogram of RS occupancies after each pause") \ \ - develop(intx, G1PrintRegionLivenessInfo, 0, \ - "When > 0, print the occupancies of the <n> best and worst" \ - "regions.") \ + product(bool, G1PrintRegionLivenessInfo, false, \ + "Prints the liveness information for all regions in the heap " \ + "at the end of a marking cycle.") \ \ develop(bool, G1PrintParCleanupStats, false, \ "When true, print extra stats about parallel cleanup.") \ @@ -193,6 +198,10 @@ develop(intx, G1ConcRSHotCardLimit, 4, \ "The threshold that defines (>=) a hot card.") \ \ + develop(intx, G1MaxHotCardCountSizePercent, 25, \ + "The maximum size of the hot card count cache as a " \ + "percentage of the number of cards for the maximum heap.") \ + \ develop(bool, G1PrintOopAppls, false, \ "When true, print applications of closures to external locs.") \ \
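The flag hunk above adds G1UseConcMarkReferenceProcessing as an experimental switch (default false) and promotes G1PrintRegionLivenessInfo to a product bool. Assuming a build that contains this change, enabling them would presumably follow the usual HotSpot flag syntax, with experimental flags gated behind -XX:+UnlockExperimentalVMOptions; app.jar below is only a placeholder.

java -XX:+UseG1GC \
     -XX:+UnlockExperimentalVMOptions -XX:+G1UseConcMarkReferenceProcessing \
     -XX:+G1PrintRegionLivenessInfo \
     -jar app.jar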
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -360,6 +360,7 @@ set_young_index_in_cset(-1); uninstall_surv_rate_group(); set_young_type(NotYoung); + reset_pre_dummy_top(); if (!par) { // If this is parallel, this will be done later. @@ -923,11 +924,11 @@ ContiguousSpace::set_saved_mark(); OrderAccess::storestore(); _gc_time_stamp = curr_gc_time_stamp; - // The following fence is to force a flush of the writes above, but - // is strictly not needed because when an allocating worker thread - // calls set_saved_mark() it does so under the ParGCRareEvent_lock; - // when the lock is released, the write will be flushed. - // OrderAccess::fence(); + // No need to do another barrier to flush the writes above. If + // this is called in parallel with other threads trying to + // allocate into the region, the caller should call this while + // holding a lock and when the lock is released the writes will be + // flushed. } }
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -53,8 +53,8 @@ class HeapRegionSetBase; #define HR_FORMAT "%d:["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]" -#define HR_FORMAT_PARAMS(__hr) (__hr)->hrs_index(), (__hr)->bottom(), \ - (__hr)->top(), (__hr)->end() +#define HR_FORMAT_PARAMS(_hr_) (_hr_)->hrs_index(), (_hr_)->bottom(), \ + (_hr_)->top(), (_hr_)->end() // A dirty card to oop closure for heap regions. It // knows how to get the G1 heap and how to use the bitmap @@ -149,6 +149,13 @@ G1BlockOffsetArrayContigSpace _offsets; Mutex _par_alloc_lock; volatile unsigned _gc_time_stamp; + // When we need to retire an allocation region, while other threads + // are also concurrently trying to allocate into it, we typically + // allocate a dummy object at the end of the region to ensure that + // no more allocations can take place in it. However, sometimes we + // want to know where the end of the last "real" object we allocated + // into the region was and this is what this keeps track. + HeapWord* _pre_dummy_top; public: // Constructor. If "is_zeroed" is true, the MemRegion "mr" may be @@ -163,6 +170,17 @@ virtual void set_saved_mark(); void reset_gc_time_stamp() { _gc_time_stamp = 0; } + // See the comment above in the declaration of _pre_dummy_top for an + // explanation of what it is. + void set_pre_dummy_top(HeapWord* pre_dummy_top) { + assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition"); + _pre_dummy_top = pre_dummy_top; + } + HeapWord* pre_dummy_top() { + return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top; + } + void reset_pre_dummy_top() { _pre_dummy_top = NULL; } + virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space); virtual void clear(bool mangle_space); @@ -380,13 +398,16 @@ // The number of bytes marked live in the region in the last marking phase. size_t marked_bytes() { return _prev_marked_bytes; } + size_t live_bytes() { + return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes(); + } + // The number of bytes counted in the next marking. size_t next_marked_bytes() { return _next_marked_bytes; } // The number of bytes live wrt the next marking. size_t next_live_bytes() { - return (top() - next_top_at_mark_start()) - * HeapWordSize - + next_marked_bytes(); + return + (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes(); } // A lower bound on the amount of garbage bytes in the region. @@ -518,13 +539,13 @@ containing_set, _containing_set)); _containing_set = containing_set; -} + } HeapRegionSetBase* containing_set() { return _containing_set; } #else // ASSERT void set_containing_set(HeapRegionSetBase* containing_set) { } - // containing_set() is only used in asserts so there's not reason + // containing_set() is only used in asserts so there's no reason // to provide a dummy version of it. #endif // ASSERT @@ -535,14 +556,15 @@ bool pending_removal() { return _pending_removal; } void set_pending_removal(bool pending_removal) { - // We can only set pending_removal to true, if it's false and the - // region belongs to a set. - assert(!pending_removal || - (!_pending_removal && containing_set() != NULL), "pre-condition"); - // We can only set pending_removal to false, if it's true and the - // region does not belong to a set. 
- assert( pending_removal || - ( _pending_removal && containing_set() == NULL), "pre-condition"); + if (pending_removal) { + assert(!_pending_removal && containing_set() != NULL, + "can only set pending removal to true if it's false and " + "the region belongs to a region set"); + } else { + assert( _pending_removal && containing_set() == NULL, + "can only set pending removal to false if it's true and " + "the region does not belong to a region set"); + } _pending_removal = pending_removal; }
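The new _pre_dummy_top field answers one question: after a region is retired by filling its remaining space with a dummy object, where did the last real allocation end? The following standalone toy model shows how the accessors added in this hunk fit together; ToyRegion and the char*-based "heap word" pointer are simplifications, not the real HeapRegion.

    #include <cassert>
    #include <cstddef>

    typedef char* HeapWordPtr;   // stand-in for HotSpot's HeapWord*

    class ToyRegion {
      HeapWordPtr _bottom;
      HeapWordPtr _top;            // end of all allocations, including any dummy filler
      HeapWordPtr _end;
      HeapWordPtr _pre_dummy_top;  // end of the last real object, or NULL if unset
    public:
      ToyRegion(HeapWordPtr bottom, HeapWordPtr end)
        : _bottom(bottom), _top(bottom), _end(end), _pre_dummy_top(NULL) { }

      HeapWordPtr top() const { return _top; }

      // Called by the thread that retires the region, right after it has
      // allocated the dummy filler: remember where the real data ends.
      void set_pre_dummy_top(HeapWordPtr pre_dummy_top) {
        assert(_bottom <= pre_dummy_top && pre_dummy_top <= _top && "pre-condition");
        _pre_dummy_top = pre_dummy_top;
      }

      // Readers that only care about real objects use this instead of top().
      HeapWordPtr pre_dummy_top() const {
        return (_pre_dummy_top == NULL) ? _top : _pre_dummy_top;
      }

      // Cleared when the region is recycled; the patch calls the analogous
      // reset_pre_dummy_top() from the region-clearing path in heapRegion.cpp.
      void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
    };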
--- a/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -38,15 +38,8 @@ // this is used for larger LAB allocations only. inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) { MutexLocker x(&_par_alloc_lock); - // This ought to be just "allocate", because of the lock above, but that - // ContiguousSpace::allocate asserts that either the allocating thread - // holds the heap lock or it is the VM thread and we're at a safepoint. - // The best I (dld) could figure was to put a field in ContiguousSpace - // meaning "locking at safepoint taken care of", and set/reset that - // here. But this will do for now, especially in light of the comment - // above. Perhaps in the future some lock-free manner of keeping the - // coordination. - HeapWord* res = ContiguousSpace::par_allocate(size); + // Given that we take the lock no need to use par_allocate() here. + HeapWord* res = ContiguousSpace::allocate(size); if (res != NULL) { _offsets.alloc_block(res, size); }
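The simplification above relies on the lock: once the mutex is held, the plain bump-pointer allocate path is sufficient and no compare-and-swap loop is needed. A minimal sketch of that pattern using standard C++ primitives (not the HotSpot Mutex or Space classes) follows.

    #include <cstddef>
    #include <mutex>

    // Toy bump-pointer space: the lock serializes allocators, so the plain
    // (non-atomic) allocate path is enough.
    class BumpPointerSpace {
      char*      _top;
      char*      _end;
      std::mutex _par_alloc_lock;   // stand-in for the real _par_alloc_lock

      // Plain allocation; only safe while the caller serializes access.
      char* allocate(std::size_t bytes) {
        if (bytes > (std::size_t)(_end - _top)) return NULL;
        char* res = _top;
        _top += bytes;
        return res;
      }

    public:
      BumpPointerSpace(char* bottom, char* end) : _top(bottom), _end(end) { }

      // Analogue of the patched par_allocate(): take the lock, then call the
      // plain allocate() instead of a CAS-based parallel allocation.
      char* par_allocate(std::size_t bytes) {
        std::lock_guard<std::mutex> x(_par_alloc_lock);
        return allocate(bytes);
      }
    };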
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -165,7 +165,7 @@ assert(num_so_far <= num, "post-condition"); if (num_so_far == num) { - // we find enough space for the humongous object + // we found enough space for the humongous object assert(from <= first && first < _regions.length(), "post-condition"); assert(first < curr && (curr - first) == (int) num, "post-condition"); for (int i = first; i < first + (int) num; ++i) {
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -76,7 +76,8 @@ // that are available for allocation. size_t free_suffix(); - // Finds a contiguous set of empty regions of length num. + // Find a contiguous set of empty regions of length num and return + // the index of the first region or -1 if the search was unsuccessful. int find_contiguous(size_t num); // Apply the "doHeapRegion" method of "blk" to all regions in "this",
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -42,7 +42,7 @@ return region_num; } -void HeapRegionSetBase::fill_in_ext_msg(hrl_ext_msg* msg, const char* message) { +void HeapRegionSetBase::fill_in_ext_msg(hrs_ext_msg* msg, const char* message) { msg->append("[%s] %s " "ln: "SIZE_FORMAT" rn: "SIZE_FORMAT" " "cy: "SIZE_FORMAT" ud: "SIZE_FORMAT, @@ -109,30 +109,30 @@ // for the verification calls. If we do verification without the // appropriate locks and the set changes underneath our feet // verification might fail and send us on a wild goose chase. - hrl_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(this); guarantee(( is_empty() && length() == 0 && region_num() == 0 && total_used_bytes() == 0 && total_capacity_bytes() == 0) || (!is_empty() && length() >= 0 && region_num() >= 0 && total_used_bytes() >= 0 && total_capacity_bytes() >= 0), - hrl_ext_msg(this, "invariant")); + hrs_ext_msg(this, "invariant")); guarantee((!regions_humongous() && region_num() == length()) || ( regions_humongous() && region_num() >= length()), - hrl_ext_msg(this, "invariant")); + hrs_ext_msg(this, "invariant")); guarantee(!regions_empty() || total_used_bytes() == 0, - hrl_ext_msg(this, "invariant")); + hrs_ext_msg(this, "invariant")); guarantee(total_used_bytes() <= total_capacity_bytes(), - hrl_ext_msg(this, "invariant")); + hrs_ext_msg(this, "invariant")); } void HeapRegionSetBase::verify_start() { // See comment in verify() about MT safety and verification. - hrl_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(this); assert(!_verify_in_progress, - hrl_ext_msg(this, "verification should not be in progress")); + hrs_ext_msg(this, "verification should not be in progress")); // Do the basic verification first before we do the checks over the regions. HeapRegionSetBase::verify(); @@ -146,11 +146,11 @@ void HeapRegionSetBase::verify_next_region(HeapRegion* hr) { // See comment in verify() about MT safety and verification. - hrl_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(this); assert(_verify_in_progress, - hrl_ext_msg(this, "verification should be in progress")); + hrs_ext_msg(this, "verification should be in progress")); - guarantee(verify_region(hr, this), hrl_ext_msg(this, "region verification")); + guarantee(verify_region(hr, this), hrs_ext_msg(this, "region verification")); _calc_length += 1; if (!hr->isHumongous()) { @@ -164,28 +164,28 @@ void HeapRegionSetBase::verify_end() { // See comment in verify() about MT safety and verification. 
- hrl_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(this); assert(_verify_in_progress, - hrl_ext_msg(this, "verification should be in progress")); + hrs_ext_msg(this, "verification should be in progress")); guarantee(length() == _calc_length, - hrl_err_msg("[%s] length: "SIZE_FORMAT" should be == " + hrs_err_msg("[%s] length: "SIZE_FORMAT" should be == " "calc length: "SIZE_FORMAT, name(), length(), _calc_length)); guarantee(region_num() == _calc_region_num, - hrl_err_msg("[%s] region num: "SIZE_FORMAT" should be == " + hrs_err_msg("[%s] region num: "SIZE_FORMAT" should be == " "calc region num: "SIZE_FORMAT, name(), region_num(), _calc_region_num)); guarantee(total_capacity_bytes() == _calc_total_capacity_bytes, - hrl_err_msg("[%s] capacity bytes: "SIZE_FORMAT" should be == " + hrs_err_msg("[%s] capacity bytes: "SIZE_FORMAT" should be == " "calc capacity bytes: "SIZE_FORMAT, name(), total_capacity_bytes(), _calc_total_capacity_bytes)); guarantee(total_used_bytes() == _calc_total_used_bytes, - hrl_err_msg("[%s] used bytes: "SIZE_FORMAT" should be == " + hrs_err_msg("[%s] used bytes: "SIZE_FORMAT" should be == " "calc used bytes: "SIZE_FORMAT, name(), total_used_bytes(), _calc_total_used_bytes)); @@ -221,9 +221,9 @@ //////////////////// HeapRegionSet //////////////////// void HeapRegionSet::update_from_proxy(HeapRegionSet* proxy_set) { - hrl_assert_mt_safety_ok(this); - hrl_assert_mt_safety_ok(proxy_set); - hrl_assert_sets_match(this, proxy_set); + hrs_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(proxy_set); + hrs_assert_sets_match(this, proxy_set); verify_optional(); proxy_set->verify_optional(); @@ -231,19 +231,19 @@ if (proxy_set->is_empty()) return; assert(proxy_set->length() <= _length, - hrl_err_msg("[%s] proxy set length: "SIZE_FORMAT" " + hrs_err_msg("[%s] proxy set length: "SIZE_FORMAT" " "should be <= length: "SIZE_FORMAT, name(), proxy_set->length(), _length)); _length -= proxy_set->length(); assert(proxy_set->region_num() <= _region_num, - hrl_err_msg("[%s] proxy set region num: "SIZE_FORMAT" " + hrs_err_msg("[%s] proxy set region num: "SIZE_FORMAT" " "should be <= region num: "SIZE_FORMAT, name(), proxy_set->region_num(), _region_num)); _region_num -= proxy_set->region_num(); assert(proxy_set->total_used_bytes() <= _total_used_bytes, - hrl_err_msg("[%s] proxy set used bytes: "SIZE_FORMAT" " + hrs_err_msg("[%s] proxy set used bytes: "SIZE_FORMAT" " "should be <= used bytes: "SIZE_FORMAT, name(), proxy_set->total_used_bytes(), _total_used_bytes)); @@ -257,13 +257,52 @@ //////////////////// HeapRegionLinkedList //////////////////// -void HeapRegionLinkedList::fill_in_ext_msg_extra(hrl_ext_msg* msg) { +void HeapRegionLinkedList::fill_in_ext_msg_extra(hrs_ext_msg* msg) { msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, head(), tail()); } +void HeapRegionLinkedList::add_as_head(HeapRegionLinkedList* from_list) { + hrs_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(from_list); + + verify_optional(); + from_list->verify_optional(); + + if (from_list->is_empty()) return; + +#ifdef ASSERT + HeapRegionLinkedListIterator iter(from_list); + while (iter.more_available()) { + HeapRegion* hr = iter.get_next(); + // In set_containing_set() we check that we either set the value + // from NULL to non-NULL or vice versa to catch bugs. So, we have + // to NULL it first before setting it to the value. 
+ hr->set_containing_set(NULL); + hr->set_containing_set(this); + } +#endif // ASSERT + + if (_head != NULL) { + assert(length() > 0 && _tail != NULL, hrs_ext_msg(this, "invariant")); + from_list->_tail->set_next(_head); + } else { + assert(length() == 0 && _head == NULL, hrs_ext_msg(this, "invariant")); + _tail = from_list->_tail; + } + _head = from_list->_head; + + _length += from_list->length(); + _region_num += from_list->region_num(); + _total_used_bytes += from_list->total_used_bytes(); + from_list->clear(); + + verify_optional(); + from_list->verify_optional(); +} + void HeapRegionLinkedList::add_as_tail(HeapRegionLinkedList* from_list) { - hrl_assert_mt_safety_ok(this); - hrl_assert_mt_safety_ok(from_list); + hrs_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(from_list); verify_optional(); from_list->verify_optional(); @@ -283,10 +322,10 @@ #endif // ASSERT if (_tail != NULL) { - assert(length() > 0 && _head != NULL, hrl_ext_msg(this, "invariant")); + assert(length() > 0 && _head != NULL, hrs_ext_msg(this, "invariant")); _tail->set_next(from_list->_head); } else { - assert(length() == 0 && _head == NULL, hrl_ext_msg(this, "invariant")); + assert(length() == 0 && _head == NULL, hrs_ext_msg(this, "invariant")); _head = from_list->_head; } _tail = from_list->_tail; @@ -301,12 +340,12 @@ } void HeapRegionLinkedList::remove_all() { - hrl_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(this); verify_optional(); HeapRegion* curr = _head; while (curr != NULL) { - hrl_assert_region_ok(this, curr, this); + hrs_assert_region_ok(this, curr, this); HeapRegion* next = curr->next(); curr->set_next(NULL); @@ -319,9 +358,9 @@ } void HeapRegionLinkedList::remove_all_pending(size_t target_count) { - hrl_assert_mt_safety_ok(this); - assert(target_count > 1, hrl_ext_msg(this, "pre-condition")); - assert(!is_empty(), hrl_ext_msg(this, "pre-condition")); + hrs_assert_mt_safety_ok(this); + assert(target_count > 1, hrs_ext_msg(this, "pre-condition")); + assert(!is_empty(), hrs_ext_msg(this, "pre-condition")); verify_optional(); DEBUG_ONLY(size_t old_length = length();) @@ -330,27 +369,27 @@ HeapRegion* prev = NULL; size_t count = 0; while (curr != NULL) { - hrl_assert_region_ok(this, curr, this); + hrs_assert_region_ok(this, curr, this); HeapRegion* next = curr->next(); if (curr->pending_removal()) { assert(count < target_count, - hrl_err_msg("[%s] should not come across more regions " + hrs_err_msg("[%s] should not come across more regions " "pending for removal than target_count: "SIZE_FORMAT, name(), target_count)); if (prev == NULL) { - assert(_head == curr, hrl_ext_msg(this, "invariant")); + assert(_head == curr, hrs_ext_msg(this, "invariant")); _head = next; } else { - assert(_head != curr, hrl_ext_msg(this, "invariant")); + assert(_head != curr, hrs_ext_msg(this, "invariant")); prev->set_next(next); } if (next == NULL) { - assert(_tail == curr, hrl_ext_msg(this, "invariant")); + assert(_tail == curr, hrs_ext_msg(this, "invariant")); _tail = prev; } else { - assert(_tail != curr, hrl_ext_msg(this, "invariant")); + assert(_tail != curr, hrs_ext_msg(this, "invariant")); } curr->set_next(NULL); @@ -371,10 +410,10 @@ } assert(count == target_count, - hrl_err_msg("[%s] count: "SIZE_FORMAT" should be == " + hrs_err_msg("[%s] count: "SIZE_FORMAT" should be == " "target_count: "SIZE_FORMAT, name(), count, target_count)); assert(length() + target_count == old_length, - hrl_err_msg("[%s] new length should be consistent " + hrs_err_msg("[%s] new length should be consistent " "new length: 
"SIZE_FORMAT" old length: "SIZE_FORMAT" " "target_count: "SIZE_FORMAT, name(), length(), old_length, target_count)); @@ -385,7 +424,7 @@ void HeapRegionLinkedList::verify() { // See comment in HeapRegionSetBase::verify() about MT safety and // verification. - hrl_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(this); // This will also do the basic verification too. verify_start(); @@ -399,7 +438,7 @@ count += 1; guarantee(count < _unrealistically_long_length, - hrl_err_msg("[%s] the calculated length: "SIZE_FORMAT" " + hrs_err_msg("[%s] the calculated length: "SIZE_FORMAT" " "seems very long, is there maybe a cycle? " "curr: "PTR_FORMAT" prev0: "PTR_FORMAT" " "prev1: "PTR_FORMAT" length: "SIZE_FORMAT, @@ -410,7 +449,7 @@ curr = curr->next(); } - guarantee(_tail == prev0, hrl_ext_msg(this, "post-condition")); + guarantee(_tail == prev0, hrs_ext_msg(this, "post-condition")); verify_end(); }
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -28,8 +28,8 @@ #include "gc_implementation/g1/heapRegion.hpp" // Large buffer for some cases where the output might be larger than normal. -#define HRL_ERR_MSG_BUFSZ 512 -typedef FormatBuffer<HRL_ERR_MSG_BUFSZ> hrl_err_msg; +#define HRS_ERR_MSG_BUFSZ 512 +typedef FormatBuffer<HRS_ERR_MSG_BUFSZ> hrs_err_msg; // Set verification will be forced either if someone defines // HEAP_REGION_SET_FORCE_VERIFY to be 1, or in builds in which @@ -45,10 +45,10 @@ // (e.g., length, region num, used bytes sum) plus any shared // functionality (e.g., verification). -class hrl_ext_msg; +class hrs_ext_msg; class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC { - friend class hrl_ext_msg; + friend class hrs_ext_msg; protected: static size_t calculate_region_num(HeapRegion* hr); @@ -104,10 +104,10 @@ virtual bool check_mt_safety() { return true; } // fill_in_ext_msg() writes the the values of the set's attributes - // in the custom err_msg (hrl_ext_msg). fill_in_ext_msg_extra() + // in the custom err_msg (hrs_ext_msg). fill_in_ext_msg_extra() // allows subclasses to append further information. - virtual void fill_in_ext_msg_extra(hrl_ext_msg* msg) { } - void fill_in_ext_msg(hrl_ext_msg* msg, const char* message); + virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg) { } + void fill_in_ext_msg(hrs_ext_msg* msg, const char* message); // It updates the fields of the set to reflect hr being added to // the set. @@ -170,9 +170,9 @@ // the fields of the associated set. This can be very helpful in // diagnosing failures. -class hrl_ext_msg : public hrl_err_msg { +class hrs_ext_msg : public hrs_err_msg { public: - hrl_ext_msg(HeapRegionSetBase* set, const char* message) : hrl_err_msg("") { + hrs_ext_msg(HeapRegionSetBase* set, const char* message) : hrs_err_msg("") { set->fill_in_ext_msg(this, message); } }; @@ -180,25 +180,25 @@ // These two macros are provided for convenience, to keep the uses of // these two asserts a bit more concise. -#define hrl_assert_mt_safety_ok(_set_) \ +#define hrs_assert_mt_safety_ok(_set_) \ do { \ - assert((_set_)->check_mt_safety(), hrl_ext_msg((_set_), "MT safety")); \ + assert((_set_)->check_mt_safety(), hrs_ext_msg((_set_), "MT safety")); \ } while (0) -#define hrl_assert_region_ok(_set_, _hr_, _expected_) \ +#define hrs_assert_region_ok(_set_, _hr_, _expected_) \ do { \ assert((_set_)->verify_region((_hr_), (_expected_)), \ - hrl_ext_msg((_set_), "region verification")); \ + hrs_ext_msg((_set_), "region verification")); \ } while (0) //////////////////// HeapRegionSet //////////////////// -#define hrl_assert_sets_match(_set1_, _set2_) \ +#define hrs_assert_sets_match(_set1_, _set2_) \ do { \ assert(((_set1_)->regions_humongous() == \ (_set2_)->regions_humongous()) && \ ((_set1_)->regions_empty() == (_set2_)->regions_empty()), \ - hrl_err_msg("the contents of set %s and set %s should match", \ + hrs_err_msg("the contents of set %s and set %s should match", \ (_set1_)->name(), (_set2_)->name())); \ } while (0) @@ -267,7 +267,7 @@ HeapRegion* tail() { return _tail; } protected: - virtual void fill_in_ext_msg_extra(hrl_ext_msg* msg); + virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg); // See the comment for HeapRegionSetBase::clear() virtual void clear(); @@ -277,6 +277,10 @@ } public: + // It adds hr to the list as the new head. The region should not be + // a member of another set. 
+ inline void add_as_head(HeapRegion* hr); + // It adds hr to the list as the new tail. The region should not be // a member of another set. inline void add_as_tail(HeapRegion* hr); @@ -290,6 +294,11 @@ // It moves the regions from from_list to this list and empties // from_list. The new regions will appear in the same order as they + // were in from_list and be linked in the beginning of this list. + void add_as_head(HeapRegionLinkedList* from_list); + + // It moves the regions from from_list to this list and empties + // from_list. The new regions will appear in the same order as they // were in from_list and be linked in the end of this list. void add_as_tail(HeapRegionLinkedList* from_list); @@ -309,10 +318,10 @@ virtual void print_on(outputStream* out, bool print_contents = false); }; -//////////////////// HeapRegionLinkedList //////////////////// +//////////////////// HeapRegionLinkedListIterator //////////////////// -// Iterator class that provides a convenient way to iterator over the -// regions in a HeapRegionLinkedList instance. +// Iterator class that provides a convenient way to iterate over the +// regions of a HeapRegionLinkedList instance. class HeapRegionLinkedListIterator : public StackObj { private:
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -42,8 +42,8 @@ } inline void HeapRegionSetBase::add_internal(HeapRegion* hr) { - hrl_assert_region_ok(this, hr, NULL); - assert(hr->next() == NULL, hrl_ext_msg(this, "should not already be linked")); + hrs_assert_region_ok(this, hr, NULL); + assert(hr->next() == NULL, hrs_ext_msg(this, "should not already be linked")); update_for_addition(hr); hr->set_containing_set(this); @@ -51,7 +51,7 @@ inline void HeapRegionSetBase::update_for_removal(HeapRegion* hr) { // Assumes the caller has already verified the region. - assert(_length > 0, hrl_ext_msg(this, "pre-condition")); + assert(_length > 0, hrs_ext_msg(this, "pre-condition")); _length -= 1; size_t region_num_diff; @@ -61,22 +61,22 @@ region_num_diff = calculate_region_num(hr); } assert(region_num_diff <= _region_num, - hrl_err_msg("[%s] region's region num: "SIZE_FORMAT" " + hrs_err_msg("[%s] region's region num: "SIZE_FORMAT" " "should be <= region num: "SIZE_FORMAT, name(), region_num_diff, _region_num)); _region_num -= region_num_diff; size_t used_bytes = hr->used(); assert(used_bytes <= _total_used_bytes, - hrl_err_msg("[%s] region's used bytes: "SIZE_FORMAT" " + hrs_err_msg("[%s] region's used bytes: "SIZE_FORMAT" " "should be <= used bytes: "SIZE_FORMAT, name(), used_bytes, _total_used_bytes)); _total_used_bytes -= used_bytes; } inline void HeapRegionSetBase::remove_internal(HeapRegion* hr) { - hrl_assert_region_ok(this, hr, this); - assert(hr->next() == NULL, hrl_ext_msg(this, "should already be unlinked")); + hrs_assert_region_ok(this, hr, this); + assert(hr->next() == NULL, hrs_ext_msg(this, "should already be unlinked")); hr->set_containing_set(NULL); update_for_removal(hr); @@ -85,13 +85,13 @@ //////////////////// HeapRegionSet //////////////////// inline void HeapRegionSet::add(HeapRegion* hr) { - hrl_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(this); // add_internal() will verify the region. add_internal(hr); } inline void HeapRegionSet::remove(HeapRegion* hr) { - hrl_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(this); // remove_internal() will verify the region. remove_internal(hr); } @@ -101,8 +101,8 @@ // No need to fo the MT safety check here given that this method // does not update the contents of the set but instead accumulates // the changes in proxy_set which is assumed to be thread-local. - hrl_assert_sets_match(this, proxy_set); - hrl_assert_region_ok(this, hr, this); + hrs_assert_sets_match(this, proxy_set); + hrs_assert_region_ok(this, hr, this); hr->set_containing_set(NULL); proxy_set->update_for_addition(hr); @@ -110,11 +110,28 @@ //////////////////// HeapRegionLinkedList //////////////////// -inline void HeapRegionLinkedList::add_as_tail(HeapRegion* hr) { - hrl_assert_mt_safety_ok(this); +inline void HeapRegionLinkedList::add_as_head(HeapRegion* hr) { + hrs_assert_mt_safety_ok(this); assert((length() == 0 && _head == NULL && _tail == NULL) || (length() > 0 && _head != NULL && _tail != NULL), - hrl_ext_msg(this, "invariant")); + hrs_ext_msg(this, "invariant")); + // add_internal() will verify the region. + add_internal(hr); + + // Now link the region. 
+ if (_head != NULL) { + hr->set_next(_head); + } else { + _tail = hr; + } + _head = hr; +} + +inline void HeapRegionLinkedList::add_as_tail(HeapRegion* hr) { + hrs_assert_mt_safety_ok(this); + assert((length() == 0 && _head == NULL && _tail == NULL) || + (length() > 0 && _head != NULL && _tail != NULL), + hrs_ext_msg(this, "invariant")); // add_internal() will verify the region. add_internal(hr); @@ -128,10 +145,10 @@ } inline HeapRegion* HeapRegionLinkedList::remove_head() { - hrl_assert_mt_safety_ok(this); - assert(!is_empty(), hrl_ext_msg(this, "the list should not be empty")); + hrs_assert_mt_safety_ok(this); + assert(!is_empty(), hrs_ext_msg(this, "the list should not be empty")); assert(length() > 0 && _head != NULL && _tail != NULL, - hrl_ext_msg(this, "invariant")); + hrs_ext_msg(this, "invariant")); // We need to unlink it first. HeapRegion* hr = _head; @@ -147,7 +164,7 @@ } inline HeapRegion* HeapRegionLinkedList::remove_head_or_null() { - hrl_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(this); if (!is_empty()) { return remove_head();
--- a/src/share/vm/gc_implementation/g1/heapRegionSets.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/heapRegionSets.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -52,7 +52,7 @@ FreeList_lock->owned_by_self())) || (!SafepointSynchronize::is_at_safepoint() && Heap_lock->owned_by_self()), - hrl_ext_msg(this, "master free list MT safety protocol")); + hrs_ext_msg(this, "master free list MT safety protocol")); return FreeRegionList::check_mt_safety(); } @@ -65,7 +65,7 @@ // while holding the SecondaryFreeList_lock. guarantee(SecondaryFreeList_lock->owned_by_self(), - hrl_ext_msg(this, "secondary free list MT safety protocol")); + hrs_ext_msg(this, "secondary free list MT safety protocol")); return FreeRegionList::check_mt_safety(); } @@ -81,7 +81,7 @@ return HeapRegionSet::verify_region_extra(hr); } -//////////////////// HumongousRegionSet //////////////////// +//////////////////// MasterHumongousRegionSet //////////////////// bool MasterHumongousRegionSet::check_mt_safety() { // Master Humongous Set MT safety protocol: @@ -97,6 +97,6 @@ OldSets_lock->owned_by_self())) || (!SafepointSynchronize::is_at_safepoint() && Heap_lock->owned_by_self()), - hrl_ext_msg(this, "master humongous set MT safety protocol")); + hrs_ext_msg(this, "master humongous set MT safety protocol")); return HumongousRegionSet::check_mt_safety(); }
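The guarantees renamed above encode an MT-safety protocol for the master free list. The predicate below restates it in standalone form; the names are illustrative, and the VM-thread alternative at a safepoint is inferred from the surrounding condition rather than fully visible in this hunk, so treat that branch as an assumption.

    // Toy restatement of the "master free list MT safety protocol": at a
    // safepoint the caller should be the VM thread or hold the FreeList_lock;
    // outside a safepoint it should hold the Heap_lock.
    inline bool master_free_list_mt_safety_ok(bool at_safepoint,
                                              bool is_vm_thread,
                                              bool holds_free_list_lock,
                                              bool holds_heap_lock) {
      if (at_safepoint) {
        return is_vm_thread || holds_free_list_lock;
      }
      return holds_heap_lock;
    }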
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1530,13 +1530,15 @@ { if (_ref_processor == NULL) { // Allocate and initialize a reference processor - _ref_processor = ReferenceProcessor::create_ref_processor( - _reserved, // span - refs_discovery_is_atomic(), // atomic_discovery - refs_discovery_is_mt(), // mt_discovery - NULL, // is_alive_non_header - ParallelGCThreads, - ParallelRefProcEnabled); + _ref_processor = + new ReferenceProcessor(_reserved, // span + ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing + (int) ParallelGCThreads, // mt processing degree + refs_discovery_is_mt(), // mt discovery + (int) ParallelGCThreads, // mt discovery degree + refs_discovery_is_atomic(), // atomic_discovery + NULL, // is_alive_non_header + false); // write barrier for next field updates } }
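Here the ReferenceProcessor::create_ref_processor() factory is replaced by a direct constructor call whose argument order is visible in the patch: MT processing is enabled only when ParallelRefProcEnabled is set and more than one GC thread is configured, and both the processing and discovery degrees default to ParallelGCThreads. The sketch below models only that configuration decision; RefProcConfig is an illustrative stand-in, not the real ReferenceProcessor.

    #include <cstddef>

    // Toy model of the configuration computed at the patched call sites.
    struct RefProcConfig {
      bool mt_processing;
      int  mt_processing_degree;
      bool mt_discovery;
      int  mt_discovery_degree;
      bool atomic_discovery;

      RefProcConfig(size_t parallel_gc_threads,
                    bool   parallel_ref_proc_enabled,
                    bool   discovery_is_mt,
                    bool   discovery_is_atomic)
        : mt_processing(parallel_ref_proc_enabled && parallel_gc_threads > 1),
          mt_processing_degree((int) parallel_gc_threads),
          mt_discovery(discovery_is_mt),
          mt_discovery_degree((int) parallel_gc_threads),
          atomic_discovery(discovery_is_atomic) { }
    };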
--- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -58,9 +58,7 @@ void PSMarkSweep::initialize() { MemRegion mr = Universe::heap()->reserved_region(); - _ref_processor = new ReferenceProcessor(mr, - true, // atomic_discovery - false); // mt_discovery + _ref_processor = new ReferenceProcessor(mr); // a vanilla ref proc _counters = new CollectorCounters("PSMarkSweep", 1); }
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -827,13 +827,15 @@ assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); MemRegion mr = heap->reserved_region(); - _ref_processor = ReferenceProcessor::create_ref_processor( - mr, // span - true, // atomic_discovery - true, // mt_discovery - &_is_alive_closure, - ParallelGCThreads, - ParallelRefProcEnabled); + _ref_processor = + new ReferenceProcessor(mr, // span + ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing + (int) ParallelGCThreads, // mt processing degree + true, // mt discovery + (int) ParallelGCThreads, // mt discovery degree + true, // atomic_discovery + &_is_alive_closure, // non-header is alive closure + false); // write barrier for next field updates _counters = new CollectorCounters("PSParallelCompact", 1); // Initialize static fields in ParCompactionManager.
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -411,7 +411,7 @@ template <class T> void PSPromotionManager::process_array_chunk_work( oop obj, int start, int end) { - assert(start < end, "invariant"); + assert(start <= end, "invariant"); T* const base = (T*)objArrayOop(obj)->base(); T* p = base + start; T* const chunk_end = base + end;
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "classfile/symbolTable.hpp" #include "gc_implementation/parallelScavenge/cardTableExtension.hpp" #include "gc_implementation/parallelScavenge/gcTaskManager.hpp" #include "gc_implementation/parallelScavenge/generationSizer.hpp" @@ -439,6 +440,14 @@ reference_processor()->enqueue_discovered_references(NULL); } + if (!JavaObjectsInPerm) { + // Unlink any dead interned Strings + StringTable::unlink(&_is_alive_closure); + // Process the remaining live ones + PSScavengeRootsClosure root_closure(promotion_manager); + StringTable::oops_do(&root_closure); + } + // Finally, flush the promotion_manager's labs, and deallocate its stacks. PSPromotionManager::post_scavenge(); @@ -796,13 +805,15 @@ // Initialize ref handling object for scavenging. MemRegion mr = young_gen->reserved(); - _ref_processor = ReferenceProcessor::create_ref_processor( - mr, // span - true, // atomic_discovery - true, // mt_discovery - NULL, // is_alive_non_header - ParallelGCThreads, - ParallelRefProcEnabled); + _ref_processor = + new ReferenceProcessor(mr, // span + ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing + (int) ParallelGCThreads, // mt processing degree + true, // mt discovery + (int) ParallelGCThreads, // mt discovery degree + true, // atomic_discovery + NULL, // header provides liveness info + false); // next field updates do not need write barrier // Cache the cardtable BarrierSet* bs = Universe::heap()->barrier_set();
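The added scavenge step first unlinks interned Strings whose referents did not survive and then visits the survivors as strong roots. The generic sketch below illustrates that unlink-then-visit pattern over an ordinary container; the predicate and visitor stand in for the _is_alive_closure and PSScavengeRootsClosure used in the patch.

    #include <list>

    // Unlink dead entries, then visit the remaining live ones as roots.
    template <class T, class IsAlive, class Visit>
    void unlink_then_visit(std::list<T>& table, IsAlive is_alive, Visit visit) {
      for (typename std::list<T>::iterator it = table.begin(); it != table.end(); ) {
        if (!is_alive(*it)) {
          it = table.erase(it);   // analogue of StringTable::unlink(&_is_alive_closure)
        } else {
          ++it;
        }
      }
      for (typename std::list<T>::iterator it = table.begin(); it != table.end(); ++it) {
        visit(*it);               // analogue of StringTable::oops_do(&root_closure)
      }
    }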
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -86,4 +86,21 @@ } } +class PSScavengeRootsClosure: public OopClosure { + private: + PSPromotionManager* _promotion_manager; + + protected: + template <class T> void do_oop_work(T *p) { + if (PSScavenge::should_scavenge(p)) { + // We never card mark roots, maybe call a func without test? + PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p); + } + } + public: + PSScavengeRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { } + void do_oop(oop* p) { PSScavengeRootsClosure::do_oop_work(p); } + void do_oop(narrowOop* p) { PSScavengeRootsClosure::do_oop_work(p); } +}; + #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSSCAVENGE_INLINE_HPP
--- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ #include "gc_implementation/parallelScavenge/psMarkSweep.hpp" #include "gc_implementation/parallelScavenge/psPromotionManager.hpp" #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" -#include "gc_implementation/parallelScavenge/psScavenge.hpp" +#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" #include "gc_implementation/parallelScavenge/psTasks.hpp" #include "memory/iterator.hpp" #include "memory/universe.hpp" @@ -46,24 +46,6 @@ // ScavengeRootsTask // -// Define before use -class PSScavengeRootsClosure: public OopClosure { - private: - PSPromotionManager* _promotion_manager; - - protected: - template <class T> void do_oop_work(T *p) { - if (PSScavenge::should_scavenge(p)) { - // We never card mark roots, maybe call a func without test? - PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p); - } - } - public: - PSScavengeRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { } - void do_oop(oop* p) { PSScavengeRootsClosure::do_oop_work(p); } - void do_oop(narrowOop* p) { PSScavengeRootsClosure::do_oop_work(p); } -}; - void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) { assert(Universe::heap()->is_gc_active(), "called outside gc");
--- a/src/share/vm/gc_implementation/shared/allocationStats.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/shared/allocationStats.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/shared/gcUtil.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/shared/gcUtil.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/shared/markSweep.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_implementation/shared/markSweep.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_interface/collectedHeap.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/gc_interface/collectedHeap.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/interpreter/abstractInterpreter.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/interpreter/abstractInterpreter.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -96,7 +96,7 @@ empty, // empty method (code: _return) accessor, // accessor method (code: _aload_0, _getfield, _(a|i)return) abstract, // abstract method (throws an AbstractMethodException) - method_handle, // java.dyn.MethodHandles::invoke + method_handle, // java.lang.invoke.MethodHandles::invoke java_lang_math_sin, // implementation of java.lang.Math.sin (x) java_lang_math_cos, // implementation of java.lang.Math.cos (x) java_lang_math_tan, // implementation of java.lang.Math.tan (x)
--- a/src/share/vm/interpreter/bytecodeInterpreter.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -554,7 +554,7 @@ /* 0xB0 */ &&opc_areturn, &&opc_return, &&opc_getstatic, &&opc_putstatic, /* 0xB4 */ &&opc_getfield, &&opc_putfield, &&opc_invokevirtual,&&opc_invokespecial, -/* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,&&opc_default, &&opc_new, +/* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,&&opc_invokedynamic,&&opc_new, /* 0xBC */ &&opc_newarray, &&opc_anewarray, &&opc_arraylength, &&opc_athrow, /* 0xC0 */ &&opc_checkcast, &&opc_instanceof, &&opc_monitorenter, &&opc_monitorexit, @@ -568,7 +568,7 @@ /* 0xDC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, /* 0xE0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, -/* 0xE4 */ &&opc_default, &&opc_return_register_finalizer, &&opc_default, &&opc_default, +/* 0xE4 */ &&opc_default, &&opc_fast_aldc, &&opc_fast_aldc_w, &&opc_return_register_finalizer, /* 0xE8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, /* 0xEC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, @@ -656,7 +656,7 @@ // oop rcvr = locals[0].j.r; oop rcvr; if (METHOD->is_static()) { - rcvr = METHOD->constants()->pool_holder()->klass_part()->java_mirror(); + rcvr = METHOD->constants()->pool_holder()->java_mirror(); } else { rcvr = LOCALS_OBJECT(0); VERIFY_OOP(rcvr); @@ -1718,8 +1718,7 @@ } // Need to throw illegal monitor state exception CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception); - // Should never reach here... - assert(false, "Should have thrown illegal monitor exception"); + ShouldNotReachHere(); } /* All of the non-quick opcodes. */ @@ -2111,8 +2110,8 @@ break; case JVM_CONSTANT_Class: - VERIFY_OOP(constants->resolved_klass_at(index)->klass_part()->java_mirror()); - SET_STACK_OBJECT(constants->resolved_klass_at(index)->klass_part()->java_mirror(), 0); + VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror()); + SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0); break; case JVM_CONSTANT_UnresolvedString: @@ -2147,6 +2146,74 @@ UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2); } + CASE(_fast_aldc_w): + CASE(_fast_aldc): { + if (!EnableInvokeDynamic) { + // We should not encounter this bytecode if !EnableInvokeDynamic. + // The verifier will stop it. However, if we get past the verifier, + // this will stop the thread in a reasonable way, without crashing the JVM. + CALL_VM(InterpreterRuntime::throw_IncompatibleClassChangeError(THREAD), + handle_exception); + ShouldNotReachHere(); + } + + u2 index; + int incr; + if (opcode == Bytecodes::_fast_aldc) { + index = pc[1]; + incr = 2; + } else { + index = Bytes::get_native_u2(pc+1); + incr = 3; + } + + // We are resolved if the f1 field contains a non-null object (CallSite, etc.) + // This kind of CP cache entry does not need to match the flags byte, because + // there is a 1-1 relation between bytecode type and CP entry type. + ConstantPoolCacheEntry* cache = cp->entry_at(index); + if (cache->is_f1_null()) { + CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), + handle_exception); + } + + VERIFY_OOP(cache->f1()); + SET_STACK_OBJECT(cache->f1(), 0); + UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); + } + + CASE(_invokedynamic): { + if (!EnableInvokeDynamic) { + // We should not encounter this bytecode if !EnableInvokeDynamic. + // The verifier will stop it. 
However, if we get past the verifier, + // this will stop the thread in a reasonable way, without crashing the JVM. + CALL_VM(InterpreterRuntime::throw_IncompatibleClassChangeError(THREAD), + handle_exception); + ShouldNotReachHere(); + } + + int index = Bytes::get_native_u4(pc+1); + + // We are resolved if the f1 field contains a non-null object (CallSite, etc.) + // This kind of CP cache entry does not need to match the flags byte, because + // there is a 1-1 relation between bytecode type and CP entry type. + assert(constantPoolCacheOopDesc::is_secondary_index(index), "incorrect format"); + ConstantPoolCacheEntry* cache = cp->secondary_entry_at(index); + if (cache->is_f1_null()) { + CALL_VM(InterpreterRuntime::resolve_invokedynamic(THREAD), + handle_exception); + } + + VERIFY_OOP(cache->f1()); + oop method_handle = java_lang_invoke_CallSite::target(cache->f1()); + CHECK_NULL(method_handle); + + istate->set_msg(call_method_handle); + istate->set_callee((methodOop) method_handle); + istate->set_bcp_advance(5); + + UPDATE_PC_AND_RETURN(0); // I'll be back... + } + CASE(_invokeinterface): { u2 index = Bytes::get_native_u2(pc+1); @@ -2383,17 +2450,6 @@ } DEFAULT: -#ifdef ZERO - // Some zero configurations use the C++ interpreter as a - // fallback interpreter and have support for platform - // specific fast bytecodes which aren't supported here, so - // redispatch to the equivalent non-fast bytecode when they - // are encountered. - if (Bytecodes::is_defined((Bytecodes::Code)opcode)) { - opcode = (jubyte)Bytecodes::java_code((Bytecodes::Code)opcode); - goto opcode_switch; - } -#endif fatal(err_msg("Unimplemented opcode %d = %s", opcode, Bytecodes::name((Bytecodes::Code)opcode))); goto finish;
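The two fast constant-load bytecodes added to the C++ interpreter's dispatch differ only in operand width, as the index/incr computation in the patch shows: _fast_aldc carries a one-byte constant-pool-cache index and advances the pc by 2, _fast_aldc_w a native-order two-byte index and advances by 3. A standalone restatement of just that decoding follows; ToyBytecode and the helper are illustrative, not interpreter code.

    #include <cstdint>
    #include <cstring>

    enum ToyBytecode { fast_aldc, fast_aldc_w };

    // Decode the CP-cache index and the pc increment for the two fast
    // constant-load forms. The wide form reads the index in native byte
    // order, mirroring Bytes::get_native_u2 in the patch.
    void decode_fast_aldc(ToyBytecode opcode, const uint8_t* pc,
                          uint16_t* index, int* pc_increment) {
      if (opcode == fast_aldc) {
        *index = pc[1];
        *pc_increment = 2;
      } else {
        uint16_t raw;
        std::memcpy(&raw, pc + 1, sizeof(raw));  // native byte order
        *index = raw;
        *pc_increment = 3;
      }
    }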
--- a/src/share/vm/interpreter/bytecodeInterpreter.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/interpreter/bytecodeInterpreter.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -107,6 +107,7 @@ rethrow_exception, // unwinding and throwing exception // requests to frame manager from C++ interpreter call_method, // request for new frame from interpreter, manager responds with method_entry + call_method_handle, // like the above, except the callee is a method handle return_from_method, // request from interpreter to unwind, manager responds with method_continue more_monitors, // need a new monitor throwing_exception, // unwind stack and rethrow
--- a/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/interpreter/bytecodeTracer.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/interpreter/bytecodeTracer.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -345,7 +345,6 @@ break; case JVM_CONSTANT_NameAndType: case JVM_CONSTANT_InvokeDynamic: - case JVM_CONSTANT_InvokeDynamicTrans: has_klass = false; break; default:
--- a/src/share/vm/interpreter/cppInterpreter.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/interpreter/cppInterpreter.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/interpreter/cppInterpreterGenerator.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/interpreter/cppInterpreterGenerator.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/interpreter/interpreter.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/interpreter/interpreter.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/interpreter/interpreterGenerator.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/interpreter/interpreterGenerator.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/interpreter/interpreterRuntime.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/interpreter/interpreterRuntime.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -118,7 +118,7 @@ if (tag.is_unresolved_klass() || tag.is_klass()) { klassOop klass = pool->klass_at(index, CHECK); - oop java_class = klass->klass_part()->java_mirror(); + oop java_class = klass->java_mirror(); thread->set_vm_result(java_class); } else { #ifdef ASSERT @@ -369,7 +369,7 @@ } // create exception - THROW_MSG(vmSymbols::java_dyn_WrongMethodTypeException(), message); + THROW_MSG(vmSymbols::java_lang_invoke_WrongMethodTypeException(), message); } IRT_END @@ -794,7 +794,7 @@ Handle info; // optional argument(s) in JVM_CONSTANT_InvokeDynamic Handle bootm = SystemDictionary::find_bootstrap_method(caller_method, caller_bci, main_index, info, CHECK); - if (!java_dyn_MethodHandle::is_instance(bootm())) { + if (!java_lang_invoke_MethodHandle::is_instance(bootm())) { THROW_MSG(vmSymbols::java_lang_IllegalStateException(), "no bootstrap method found for invokedynamic"); } @@ -980,7 +980,8 @@ ConstantPoolCacheEntry *cp_entry)) // check the access_flags for the field in the klass - instanceKlass* ik = instanceKlass::cast((klassOop)cp_entry->f1()); + + instanceKlass* ik = instanceKlass::cast(java_lang_Class::as_klassOop(cp_entry->f1())); typeArrayOop fields = ik->fields(); int index = cp_entry->field_index(); assert(index < fields->length(), "holders field index is out of range"); @@ -1006,7 +1007,7 @@ // non-static field accessors have an object, but we need a handle h_obj = Handle(thread, obj); } - instanceKlassHandle h_cp_entry_f1(thread, (klassOop)cp_entry->f1()); + instanceKlassHandle h_cp_entry_f1(thread, java_lang_Class::as_klassOop(cp_entry->f1())); jfieldID fid = jfieldIDWorkaround::to_jfieldID(h_cp_entry_f1, cp_entry->f2(), is_static); JvmtiExport::post_field_access(thread, method(thread), bcp(thread), h_cp_entry_f1, h_obj, fid); IRT_END @@ -1014,7 +1015,7 @@ IRT_ENTRY(void, InterpreterRuntime::post_field_modification(JavaThread *thread, oopDesc* obj, ConstantPoolCacheEntry *cp_entry, jvalue *value)) - klassOop k = (klassOop)cp_entry->f1(); + klassOop k = java_lang_Class::as_klassOop(cp_entry->f1()); // check the access_flags for the field in the klass instanceKlass* ik = instanceKlass::cast(k);
--- a/src/share/vm/interpreter/linkResolver.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/interpreter/linkResolver.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -176,7 +176,7 @@ void LinkResolver::lookup_method_in_klasses(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) { methodOop result_oop = klass->uncached_lookup_method(name, signature); - if (EnableMethodHandles && result_oop != NULL) { + if (EnableInvokeDynamic && result_oop != NULL) { switch (result_oop->intrinsic_id()) { case vmIntrinsics::_invokeExact: case vmIntrinsics::_invokeGeneric: @@ -214,14 +214,14 @@ KlassHandle klass, Symbol* name, Symbol* signature, KlassHandle current_klass, TRAPS) { - if (EnableMethodHandles && + if (EnableInvokeDynamic && klass() == SystemDictionary::MethodHandle_klass() && methodOopDesc::is_method_handle_invoke_name(name)) { - if (!MethodHandles::enabled()) { + if (!THREAD->is_Compiler_thread() && !MethodHandles::enabled()) { // Make sure the Java part of the runtime has been booted up. klassOop natives = SystemDictionary::MethodHandleNatives_klass(); if (natives == NULL || instanceKlass::cast(natives)->is_not_initialized()) { - SystemDictionary::resolve_or_fail(vmSymbols::sun_dyn_MethodHandleNatives(), + SystemDictionary::resolve_or_fail(vmSymbols::java_lang_invoke_MethodHandleNatives(), Handle(), Handle(), true, @@ -298,7 +298,7 @@ } void LinkResolver::resolve_dynamic_method(methodHandle& resolved_method, KlassHandle& resolved_klass, constantPoolHandle pool, int index, TRAPS) { - // The class is java.dyn.MethodHandle + // The class is java.lang.invoke.MethodHandle resolved_klass = SystemDictionaryHandles::MethodHandle_klass(); Symbol* method_name = vmSymbols::invokeExact_name();
--- a/src/share/vm/interpreter/linkResolver.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/interpreter/linkResolver.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/interpreter/rewriter.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/interpreter/rewriter.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -52,7 +52,6 @@ case JVM_CONSTANT_MethodHandle : // fall through case JVM_CONSTANT_MethodType : // fall through case JVM_CONSTANT_InvokeDynamic : // fall through - case JVM_CONSTANT_InvokeDynamicTrans: // fall through add_cp_cache_entry(i); break; } @@ -62,7 +61,6 @@ "all cp cache indexes fit in a u2"); _have_invoke_dynamic = ((tag_mask & (1 << JVM_CONSTANT_InvokeDynamic)) != 0); - _have_invoke_dynamic |= ((tag_mask & (1 << JVM_CONSTANT_InvokeDynamicTrans)) != 0); } @@ -81,16 +79,10 @@ if (pool_index >= 0 && _pool->tag_at(pool_index).is_invoke_dynamic()) { int bsm_index = _pool->invoke_dynamic_bootstrap_method_ref_index_at(pool_index); - if (bsm_index != 0) { - assert(_pool->tag_at(bsm_index).is_method_handle(), "must be a MH constant"); - // There is a CP cache entry holding the BSM for these calls. - int bsm_cache_index = cp_entry_to_cp_cache(bsm_index); - cache->entry_at(i)->initialize_bootstrap_method_index_in_cache(bsm_cache_index); - } else { - // There is no CP cache entry holding the BSM for these calls. - // We will need to look for a class-global BSM, later. - guarantee(AllowTransitionalJSR292, ""); - } + assert(_pool->tag_at(bsm_index).is_method_handle(), "must be a MH constant"); + // There is a CP cache entry holding the BSM for these calls. + int bsm_cache_index = cp_entry_to_cp_cache(bsm_index); + cache->entry_at(i)->initialize_bootstrap_method_index_in_cache(bsm_cache_index); } } }
--- a/src/share/vm/interpreter/templateInterpreter.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/interpreter/templateInterpreter.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/interpreter/templateInterpreterGenerator.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/interpreter/templateInterpreterGenerator.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/interpreter/templateTable.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/interpreter/templateTable.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/allocation.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/allocation.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -422,6 +422,9 @@ return sum; // Return total consumed space. } +void Arena::signal_out_of_memory(size_t sz, const char* whence) const { + vm_exit_out_of_memory(sz, whence); +} // Grow a new Chunk void* Arena::grow( size_t x ) { @@ -431,8 +434,9 @@ Chunk *k = _chunk; // Get filled-up chunk address _chunk = new (len) Chunk(len); - if (_chunk == NULL) - vm_exit_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow"); + if (_chunk == NULL) { + signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow"); + } if (k) k->set_next(_chunk); // Append new chunk to end of linked list else _first = _chunk; @@ -529,6 +533,7 @@ // for debugging with UseMallocOnly void* Arena::internal_malloc_4(size_t x) { assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" ); + check_for_overflow(x, "Arena::internal_malloc_4"); if (_hwm + x > _max) { return grow(x); } else {
--- a/src/share/vm/memory/allocation.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/allocation.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -207,6 +207,15 @@ debug_only(void* malloc(size_t size);) debug_only(void* internal_malloc_4(size_t x);) NOT_PRODUCT(void inc_bytes_allocated(size_t x);) + + void signal_out_of_memory(size_t request, const char* whence) const; + + void check_for_overflow(size_t request, const char* whence) const { + if (UINTPTR_MAX - request < (uintptr_t)_hwm) { + signal_out_of_memory(request, whence); + } + } + public: Arena(); Arena(size_t init_size); @@ -220,6 +229,7 @@ assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT) , "should be a power of 2"); x = ARENA_ALIGN(x); debug_only(if (UseMallocOnly) return malloc(x);) + check_for_overflow(x, "Arena::Amalloc"); NOT_PRODUCT(inc_bytes_allocated(x);) if (_hwm + x > _max) { return grow(x); @@ -233,6 +243,7 @@ void *Amalloc_4(size_t x) { assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" ); debug_only(if (UseMallocOnly) return malloc(x);) + check_for_overflow(x, "Arena::Amalloc_4"); NOT_PRODUCT(inc_bytes_allocated(x);) if (_hwm + x > _max) { return grow(x); @@ -253,6 +264,7 @@ size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm; x += delta; #endif + check_for_overflow(x, "Arena::Amalloc_D"); NOT_PRODUCT(inc_bytes_allocated(x);) if (_hwm + x > _max) { return grow(x); // grow() returns a result aligned >= 8 bytes.
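To see the new Amalloc overflow guard in isolation, here is a minimal standalone sketch of the same idiom; the names below (hwm, max, arena_alloc_sketch) are illustrative stand-ins, not the actual HotSpot code, which uses Arena::_hwm/_max and signal_out_of_memory().

#include <cstddef>
#include <cstdint>

// Minimal sketch of the check_for_overflow() guard added to Arena::Amalloc
// and friends; 'hwm' and 'max' stand in for Arena::_hwm and Arena::_max.
static void* arena_alloc_sketch(char*& hwm, char* max, size_t request) {
  // If hwm + request would wrap past the top of the address space, the later
  // "hwm + request > max" test could wrongly pass, so fail the request first.
  if (UINTPTR_MAX - request < (uintptr_t)hwm) {
    return NULL;              // the real code calls signal_out_of_memory()
  }
  if (hwm + request > max) {
    return NULL;              // the real code grows a new Chunk via Arena::grow()
  }
  void* p = hwm;
  hwm += request;
  return p;
}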
--- a/src/share/vm/memory/barrierSet.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/barrierSet.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,9 +35,9 @@ start, count); #endif if (UseCompressedOops) { - Universe::heap()->barrier_set()->write_ref_array_pre((narrowOop*)start, (int)count); + Universe::heap()->barrier_set()->write_ref_array_pre((narrowOop*)start, (int)count, false); } else { - Universe::heap()->barrier_set()->write_ref_array_pre( (oop*)start, (int)count); + Universe::heap()->barrier_set()->write_ref_array_pre( (oop*)start, (int)count, false); } }
--- a/src/share/vm/memory/barrierSet.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/barrierSet.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,6 +44,10 @@ Uninit }; + enum Flags { + None = 0, + TargetUninitialized = 1 + }; protected: int _max_covered_regions; Name _kind; @@ -128,8 +132,10 @@ virtual void read_prim_array(MemRegion mr) = 0; // Below length is the # array elements being written - virtual void write_ref_array_pre( oop* dst, int length) {} - virtual void write_ref_array_pre(narrowOop* dst, int length) {} + virtual void write_ref_array_pre(oop* dst, int length, + bool dest_uninitialized = false) {} + virtual void write_ref_array_pre(narrowOop* dst, int length, + bool dest_uninitialized = false) {} // Below count is the # array elements being written, starting // at the address "start", which may not necessarily be HeapWord-aligned inline void write_ref_array(HeapWord* start, size_t count);
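A hedged sketch of why the new dest_uninitialized argument (and the TargetUninitialized flag) exists: a collector with a pre-write barrier records the reference values about to be overwritten, and a freshly allocated destination has no previous values worth recording. This is only the shape of the idea, not the real G1 barrier implementation; all names below are invented for the sketch.

#include <cstddef>

struct PreBarrierSketch {
  void record_old_value(void* /*old_ref*/) { /* e.g. enqueue for concurrent marking */ }

  void write_ref_array_pre(void** dst, int length, bool dest_uninitialized = false) {
    if (dest_uninitialized) {
      return;                       // no previous references to remember
    }
    for (int i = 0; i < length; i++) {
      record_old_value(dst[i]);     // remember the value about to be overwritten
    }
  }
};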
--- a/src/share/vm/memory/cardTableModRefBS.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/cardTableModRefBS.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -382,6 +382,11 @@ return (addr_for(pcard) == p); } + HeapWord* align_to_card_boundary(HeapWord* p) { + jbyte* pcard = byte_for(p + card_size_in_words - 1); + return addr_for(pcard); + } + // The kinds of precision a CardTableModRefBS may offer. enum PrecisionStyle { Precise,
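The arithmetic behind the new align_to_card_boundary() helper (byte_for(p + card_size_in_words - 1) followed by addr_for()) is a round-up to the next card start. A self-contained sketch using word indices instead of HeapWord* pointers, with card_size_in_words as a stand-in parameter:

#include <cstddef>
#include <cstdint>

// Step almost a full card forward, then truncate to the start of that card.
static uintptr_t align_up_to_card(uintptr_t word_index, size_t card_size_in_words) {
  return ((word_index + card_size_in_words - 1) / card_size_in_words) * card_size_in_words;
}

// e.g. with card_size_in_words == 64:
//   align_up_to_card(0,  64) == 0     (already on a card boundary)
//   align_up_to_card(1,  64) == 64
//   align_up_to_card(64, 64) == 64
//   align_up_to_card(65, 64) == 128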
--- a/src/share/vm/memory/cardTableRS.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/cardTableRS.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -318,17 +318,28 @@ protected: template <class T> void do_oop_work(T* p) { HeapWord* jp = (HeapWord*)p; - if (jp >= _begin && jp < _end) { - oop obj = oopDesc::load_decode_heap_oop(p); - guarantee(obj == NULL || - (HeapWord*)p < _boundary || - (HeapWord*)obj >= _boundary, - "pointer on clean card crosses boundary"); - } + assert(jp >= _begin && jp < _end, + err_msg("Error: jp " PTR_FORMAT " should be within " + "[_begin, _end) = [" PTR_FORMAT "," PTR_FORMAT ")", + _begin, _end)); + oop obj = oopDesc::load_decode_heap_oop(p); + guarantee(obj == NULL || (HeapWord*)obj >= _boundary, + err_msg("pointer " PTR_FORMAT " at " PTR_FORMAT " on " + "clean card crosses boundary" PTR_FORMAT, + (HeapWord*)obj, jp, _boundary)); } + public: VerifyCleanCardClosure(HeapWord* b, HeapWord* begin, HeapWord* end) : - _boundary(b), _begin(begin), _end(end) {} + _boundary(b), _begin(begin), _end(end) { + assert(b <= begin, + err_msg("Error: boundary " PTR_FORMAT " should be at or below begin " PTR_FORMAT, + b, begin)); + assert(begin <= end, + err_msg("Error: begin " PTR_FORMAT " should be strictly below end " PTR_FORMAT, + begin, end)); + } + virtual void do_oop(oop* p) { VerifyCleanCardClosure::do_oop_work(p); } virtual void do_oop(narrowOop* p) { VerifyCleanCardClosure::do_oop_work(p); } }; @@ -392,13 +403,14 @@ } } // Now traverse objects until end. - HeapWord* cur = start_block; - VerifyCleanCardClosure verify_blk(gen_boundary, begin, end); - while (cur < end) { - if (s->block_is_obj(cur) && s->obj_is_alive(cur)) { - oop(cur)->oop_iterate(&verify_blk); + if (begin < end) { + MemRegion mr(begin, end); + VerifyCleanCardClosure verify_blk(gen_boundary, begin, end); + for (HeapWord* cur = start_block; cur < end; cur += s->block_size(cur)) { + if (s->block_is_obj(cur) && s->obj_is_alive(cur)) { + oop(cur)->oop_iterate(&verify_blk, mr); + } } - cur += s->block_size(cur); } cur_entry = first_dirty; } else {
--- a/src/share/vm/memory/classify.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/classify.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/collectorPolicy.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/collectorPolicy.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -293,10 +293,11 @@ // Determine maximum size of gen0 size_t max_new_size = 0; - if (FLAG_IS_CMDLINE(MaxNewSize)) { + if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) { if (MaxNewSize < min_alignment()) { max_new_size = min_alignment(); - } else if (MaxNewSize >= max_heap_byte_size()) { + } + if (MaxNewSize >= max_heap_byte_size()) { max_new_size = align_size_down(max_heap_byte_size() - min_alignment(), min_alignment()); warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or " @@ -333,7 +334,7 @@ assert(max_new_size > 0, "All paths should set max_new_size"); // Given the maximum gen0 size, determine the initial and - // minimum sizes. + // minimum gen0 sizes. if (max_heap_byte_size() == min_heap_byte_size()) { // The maximum and minimum heap sizes are the same so @@ -396,7 +397,7 @@ } if (PrintGCDetails && Verbose) { - gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT " Initial gen0 " + gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT " Initial gen0 " SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, min_gen0_size(), initial_gen0_size(), max_gen0_size()); } @@ -448,7 +449,7 @@ // At this point the minimum, initial and maximum sizes // of the overall heap and of gen0 have been determined. // The maximum gen1 size can be determined from the maximum gen0 - // and maximum heap size since not explicit flags exits + // and maximum heap size since no explicit flags exits // for setting the gen1 maximum. _max_gen1_size = max_heap_byte_size() - _max_gen0_size; _max_gen1_size = @@ -494,13 +495,13 @@ "generation sizes: using maximum heap = " SIZE_FORMAT " -XX:OldSize flag is being ignored", max_heap_byte_size()); - } + } // If there is an inconsistency between the OldSize and the minimum and/or // initial size of gen0, since OldSize was explicitly set, OldSize wins. if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size, min_heap_byte_size(), OldSize)) { if (PrintGCDetails && Verbose) { - gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT " Initial gen0 " + gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 " SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, min_gen0_size(), initial_gen0_size(), max_gen0_size()); } @@ -509,7 +510,7 @@ if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size, initial_heap_byte_size(), OldSize)) { if (PrintGCDetails && Verbose) { - gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT " Initial gen0 " + gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT " Initial gen0 " SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, min_gen0_size(), initial_gen0_size(), max_gen0_size()); }
--- a/src/share/vm/memory/compactingPermGenGen.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/compactingPermGenGen.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/compactingPermGenGen.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/compactingPermGenGen.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -105,7 +105,7 @@ public: enum { - vtbl_list_size = 16, // number of entries in the shared space vtable list. + vtbl_list_size = 17, // number of entries in the shared space vtable list. num_virtuals = 200 // number of virtual methods in Klass (or // subclass) objects, or greater. };
--- a/src/share/vm/memory/dump.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/dump.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -80,16 +80,7 @@ oop obj = *p; if (obj->klass() == SystemDictionary::String_klass()) { - int hash; - typeArrayOop value = java_lang_String::value(obj); - int length = java_lang_String::length(obj); - if (length == 0) { - hash = 0; - } else { - int offset = java_lang_String::offset(obj); - jchar* s = value->char_at_addr(offset); - hash = StringTable::hash_string(s, length); - } + int hash = java_lang_String::hash_string(obj); obj->int_field_put(hash_offset, hash); } } @@ -1561,6 +1552,7 @@ // thread because it requires object allocation. LinkClassesClosure lcc(Thread::current()); object_iterate(&lcc); + ensure_parsability(false); // arg is actually don't care tty->print_cr("done. "); // Create and dump the shared spaces.
--- a/src/share/vm/memory/genCollectedHeap.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/genCollectedHeap.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/genMarkSweep.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/genMarkSweep.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/generation.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/generation.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -83,14 +83,11 @@ } // By default we get a single threaded default reference processor; -// generations needing multi-threaded refs discovery override this method. +// generations needing multi-threaded refs processing or discovery override this method. void Generation::ref_processor_init() { assert(_ref_processor == NULL, "a reference processor already exists"); assert(!_reserved.is_empty(), "empty generation?"); - _ref_processor = - new ReferenceProcessor(_reserved, // span - refs_discovery_is_atomic(), // atomic_discovery - refs_discovery_is_mt()); // mt_discovery + _ref_processor = new ReferenceProcessor(_reserved); // a vanilla reference processor if (_ref_processor == NULL) { vm_exit_during_initialization("Could not allocate ReferenceProcessor object"); }
--- a/src/share/vm/memory/heap.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/heap.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -316,12 +316,19 @@ } size_t CodeHeap::largest_free_block() const { + // First check unused space excluding free blocks. + size_t free_sz = size(_free_segments); + size_t unused = max_capacity() - allocated_capacity() - free_sz; + if (unused >= free_sz) + return unused; + + // Now check largest free block. size_t len = 0; for (FreeBlock* b = _freelist; b != NULL; b = b->link()) { if (b->length() > len) len = b->length(); } - return size(len); + return MAX2(unused, size(len)); } // Free list management
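A standalone sketch of the updated largest_free_block() control flow. The early return is sound because free_sz is the combined size of every free segment, so if the never-used tail of the heap is at least that large no single free block can exceed it and the list walk is unnecessary. The sketch abstracts away the segment-to-byte conversion done by size(); all names are stand-ins.

#include <algorithm>
#include <cstddef>

struct FreeBlockSketch { size_t length; FreeBlockSketch* next; };

static size_t largest_free_block_sketch(size_t max_capacity, size_t allocated,
                                        size_t total_free, FreeBlockSketch* freelist) {
  size_t unused = max_capacity - allocated - total_free;   // untouched tail of the heap
  if (unused >= total_free) {
    return unused;                                         // no free block can be larger
  }
  size_t len = 0;
  for (FreeBlockSketch* b = freelist; b != NULL; b = b->next) {
    len = std::max(len, b->length);                        // largest individual free block
  }
  return std::max(unused, len);
}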
--- a/src/share/vm/memory/heapInspection.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/heapInspection.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/iterator.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/iterator.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/oopFactory.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/oopFactory.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -117,12 +117,12 @@ } -klassOop oopFactory::new_instanceKlass(int vtable_len, int itable_len, +klassOop oopFactory::new_instanceKlass(Symbol* name, int vtable_len, int itable_len, int static_field_size, unsigned int nonstatic_oop_map_count, ReferenceType rt, TRAPS) { instanceKlassKlass* ikk = instanceKlassKlass::cast(Universe::instanceKlassKlassObj()); - return ikk->allocate_instance_klass(vtable_len, itable_len, static_field_size, nonstatic_oop_map_count, rt, CHECK_NULL); + return ikk->allocate_instance_klass(name, vtable_len, itable_len, static_field_size, nonstatic_oop_map_count, rt, CHECK_NULL); }
--- a/src/share/vm/memory/oopFactory.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/oopFactory.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -72,7 +72,8 @@ TRAPS); // Instance classes - static klassOop new_instanceKlass(int vtable_len, int itable_len, + static klassOop new_instanceKlass(Symbol* name, + int vtable_len, int itable_len, int static_field_size, unsigned int nonstatic_oop_map_count, ReferenceType rt, TRAPS);
--- a/src/share/vm/memory/referenceProcessor.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/referenceProcessor.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -102,40 +102,17 @@ "Unrecongnized RefDiscoveryPolicy"); } -ReferenceProcessor* -ReferenceProcessor::create_ref_processor(MemRegion span, - bool atomic_discovery, - bool mt_discovery, - BoolObjectClosure* is_alive_non_header, - int parallel_gc_threads, - bool mt_processing, - bool dl_needs_barrier) { - int mt_degree = 1; - if (parallel_gc_threads > 1) { - mt_degree = parallel_gc_threads; - } - ReferenceProcessor* rp = - new ReferenceProcessor(span, atomic_discovery, - mt_discovery, mt_degree, - mt_processing && (parallel_gc_threads > 0), - dl_needs_barrier); - if (rp == NULL) { - vm_exit_during_initialization("Could not allocate ReferenceProcessor object"); - } - rp->set_is_alive_non_header(is_alive_non_header); - rp->setup_policy(false /* default soft ref policy */); - return rp; -} - ReferenceProcessor::ReferenceProcessor(MemRegion span, - bool atomic_discovery, + bool mt_processing, + int mt_processing_degree, bool mt_discovery, - int mt_degree, - bool mt_processing, + int mt_discovery_degree, + bool atomic_discovery, + BoolObjectClosure* is_alive_non_header, bool discovered_list_needs_barrier) : _discovering_refs(false), _enqueuing_is_done(false), - _is_alive_non_header(NULL), + _is_alive_non_header(is_alive_non_header), _discovered_list_needs_barrier(discovered_list_needs_barrier), _bs(NULL), _processing_is_mt(mt_processing), @@ -144,8 +121,8 @@ _span = span; _discovery_is_atomic = atomic_discovery; _discovery_is_mt = mt_discovery; - _num_q = mt_degree; - _max_num_q = mt_degree; + _num_q = MAX2(1, mt_processing_degree); + _max_num_q = MAX2(_num_q, mt_discovery_degree); _discoveredSoftRefs = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_q * subclasses_of_ref); if (_discoveredSoftRefs == NULL) { vm_exit_during_initialization("Could not allocated RefProc Array"); @@ -163,6 +140,7 @@ if (discovered_list_needs_barrier) { _bs = Universe::heap()->barrier_set(); } + setup_policy(false /* default soft ref policy */); } #ifndef PRODUCT @@ -405,15 +383,14 @@ { } virtual void work(unsigned int work_id) { - assert(work_id < (unsigned int)_ref_processor.num_q(), "Index out-of-bounds"); + assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds"); // Simplest first cut: static partitioning. int index = work_id; // The increment on "index" must correspond to the maximum number of queues // (n_queues) with which that ReferenceProcessor was created. That // is because of the "clever" way the discovered references lists were - // allocated and are indexed into. That number is ParallelGCThreads - // currently. Assert that. - assert(_n_queues == (int) ParallelGCThreads, "Different number not expected"); + // allocated and are indexed into. 
+ assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected"); for (int j = 0; j < subclasses_of_ref; j++, index += _n_queues) { @@ -672,7 +649,7 @@ } } NOT_PRODUCT( - if (PrintGCDetails && TraceReferenceGC) { + if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) { gclog_or_tty->print_cr(" Dropped %d active Refs out of %d " "Refs in discovered list " INTPTR_FORMAT, iter.removed(), iter.processed(), (address)refs_list.head()); @@ -711,7 +688,7 @@ // Now close the newly reachable set complete_gc->do_void(); NOT_PRODUCT( - if (PrintGCDetails && TraceReferenceGC) { + if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) { gclog_or_tty->print_cr(" Dropped %d active Refs out of %d " "Refs in discovered list " INTPTR_FORMAT, iter.removed(), iter.processed(), (address)refs_list.head()); @@ -951,7 +928,7 @@ } if (PrintReferenceGC && PrintGCDetails) { size_t total = 0; - for (int i = 0; i < _num_q; ++i) { + for (int i = 0; i < _max_num_q; ++i) { total += refs_lists[i].length(); } gclog_or_tty->print(", %u refs", total); @@ -967,7 +944,7 @@ RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/); task_executor->execute(phase1); } else { - for (int i = 0; i < _num_q; i++) { + for (int i = 0; i < _max_num_q; i++) { process_phase1(refs_lists[i], policy, is_alive, keep_alive, complete_gc); } @@ -983,7 +960,7 @@ RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/); task_executor->execute(phase2); } else { - for (int i = 0; i < _num_q; i++) { + for (int i = 0; i < _max_num_q; i++) { process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc); } } @@ -994,7 +971,7 @@ RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/); task_executor->execute(phase3); } else { - for (int i = 0; i < _num_q; i++) { + for (int i = 0; i < _max_num_q; i++) { process_phase3(refs_lists[i], clear_referent, is_alive, keep_alive, complete_gc); } @@ -1008,7 +985,7 @@ // for (int j = 0; j < _num_q; j++) { // int index = i * _max_num_q + j; for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) { - if (TraceReferenceGC && PrintGCDetails && ((i % _num_q) == 0)) { + if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) { gclog_or_tty->print_cr( "\nScrubbing %s discovered list of Null referents", list_name(i)); @@ -1350,7 +1327,7 @@ { TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC, false, gclog_or_tty); - for (int i = 0; i < _num_q; i++) { + for (int i = 0; i < _max_num_q; i++) { if (yield->should_return()) { return; } @@ -1363,7 +1340,7 @@ { TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC, false, gclog_or_tty); - for (int i = 0; i < _num_q; i++) { + for (int i = 0; i < _max_num_q; i++) { if (yield->should_return()) { return; } @@ -1376,7 +1353,7 @@ { TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC, false, gclog_or_tty); - for (int i = 0; i < _num_q; i++) { + for (int i = 0; i < _max_num_q; i++) { if (yield->should_return()) { return; } @@ -1433,7 +1410,7 @@ complete_gc->do_void(); NOT_PRODUCT( - if (PrintGCDetails && PrintReferenceGC) { + if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) { gclog_or_tty->print_cr(" Dropped %d Refs out of %d " "Refs in discovered list " INTPTR_FORMAT, iter.removed(), iter.processed(), (address)refs_list.head());
--- a/src/share/vm/memory/referenceProcessor.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/referenceProcessor.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -71,7 +71,7 @@ bool _enqueuing_is_done; // true if all weak references enqueued bool _processing_is_mt; // true during phases when // reference processing is MT. - int _next_id; // round-robin counter in + int _next_id; // round-robin mod _num_q counter in // support of work distribution // For collectors that do not keep GC marking information @@ -103,7 +103,8 @@ public: int num_q() { return _num_q; } - void set_mt_degree(int v) { _num_q = v; } + int max_num_q() { return _max_num_q; } + void set_active_mt_degree(int v) { _num_q = v; } DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; } static oop sentinel_ref() { return _sentinelRef; } static oop* adr_sentinel_ref() { return &_sentinelRef; } @@ -216,6 +217,7 @@ VoidClosure* complete_gc, YieldClosure* yield); + // round-robin mod _num_q (not: _not_ mode _max_num_q) int next_id() { int id = _next_id; if (++_next_id == _num_q) { @@ -256,23 +258,15 @@ _max_num_q(0), _processing_is_mt(false), _next_id(0) - {} - - ReferenceProcessor(MemRegion span, bool atomic_discovery, - bool mt_discovery, - int mt_degree = 1, - bool mt_processing = false, - bool discovered_list_needs_barrier = false); + { } - // Allocates and initializes a reference processor. - static ReferenceProcessor* create_ref_processor( - MemRegion span, - bool atomic_discovery, - bool mt_discovery, - BoolObjectClosure* is_alive_non_header = NULL, - int parallel_gc_threads = 1, - bool mt_processing = false, - bool discovered_list_needs_barrier = false); + // Default parameters give you a vanilla reference processor. + ReferenceProcessor(MemRegion span, + bool mt_processing = false, int mt_processing_degree = 1, + bool mt_discovery = false, int mt_discovery_degree = 1, + bool atomic_discovery = true, + BoolObjectClosure* is_alive_non_header = NULL, + bool discovered_list_needs_barrier = false); // RefDiscoveryPolicy values enum DiscoveryPolicy { @@ -397,20 +391,20 @@ // A utility class to temporarily change the MT'ness of // reference discovery for the given ReferenceProcessor // in the scope that contains it. -class ReferenceProcessorMTMutator: StackObj { +class ReferenceProcessorMTDiscoveryMutator: StackObj { private: ReferenceProcessor* _rp; bool _saved_mt; public: - ReferenceProcessorMTMutator(ReferenceProcessor* rp, - bool mt): + ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp, + bool mt): _rp(rp) { _saved_mt = _rp->discovery_is_mt(); _rp->set_mt_discovery(mt); } - ~ReferenceProcessorMTMutator() { + ~ReferenceProcessorMTDiscoveryMutator() { _rp->set_mt_discovery(_saved_mt); } };
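A compile-on-its-own sketch of the constructor shape declared above: every parameter after the span defaults to the single-threaded, atomic-discovery configuration, so "new ReferenceProcessor(_reserved)" (as in the generation.cpp hunk earlier) builds the vanilla processor. The _num_q/_max_num_q initialization also shows why many loops in referenceProcessor.cpp now run to _max_num_q: the discovered lists are sized by the discovery degree, which may exceed the active processing degree. All "Sketch" types and member names below are stand-ins, not the HotSpot declarations.

#include <cstddef>

struct MemRegionSketch {};
class BoolObjectClosureSketch;

class ReferenceProcessorSketch {
 public:
  ReferenceProcessorSketch(MemRegionSketch /*span*/,
                           bool /*mt_processing*/ = false, int mt_processing_degree = 1,
                           bool /*mt_discovery*/  = false, int mt_discovery_degree  = 1,
                           bool /*atomic_discovery*/ = true,
                           BoolObjectClosureSketch* /*is_alive_non_header*/ = NULL,
                           bool /*discovered_list_needs_barrier*/ = false) {
    _num_q     = mt_processing_degree > 1 ? mt_processing_degree : 1;          // MAX2(1, mt_processing_degree)
    _max_num_q = mt_discovery_degree > _num_q ? mt_discovery_degree : _num_q;  // MAX2(_num_q, mt_discovery_degree)
  }
  int num_q()     const { return _num_q; }      // active (processing) degree
  int max_num_q() const { return _max_num_q; }  // list-sizing (discovery) degree

 private:
  int _num_q;
  int _max_num_q;
};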
--- a/src/share/vm/memory/restore.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/restore.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/serialize.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/serialize.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/sharedHeap.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/sharedHeap.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -171,11 +171,13 @@ } if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) { - if (so & SO_Strings) { - StringTable::oops_do(roots); - } - // Verify if the string table contents are in the perm gen - NOT_PRODUCT(StringTable::oops_do(&assert_is_perm_closure)); + if (so & SO_Strings || (!collecting_perm_gen && !JavaObjectsInPerm)) { + StringTable::oops_do(roots); + } + if (JavaObjectsInPerm) { + // Verify the string table contents are in the perm gen + NOT_PRODUCT(StringTable::oops_do(&assert_is_perm_closure)); + } } if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
--- a/src/share/vm/memory/space.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/space.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -818,9 +818,14 @@ // This version requires locking. inline HeapWord* ContiguousSpace::allocate_impl(size_t size, HeapWord* const end_value) { + // In G1 there are places where a GC worker can allocates into a + // region using this serial allocation code without being prone to a + // race with other GC workers (we ensure that no other GC worker can + // access the same region at the same time). So the assert below is + // too strong in the case of G1. assert(Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && - Thread::current()->is_VM_thread()), + (Thread::current()->is_VM_thread() || UseG1GC)), "not locked"); HeapWord* obj = top(); if (pointer_delta(end_value, obj) >= size) {
--- a/src/share/vm/memory/universe.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/universe.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -51,6 +51,7 @@ #include "oops/cpCacheKlass.hpp" #include "oops/cpCacheOop.hpp" #include "oops/instanceKlass.hpp" +#include "oops/instanceMirrorKlass.hpp" #include "oops/instanceKlassKlass.hpp" #include "oops/instanceRefKlass.hpp" #include "oops/klassKlass.hpp" @@ -521,6 +522,7 @@ { objArrayKlassKlass o; add_vtable(list, &n, &o, count); } { instanceKlassKlass o; add_vtable(list, &n, &o, count); } { instanceKlass o; add_vtable(list, &n, &o, count); } + { instanceMirrorKlass o; add_vtable(list, &n, &o, count); } { instanceRefKlass o; add_vtable(list, &n, &o, count); } { typeArrayKlassKlass o; add_vtable(list, &n, &o, count); } { typeArrayKlass o; add_vtable(list, &n, &o, count); } @@ -547,7 +549,7 @@ KlassHandle k(THREAD, klassOop(obj)); // We will never reach the CATCH below since Exceptions::_throw will cause // the VM to exit if an exception is thrown during initialization - java_lang_Class::create_mirror(k, CATCH); + java_lang_Class::fixup_mirror(k, CATCH); // This call unconditionally creates a new mirror for k, // and links in k's component_mirror field if k is an array. // If k is an objArray, k's element type must already have @@ -605,6 +607,10 @@ // walk over permanent objects created so far (mostly classes) and fixup their mirrors. Note // that the number of objects allocated at this point is very small. assert(SystemDictionary::Class_klass_loaded(), "java.lang.Class should be loaded"); + + // Cache the start of the static fields + instanceMirrorKlass::init_offset_of_static_fields(); + FixupMirrorClosure blk; Universe::heap()->permanent_object_iterate(&blk); } @@ -1313,6 +1319,8 @@ JNIHandles::verify(); if (!silent) gclog_or_tty->print("C-heap "); os::check_heap(); + if (!silent) gclog_or_tty->print("code cache "); + CodeCache::verify_oops(); if (!silent) gclog_or_tty->print_cr("]"); _verify_in_progress = false;
--- a/src/share/vm/memory/universe.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/memory/universe.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/arrayKlass.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/arrayKlass.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/arrayKlassKlass.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/arrayKlassKlass.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -28,6 +28,13 @@ #include "oops/arrayKlassKlass.hpp" #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" +#ifndef SERIALGC +#include "gc_implementation/parNew/parOopClosures.inline.hpp" +#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" +#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" +#include "memory/cardTableRS.hpp" +#include "oops/oop.pcgc.inline.hpp" +#endif klassOop arrayKlassKlass::create_klass(TRAPS) { @@ -104,9 +111,12 @@ int arrayKlassKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) { assert(obj->is_klass(), "must be klass"); arrayKlass* ak = arrayKlass::cast(klassOop(obj)); - blk->do_oop(ak->adr_component_mirror()); - blk->do_oop(ak->adr_lower_dimension()); - blk->do_oop(ak->adr_higher_dimension()); + oop* addr = ak->adr_component_mirror(); + if (mr.contains(addr)) blk->do_oop(addr); + addr = ak->adr_lower_dimension(); + if (mr.contains(addr)) blk->do_oop(addr); + addr = ak->adr_higher_dimension(); + if (mr.contains(addr)) blk->do_oop(addr); ak->vtable()->oop_oop_iterate_m(blk, mr); return klassKlass::oop_oop_iterate_m(obj, blk, mr); } @@ -114,6 +124,12 @@ #ifndef SERIALGC void arrayKlassKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { assert(obj->blueprint()->oop_is_arrayKlass(),"must be an array klass"); + arrayKlass* ak = arrayKlass::cast(klassOop(obj)); + oop* p = ak->adr_component_mirror(); + if (PSScavenge::should_scavenge(p)) { + pm->claim_or_forward_depth(p); + } + klassKlass::oop_push_contents(pm, obj); } int arrayKlassKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
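The oop_oop_iterate_m change above (and the matching one in constantPoolKlass.cpp below) follows the usual bounded-iteration pattern: a field location is passed to the closure only when the location itself falls inside the MemRegion being scanned. A simplified standalone sketch, with invented stand-in types for oop*, MemRegion and OopClosure:

#include <cstddef>

struct RegionSketch {
  void** start;
  void** end;
  bool contains(void** p) const { return p >= start && p < end; }
};

struct OopClosureSketch {
  void do_oop(void** /*p*/) { /* visit the reference stored at *p */ }
};

static void iterate_fields_bounded(void** const* field_addrs, int n,
                                   const RegionSketch& mr, OopClosureSketch* blk) {
  for (int i = 0; i < n; i++) {
    void** addr = field_addrs[i];
    if (mr.contains(addr)) {   // the guard newly applied to adr_component_mirror() etc.
      blk->do_oop(addr);
    }
  }
}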
--- a/src/share/vm/oops/arrayOop.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/arrayOop.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/constantPoolKlass.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/constantPoolKlass.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -245,13 +245,13 @@ } oop* addr; addr = cp->tags_addr(); - blk->do_oop(addr); + if (mr.contains(addr)) blk->do_oop(addr); addr = cp->cache_addr(); - blk->do_oop(addr); + if (mr.contains(addr)) blk->do_oop(addr); addr = cp->operands_addr(); - blk->do_oop(addr); + if (mr.contains(addr)) blk->do_oop(addr); addr = cp->pool_holder_addr(); - blk->do_oop(addr); + if (mr.contains(addr)) blk->do_oop(addr); return size; } @@ -285,10 +285,11 @@ void constantPoolKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { assert(obj->is_constantPool(), "should be constant pool"); constantPoolOop cp = (constantPoolOop) obj; - if (AnonymousClasses && cp->has_pseudo_string() && cp->tags() != NULL) { - oop* base = (oop*)cp->base(); - for (int i = 0; i < cp->length(); ++i, ++base) { + if (cp->tags() != NULL && + (!JavaObjectsInPerm || (EnableInvokeDynamic && cp->has_pseudo_string()))) { + for (int i = 1; i < cp->length(); ++i) { if (cp->tag_at(i).is_string()) { + oop* base = cp->obj_at_addr_raw(i); if (PSScavenge::should_scavenge(base)) { pm->claim_or_forward_depth(base); } @@ -380,7 +381,6 @@ case JVM_CONSTANT_MethodType : st->print("signature_index=%d", cp->method_type_index_at(index)); break; - case JVM_CONSTANT_InvokeDynamicTrans : case JVM_CONSTANT_InvokeDynamic : { st->print("bootstrap_method_index=%d", cp->invoke_dynamic_bootstrap_method_ref_index_at(index)); @@ -460,7 +460,8 @@ if (cp->tag_at(i).is_string()) { if (!cp->has_pseudo_string()) { if (entry.is_oop()) { - guarantee(entry.get_oop()->is_perm(), "should be in permspace"); + guarantee(!JavaObjectsInPerm || entry.get_oop()->is_perm(), + "should be in permspace"); guarantee(entry.get_oop()->is_instance(), "should be instance"); } } else {
--- a/src/share/vm/oops/constantPoolOop.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/constantPoolOop.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -284,17 +284,13 @@ if (constantPoolCacheOopDesc::is_secondary_index(which)) { // Invokedynamic index. int pool_index = cache()->main_entry_at(which)->constant_pool_index(); - if (!AllowTransitionalJSR292 || tag_at(pool_index).is_invoke_dynamic()) - pool_index = invoke_dynamic_name_and_type_ref_index_at(pool_index); + pool_index = invoke_dynamic_name_and_type_ref_index_at(pool_index); assert(tag_at(pool_index).is_name_and_type(), ""); return pool_index; } // change byte-ordering and go via cache i = remap_instruction_operand_from_cache(which); } else { - if (AllowTransitionalJSR292 && tag_at(which).is_name_and_type()) - // invokedynamic index is a simple name-and-type - return which; if (tag_at(which).is_invoke_dynamic()) { int pool_index = invoke_dynamic_name_and_type_ref_index_at(which); assert(tag_at(pool_index).is_name_and_type(), ""); @@ -481,7 +477,7 @@ { klassOop resolved = klass_at_impl(this_oop, index, CHECK_NULL); // ldc wants the java mirror. - result_oop = resolved->klass_part()->java_mirror(); + result_oop = resolved->java_mirror(); break; } @@ -953,7 +949,6 @@ } break; case JVM_CONSTANT_InvokeDynamic: - case JVM_CONSTANT_InvokeDynamicTrans: { int k1 = invoke_dynamic_bootstrap_method_ref_index_at(index1); int k2 = cp2->invoke_dynamic_bootstrap_method_ref_index_at(index2); @@ -1175,8 +1170,15 @@ case JVM_CONSTANT_UnresolvedClass: { - Symbol* k = from_cp->unresolved_klass_at(from_i); - to_cp->unresolved_klass_at_put(to_i, k); + // Can be resolved after checking tag, so check the slot first. + CPSlot entry = from_cp->slot_at(from_i); + if (entry.is_oop()) { + assert(entry.get_oop()->is_klass(), "must be"); + // Already resolved + to_cp->klass_at_put(to_i, (klassOop)entry.get_oop()); + } else { + to_cp->unresolved_klass_at_put(to_i, entry.get_symbol()); + } } break; case JVM_CONSTANT_UnresolvedClassInError: @@ -1189,8 +1191,14 @@ case JVM_CONSTANT_UnresolvedString: { - Symbol* s = from_cp->unresolved_string_at(from_i); - to_cp->unresolved_string_at_put(to_i, s); + // Can be resolved after checking tag, so check the slot first. 
+ CPSlot entry = from_cp->slot_at(from_i); + if (entry.is_oop()) { + // Already resolved (either string or pseudo-string) + to_cp->string_at_put(to_i, entry.get_oop()); + } else { + to_cp->unresolved_string_at_put(to_i, entry.get_symbol()); + } } break; case JVM_CONSTANT_Utf8: @@ -1214,13 +1222,6 @@ to_cp->method_handle_index_at_put(to_i, k1, k2); } break; - case JVM_CONSTANT_InvokeDynamicTrans: - { - int k1 = from_cp->invoke_dynamic_bootstrap_method_ref_index_at(from_i); - int k2 = from_cp->invoke_dynamic_name_and_type_ref_index_at(from_i); - to_cp->invoke_dynamic_trans_at_put(to_i, k1, k2); - } break; - case JVM_CONSTANT_InvokeDynamic: { int k1 = from_cp->invoke_dynamic_bootstrap_specifier_index(from_i); @@ -1446,7 +1447,6 @@ return 5; case JVM_CONSTANT_InvokeDynamic: - case JVM_CONSTANT_InvokeDynamicTrans: // u1 tag, u2 bsm, u2 nt return 5; @@ -1661,7 +1661,6 @@ DBG(printf("JVM_CONSTANT_MethodType: %hd", idx1)); break; } - case JVM_CONSTANT_InvokeDynamicTrans: case JVM_CONSTANT_InvokeDynamic: { *bytes = tag; idx1 = extract_low_short_from_int(*int_at_addr(idx));
--- a/src/share/vm/oops/constantPoolOop.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/constantPoolOop.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -244,12 +244,6 @@ *int_at_addr(which) = ((jint) name_and_type_index<<16) | bootstrap_specifier_index; } - void invoke_dynamic_trans_at_put(int which, int bootstrap_method_index, int name_and_type_index) { - tag_at_put(which, JVM_CONSTANT_InvokeDynamicTrans); - *int_at_addr(which) = ((jint) name_and_type_index<<16) | bootstrap_method_index; - assert(AllowTransitionalJSR292, ""); - } - // Temporary until actual use void unresolved_string_at_put(int which, Symbol* s) { release_tag_at_put(which, JVM_CONSTANT_UnresolvedString); @@ -429,7 +423,7 @@ // A "pseudo-string" is an non-string oop that has found is way into // a String entry. - // Under AnonymousClasses this can happen if the user patches a live + // Under EnableInvokeDynamic this can happen if the user patches a live // object into a CONSTANT_String entry of an anonymous class. // Method oops internally created for method handles may also // use pseudo-strings to link themselves to related metaobjects. @@ -442,7 +436,7 @@ } void pseudo_string_at_put(int which, oop x) { - assert(AnonymousClasses, ""); + assert(EnableInvokeDynamic, ""); set_pseudo_string(); // mark header assert(tag_at(which).is_string() || tag_at(which).is_unresolved_string(), "Corrupted constant pool"); string_at_put(which, x); // this works just fine @@ -570,15 +564,11 @@ }; int invoke_dynamic_bootstrap_method_ref_index_at(int which) { assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool"); - if (tag_at(which).value() == JVM_CONSTANT_InvokeDynamicTrans) - return extract_low_short_from_int(*int_at_addr(which)); int op_base = invoke_dynamic_operand_base(which); return operands()->short_at(op_base + _indy_bsm_offset); } int invoke_dynamic_argument_count_at(int which) { assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool"); - if (tag_at(which).value() == JVM_CONSTANT_InvokeDynamicTrans) - return 0; int op_base = invoke_dynamic_operand_base(which); int argc = operands()->short_at(op_base + _indy_argc_offset); DEBUG_ONLY(int end_offset = op_base + _indy_argv_offset + argc;
--- a/src/share/vm/oops/cpCacheKlass.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/cpCacheKlass.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -168,22 +168,18 @@ void constantPoolCacheKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { assert(obj->is_constantPoolCache(), "should be constant pool"); - if (EnableInvokeDynamic) { + if (ScavengeRootsInCode) { constantPoolCacheOop cache = (constantPoolCacheOop)obj; // during a scavenge, it is safe to inspect my pool, since it is perm constantPoolOop pool = cache->constant_pool(); assert(pool->is_constantPool(), "should be constant pool"); - if (pool->has_invokedynamic()) { - for (int i = 0; i < cache->length(); i++) { - ConstantPoolCacheEntry* e = cache->entry_at(i); - oop* p = (oop*)&e->_f1; - if (e->is_secondary_entry()) { - if (PSScavenge::should_scavenge(p)) - pm->claim_or_forward_depth(p); - assert(!(e->is_vfinal() && PSScavenge::should_scavenge((oop*)&e->_f2)), - "no live oops here"); - } - } + for (int i = 0; i < cache->length(); i++) { + ConstantPoolCacheEntry* e = cache->entry_at(i); + oop* p = (oop*)&e->_f1; + if (PSScavenge::should_scavenge(p)) + pm->claim_or_forward_depth(p); + assert(!(e->is_vfinal() && PSScavenge::should_scavenge((oop*)&e->_f2)), + "no live oops here"); } } }
--- a/src/share/vm/oops/cpCacheKlass.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/cpCacheKlass.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/cpCacheOop.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/cpCacheOop.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -133,7 +133,7 @@ TosState field_type, bool is_final, bool is_volatile) { - set_f1(field_holder()); + set_f1(field_holder()->java_mirror()); set_f2(field_offset); // The field index is used by jvm/ti and is the index into fields() array // in holder instanceKlass. This is scaled by instanceKlass::next_offset. @@ -185,7 +185,7 @@ this->print(tty, 0); } assert(method->can_be_statically_bound(), "must be a MH invoker method"); - assert(AllowTransitionalJSR292 || _f2 >= constantPoolOopDesc::CPCACHE_INDEX_TAG, "BSM index initialized"); + assert(_f2 >= constantPoolOopDesc::CPCACHE_INDEX_TAG, "BSM index initialized"); // SystemDictionary::find_method_handle_invoke only caches // methods which signature classes are on the boot classpath, // otherwise the newly created method is returned. To avoid
--- a/src/share/vm/oops/cpCacheOop.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/cpCacheOop.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -190,7 +190,7 @@ ); void set_dynamic_call( - Handle call_site, // Resolved java.dyn.CallSite (f1) + Handle call_site, // Resolved java.lang.invoke.CallSite (f1) methodHandle signature_invoker // determines signature information );
--- a/src/share/vm/oops/generateOopMap.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/generateOopMap.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/instanceKlass.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/instanceKlass.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -37,6 +37,7 @@ #include "memory/oopFactory.hpp" #include "memory/permGen.hpp" #include "oops/instanceKlass.hpp" +#include "oops/instanceMirrorKlass.hpp" #include "oops/instanceOop.hpp" #include "oops/methodOop.hpp" #include "oops/objArrayKlassKlass.hpp" @@ -649,6 +650,7 @@ } instanceOop instanceKlass::allocate_instance(TRAPS) { + assert(!oop_is_instanceMirror(), "wrong allocation path"); bool has_finalizer_flag = has_finalizer(); // Query before possible GC int size = size_helper(); // Query before forming handle. @@ -669,6 +671,7 @@ // instances so simply disallow finalizable perm objects. This can // be relaxed if a need for it is found. assert(!has_finalizer(), "perm objects not allowed to have finalizers"); + assert(!oop_is_instanceMirror(), "wrong allocation path"); int size = size_helper(); // Query before forming handle. KlassHandle h_k(THREAD, as_klassOop()); instanceOop i = (instanceOop) @@ -735,7 +738,12 @@ static int call_class_initializer_impl_counter = 0; // for debugging methodOop instanceKlass::class_initializer() { - return find_method(vmSymbols::class_initializer_name(), vmSymbols::void_method_signature()); + methodOop clinit = find_method( + vmSymbols::class_initializer_name(), vmSymbols::void_method_signature()); + if (clinit != NULL && clinit->has_valid_initializer_flags()) { + return clinit; + } + return NULL; } void instanceKlass::call_class_initializer_impl(instanceKlassHandle this_oop, TRAPS) { @@ -893,6 +901,7 @@ } } + void instanceKlass::do_local_static_fields(FieldClosure* cl) { fieldDescriptor fd; int length = fields()->length(); @@ -1604,36 +1613,6 @@ // The following macros call specialized macros, passing either oop or // narrowOop as the specialization type. These test the UseCompressedOops // flag. -#define InstanceKlass_OOP_ITERATE(start_p, count, \ - do_oop, assert_fn) \ -{ \ - if (UseCompressedOops) { \ - InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \ - start_p, count, \ - do_oop, assert_fn) \ - } else { \ - InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \ - start_p, count, \ - do_oop, assert_fn) \ - } \ -} - -#define InstanceKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high, \ - do_oop, assert_fn) \ -{ \ - if (UseCompressedOops) { \ - InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ - start_p, count, \ - low, high, \ - do_oop, assert_fn) \ - } else { \ - InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ - start_p, count, \ - low, high, \ - do_oop, assert_fn) \ - } \ -} - #define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn) \ { \ /* Compute oopmap block range. 
The common case \ @@ -1706,38 +1685,6 @@ } \ } -void instanceKlass::follow_static_fields() { - InstanceKlass_OOP_ITERATE( \ - start_of_static_fields(), static_oop_field_size(), \ - MarkSweep::mark_and_push(p), \ - assert_is_in_closed_subset) -} - -#ifndef SERIALGC -void instanceKlass::follow_static_fields(ParCompactionManager* cm) { - InstanceKlass_OOP_ITERATE( \ - start_of_static_fields(), static_oop_field_size(), \ - PSParallelCompact::mark_and_push(cm, p), \ - assert_is_in) -} -#endif // SERIALGC - -void instanceKlass::adjust_static_fields() { - InstanceKlass_OOP_ITERATE( \ - start_of_static_fields(), static_oop_field_size(), \ - MarkSweep::adjust_pointer(p), \ - assert_nothing) -} - -#ifndef SERIALGC -void instanceKlass::update_static_fields() { - InstanceKlass_OOP_ITERATE( \ - start_of_static_fields(), static_oop_field_size(), \ - PSParallelCompact::adjust_pointer(p), \ - assert_nothing) -} -#endif // SERIALGC - void instanceKlass::oop_follow_contents(oop obj) { assert(obj != NULL, "can't follow the content of NULL object"); obj->follow_header(); @@ -1824,22 +1771,6 @@ ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) #endif // !SERIALGC -void instanceKlass::iterate_static_fields(OopClosure* closure) { - InstanceKlass_OOP_ITERATE( \ - start_of_static_fields(), static_oop_field_size(), \ - closure->do_oop(p), \ - assert_is_in_reserved) -} - -void instanceKlass::iterate_static_fields(OopClosure* closure, - MemRegion mr) { - InstanceKlass_BOUNDED_OOP_ITERATE( \ - start_of_static_fields(), static_oop_field_size(), \ - mr.start(), mr.end(), \ - (closure)->do_oop_v(p), \ - assert_is_in_closed_subset) -} - int instanceKlass::oop_adjust_pointers(oop obj) { int size = size_helper(); InstanceKlass_OOP_MAP_ITERATE( \ @@ -1868,21 +1799,6 @@ return size_helper(); } -void instanceKlass::push_static_fields(PSPromotionManager* pm) { - InstanceKlass_OOP_ITERATE( \ - start_of_static_fields(), static_oop_field_size(), \ - if (PSScavenge::should_scavenge(p)) { \ - pm->claim_or_forward_depth(p); \ - }, \ - assert_nothing ) -} - -void instanceKlass::copy_static_fields(ParCompactionManager* cm) { - InstanceKlass_OOP_ITERATE( \ - start_of_static_fields(), static_oop_field_size(), \ - PSParallelCompact::adjust_pointer(p), \ - assert_is_in) -} #endif // SERIALGC // This klass is alive but the implementor link is not followed/updated. 
@@ -1997,6 +1913,11 @@ if (_source_debug_extension != NULL) _source_debug_extension->increment_refcount(); } +address instanceKlass::static_field_addr(int offset) { + return (address)(offset + instanceMirrorKlass::offset_of_static_fields() + (intptr_t)java_mirror()); +} + + const char* instanceKlass::signature_name() const { const char* src = (const char*) (name()->as_C_string()); const int src_length = (int)strlen(src); @@ -2364,7 +2285,7 @@ void FieldPrinter::do_field(fieldDescriptor* fd) { _st->print(BULLET); - if (fd->is_static() || (_obj == NULL)) { + if (_obj == NULL) { fd->print_on(_st); _st->cr(); } else { @@ -2394,8 +2315,8 @@ } st->print_cr(BULLET"---- fields (total size %d words):", oop_size(obj)); - FieldPrinter print_nonstatic_field(st, obj); - do_nonstatic_fields(&print_nonstatic_field); + FieldPrinter print_field(st, obj); + do_nonstatic_fields(&print_field); if (as_klassOop() == SystemDictionary::Class_klass()) { st->print(BULLET"signature: "); @@ -2413,9 +2334,15 @@ st->print(BULLET"fake entry for array: "); array_klass->print_value_on(st); st->cr(); + st->print_cr(BULLET"fake entry for oop_size: %d", java_lang_Class::oop_size(obj)); + st->print_cr(BULLET"fake entry for static_oop_field_count: %d", java_lang_Class::static_oop_field_count(obj)); + klassOop real_klass = java_lang_Class::as_klassOop(obj); + if (real_klass != NULL && real_klass->klass_part()->oop_is_instance()) { + instanceKlass::cast(real_klass)->do_local_static_fields(&print_field); + } } else if (as_klassOop() == SystemDictionary::MethodType_klass()) { st->print(BULLET"signature: "); - java_dyn_MethodType::print_signature(obj, st); + java_lang_invoke_MethodType::print_signature(obj, st); st->cr(); } } @@ -2446,7 +2373,7 @@ } } else if (as_klassOop() == SystemDictionary::MethodType_klass()) { st->print(" = "); - java_dyn_MethodType::print_signature(obj, st); + java_lang_invoke_MethodType::print_signature(obj, st); } else if (java_lang_boxing_object::is_instance(obj)) { st->print(" = "); java_lang_boxing_object::print(obj, st); @@ -2555,7 +2482,7 @@ void JNIid::verify(klassOop holder) { - int first_field_offset = instanceKlass::cast(holder)->offset_of_static_fields(); + int first_field_offset = instanceMirrorKlass::offset_of_static_fields(); int end_field_offset; end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize);
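Several hunks in this file (and the instanceMirrorKlass additions in universe.cpp) reflect static fields moving out of the instanceKlass object and into the class's java.lang.Class mirror. The new static_field_addr() resolves a field offset against the mirror; a self-contained sketch of that address computation, with illustrative parameter names standing in for java_mirror() and instanceMirrorKlass::offset_of_static_fields():

#include <cstdint>

// address = mirror oop + fixed start of the static-field block inside the
// mirror + the field's own offset, mirroring the new static_field_addr().
static char* static_field_addr_sketch(char* java_mirror,
                                      intptr_t offset_of_static_fields,
                                      int field_offset) {
  return java_mirror + offset_of_static_fields + field_offset;
}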
--- a/src/share/vm/oops/instanceKlass.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/instanceKlass.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -75,8 +75,6 @@ // [Java vtable length ] // [oop map cache (stack maps) ] // [EMBEDDED Java vtable ] size in words = vtable_len -// [EMBEDDED static oop fields ] size in words = static_oop_fields_size -// [ static non-oop fields ] size in words = static_field_size - static_oop_fields_size // [EMBEDDED nonstatic oop-map blocks] size in words = nonstatic_oop_map_size // // The embedded nonstatic oop-map blocks are short pairs (offset, length) indicating @@ -184,7 +182,7 @@ // Protection domain. oop _protection_domain; // Host class, which grants its access privileges to this class also. - // This is only non-null for an anonymous class (AnonymousClasses enabled). + // This is only non-null for an anonymous class (JSR 292 enabled). // The host class is either named, or a previously loaded anonymous class. klassOop _host_klass; // Class signers. @@ -193,8 +191,6 @@ typeArrayOop _inner_classes; // Implementors of this interface (not valid if it overflows) klassOop _implementors[implementors_limit]; - // invokedynamic bootstrap method (a java.dyn.MethodHandle) - oop _bootstrap_method; // Annotations for this class, or null if none. typeArrayOop _class_annotations; // Annotation objects (byte arrays) for fields, or null if no annotations. @@ -230,7 +226,7 @@ // (including inherited fields but after header_size()). int _nonstatic_field_size; int _static_field_size; // number words used by static fields (oop and non-oop) in this klass - int _static_oop_field_size;// number of static oop fields in this klass + int _static_oop_field_count;// number of static oop fields in this klass int _nonstatic_oop_map_size;// size in words of nonstatic oop map blocks bool _is_marked_dependent; // used for marking during flushing and deoptimization bool _rewritten; // methods rewritten. 
@@ -281,8 +277,8 @@ int static_field_size() const { return _static_field_size; } void set_static_field_size(int size) { _static_field_size = size; } - int static_oop_field_size() const { return _static_oop_field_size; } - void set_static_oop_field_size(int size) { _static_oop_field_size = size; } + int static_oop_field_count() const { return _static_oop_field_count; } + void set_static_oop_field_count(int size) { _static_oop_field_count = size; } // Java vtable int vtable_length() const { return _vtable_len; } @@ -528,10 +524,6 @@ u2 method_index) { _enclosing_method_class_index = class_index; _enclosing_method_method_index = method_index; } - // JSR 292 support - oop bootstrap_method() const { return _bootstrap_method; } - void set_bootstrap_method(oop mh) { oop_store(&_bootstrap_method, mh); } - // jmethodID support static jmethodID get_jmethod_id(instanceKlassHandle ik_h, methodHandle method_h); @@ -660,6 +652,7 @@ // Casting from klassOop static instanceKlass* cast(klassOop k) { + assert(k->is_klass(), "must be"); Klass* kp = k->klass_part(); assert(kp->null_vtbl() || kp->oop_is_instance_slow(), "cast to instanceKlass"); return (instanceKlass*) kp; @@ -667,7 +660,7 @@ // Sizing (in words) static int header_size() { return align_object_offset(oopDesc::header_size() + sizeof(instanceKlass)/HeapWordSize); } - int object_size() const { return object_size(align_object_offset(vtable_length()) + align_object_offset(itable_length()) + static_field_size() + nonstatic_oop_map_size()); } + int object_size() const { return object_size(align_object_offset(vtable_length()) + align_object_offset(itable_length()) + nonstatic_oop_map_size()); } static int vtable_start_offset() { return header_size(); } static int vtable_length_offset() { return oopDesc::header_size() + offset_of(instanceKlass, _vtable_len) / HeapWordSize; } static int object_size(int extra) { return align_object_size(header_size() + extra); } @@ -676,20 +669,12 @@ intptr_t* start_of_itable() const { return start_of_vtable() + align_object_offset(vtable_length()); } int itable_offset_in_words() const { return start_of_itable() - (intptr_t*)as_klassOop(); } - // Static field offset is an offset into the Heap, should be converted by - // based on UseCompressedOop for traversal - HeapWord* start_of_static_fields() const { - return (HeapWord*)(start_of_itable() + align_object_offset(itable_length())); - } - intptr_t* end_of_itable() const { return start_of_itable() + itable_length(); } - int offset_of_static_fields() const { - return (intptr_t)start_of_static_fields() - (intptr_t)as_klassOop(); - } + address static_field_addr(int offset); OopMapBlock* start_of_nonstatic_oop_maps() const { - return (OopMapBlock*) (start_of_static_fields() + static_field_size()); + return (OopMapBlock*)(start_of_itable() + align_object_offset(itable_length())); } // Allocation profiling support @@ -719,8 +704,6 @@ // Garbage collection void oop_follow_contents(oop obj); - void follow_static_fields(); - void adjust_static_fields(); int oop_adjust_pointers(oop obj); bool object_is_parsable() const { return _init_state != unparsable_by_gc; } // Value of _init_state must be zero (unparsable_by_gc) when klass field is set. 
@@ -732,16 +715,6 @@ // Parallel Scavenge and Parallel Old PARALLEL_GC_DECLS -#ifndef SERIALGC - // Parallel Scavenge - void push_static_fields(PSPromotionManager* pm); - - // Parallel Old - void follow_static_fields(ParCompactionManager* cm); - void copy_static_fields(ParCompactionManager* cm); - void update_static_fields(); -#endif // SERIALGC - // Naming const char* signature_name() const; @@ -770,9 +743,6 @@ ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) #endif // !SERIALGC - void iterate_static_fields(OopClosure* closure); - void iterate_static_fields(OopClosure* closure, MemRegion mr); - private: // initialization state #ifdef ASSERT @@ -817,7 +787,6 @@ oop* adr_signers() const { return (oop*)&this->_signers;} oop* adr_inner_classes() const { return (oop*)&this->_inner_classes;} oop* adr_implementors() const { return (oop*)&this->_implementors[0];} - oop* adr_bootstrap_method() const { return (oop*)&this->_bootstrap_method;} oop* adr_methods_jmethod_ids() const { return (oop*)&this->_methods_jmethod_ids;} oop* adr_methods_cached_itable_indices() const { return (oop*)&this->_methods_cached_itable_indices;} oop* adr_class_annotations() const { return (oop*)&this->_class_annotations;} @@ -926,6 +895,10 @@ // Identifier lookup JNIid* find(int offset); + bool find_local_field(fieldDescriptor* fd) { + return instanceKlass::cast(holder())->find_local_field_from_offset(offset(), true, fd); + } + // Garbage collection support oop* holder_addr() { return (oop*)&_holder; } void oops_do(OopClosure* f);
--- a/src/share/vm/oops/instanceKlassKlass.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/instanceKlassKlass.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -31,6 +31,7 @@ #include "memory/gcLocker.hpp" #include "oops/constantPoolOop.hpp" #include "oops/instanceKlass.hpp" +#include "oops/instanceMirrorKlass.hpp" #include "oops/instanceKlassKlass.hpp" #include "oops/instanceRefKlass.hpp" #include "oops/objArrayKlassKlass.hpp" @@ -86,7 +87,6 @@ assert(klassOop(obj)->klass_part()->oop_is_instance_slow(), "must be instance klass"); instanceKlass* ik = instanceKlass::cast(klassOop(obj)); - ik->follow_static_fields(); { HandleMark hm; ik->vtable()->oop_follow_contents(); @@ -105,7 +105,6 @@ MarkSweep::mark_and_push(ik->adr_protection_domain()); MarkSweep::mark_and_push(ik->adr_host_klass()); MarkSweep::mark_and_push(ik->adr_signers()); - MarkSweep::mark_and_push(ik->adr_bootstrap_method()); MarkSweep::mark_and_push(ik->adr_class_annotations()); MarkSweep::mark_and_push(ik->adr_fields_annotations()); MarkSweep::mark_and_push(ik->adr_methods_annotations()); @@ -127,7 +126,6 @@ assert(klassOop(obj)->klass_part()->oop_is_instance_slow(), "must be instance klass"); instanceKlass* ik = instanceKlass::cast(klassOop(obj)); - ik->follow_static_fields(cm); ik->vtable()->oop_follow_contents(cm); ik->itable()->oop_follow_contents(cm); @@ -143,7 +141,6 @@ PSParallelCompact::mark_and_push(cm, ik->adr_protection_domain()); PSParallelCompact::mark_and_push(cm, ik->adr_host_klass()); PSParallelCompact::mark_and_push(cm, ik->adr_signers()); - PSParallelCompact::mark_and_push(cm, ik->adr_bootstrap_method()); PSParallelCompact::mark_and_push(cm, ik->adr_class_annotations()); PSParallelCompact::mark_and_push(cm, ik->adr_fields_annotations()); PSParallelCompact::mark_and_push(cm, ik->adr_methods_annotations()); @@ -168,7 +165,6 @@ // Don't call size() or oop_size() since that is a virtual call. int size = ik->object_size(); - ik->iterate_static_fields(blk); ik->vtable()->oop_oop_iterate(blk); ik->itable()->oop_oop_iterate(blk); @@ -187,7 +183,6 @@ for (int i = 0; i < instanceKlass::implementors_limit; i++) { blk->do_oop(&ik->adr_implementors()[i]); } - blk->do_oop(ik->adr_bootstrap_method()); blk->do_oop(ik->adr_class_annotations()); blk->do_oop(ik->adr_fields_annotations()); blk->do_oop(ik->adr_methods_annotations()); @@ -209,7 +204,6 @@ // Don't call size() or oop_size() since that is a virtual call. 
int size = ik->object_size(); - ik->iterate_static_fields(blk, mr); ik->vtable()->oop_oop_iterate_m(blk, mr); ik->itable()->oop_oop_iterate_m(blk, mr); @@ -242,8 +236,6 @@ for (int i = 0; i < instanceKlass::implementors_limit; i++) { if (mr.contains(&adr[i])) blk->do_oop(&adr[i]); } - adr = ik->adr_bootstrap_method(); - if (mr.contains(adr)) blk->do_oop(adr); adr = ik->adr_class_annotations(); if (mr.contains(adr)) blk->do_oop(adr); adr = ik->adr_fields_annotations(); @@ -266,7 +258,6 @@ assert(klassOop(obj)->klass_part()->oop_is_instance_slow(), "must be instance klass"); instanceKlass* ik = instanceKlass::cast(klassOop(obj)); - ik->adjust_static_fields(); ik->vtable()->oop_adjust_pointers(); ik->itable()->oop_adjust_pointers(); @@ -285,7 +276,6 @@ for (int i = 0; i < instanceKlass::implementors_limit; i++) { MarkSweep::adjust_pointer(&ik->adr_implementors()[i]); } - MarkSweep::adjust_pointer(ik->adr_bootstrap_method()); MarkSweep::adjust_pointer(ik->adr_class_annotations()); MarkSweep::adjust_pointer(ik->adr_fields_annotations()); MarkSweep::adjust_pointer(ik->adr_methods_annotations()); @@ -300,7 +290,6 @@ #ifndef SERIALGC void instanceKlassKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { instanceKlass* ik = instanceKlass::cast(klassOop(obj)); - ik->push_static_fields(pm); oop* loader_addr = ik->adr_class_loader(); if (PSScavenge::should_scavenge(loader_addr)) { @@ -322,11 +311,6 @@ pm->claim_or_forward_depth(sg_addr); } - oop* bsm_addr = ik->adr_bootstrap_method(); - if (PSScavenge::should_scavenge(bsm_addr)) { - pm->claim_or_forward_depth(bsm_addr); - } - klassKlass::oop_push_contents(pm, obj); } @@ -336,7 +320,6 @@ "must be instance klass"); instanceKlass* ik = instanceKlass::cast(klassOop(obj)); - ik->update_static_fields(); ik->vtable()->oop_update_pointers(cm); ik->itable()->oop_update_pointers(cm); @@ -356,22 +339,28 @@ #endif // SERIALGC klassOop -instanceKlassKlass::allocate_instance_klass(int vtable_len, int itable_len, +instanceKlassKlass::allocate_instance_klass(Symbol* name, int vtable_len, int itable_len, int static_field_size, unsigned nonstatic_oop_map_count, ReferenceType rt, TRAPS) { const int nonstatic_oop_map_size = instanceKlass::nonstatic_oop_map_size(nonstatic_oop_map_count); - int size = instanceKlass::object_size(align_object_offset(vtable_len) + align_object_offset(itable_len) + static_field_size + nonstatic_oop_map_size); + int size = instanceKlass::object_size(align_object_offset(vtable_len) + align_object_offset(itable_len) + nonstatic_oop_map_size); // Allocation KlassHandle h_this_klass(THREAD, as_klassOop()); KlassHandle k; if (rt == REF_NONE) { - // regular klass - instanceKlass o; - k = base_create_klass(h_this_klass, size, o.vtbl_value(), CHECK_NULL); + if (name != vmSymbols::java_lang_Class()) { + // regular klass + instanceKlass o; + k = base_create_klass(h_this_klass, size, o.vtbl_value(), CHECK_NULL); + } else { + // Class + instanceMirrorKlass o; + k = base_create_klass(h_this_klass, size, o.vtbl_value(), CHECK_NULL); + } } else { // reference klass instanceRefKlass o; @@ -408,7 +397,7 @@ ik->set_source_debug_extension(NULL); ik->set_array_name(NULL); ik->set_inner_classes(NULL); - ik->set_static_oop_field_size(0); + ik->set_static_oop_field_count(0); ik->set_nonstatic_field_size(0); ik->set_is_marked_dependent(false); ik->set_init_state(instanceKlass::allocated); @@ -420,7 +409,6 @@ ik->set_breakpoints(NULL); ik->init_previous_versions(); ik->set_generic_signature(NULL); - ik->set_bootstrap_method(NULL); 
ik->release_set_methods_jmethod_ids(NULL); ik->release_set_methods_cached_itable_indices(NULL); ik->set_class_annotations(NULL); @@ -442,9 +430,6 @@ // To get verify to work - must be set to partial loaded before first GC point. k()->set_partially_loaded(); } - - // GC can happen here - java_lang_Class::create_mirror(k, CHECK_NULL); // Allocate mirror return k(); } @@ -545,11 +530,6 @@ } // pvw is cleaned up } // rm is cleaned up - if (ik->bootstrap_method() != NULL) { - st->print(BULLET"bootstrap method: "); - ik->bootstrap_method()->print_value_on(st); - st->cr(); - } if (ik->generic_signature() != NULL) { st->print(BULLET"generic signature: "); ik->generic_signature()->print_value_on(st); @@ -566,13 +546,6 @@ FieldPrinter print_nonstatic_field(st); ik->do_nonstatic_fields(&print_nonstatic_field); - st->print(BULLET"static oop maps: "); - if (ik->static_oop_field_size() > 0) { - int first_offset = ik->offset_of_static_fields(); - st->print("%d-%d", first_offset, first_offset + ik->static_oop_field_size() - 1); - } - st->cr(); - st->print(BULLET"non-static oop maps: "); OopMapBlock* map = ik->start_of_nonstatic_oop_maps(); OopMapBlock* end_map = map + ik->nonstatic_oop_map_count(); @@ -630,7 +603,6 @@ // Verify static fields VerifyFieldClosure blk; - ik->iterate_static_fields(&blk); // Verify vtables if (ik->is_linked()) {
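
In the hunk above, allocate_instance_klass now receives the class name so it can give java.lang.Class its own specialized klass (instanceMirrorKlass) while every other non-reference class keeps a plain instanceKlass. A rough sketch of that selection using ordinary virtual dispatch instead of HotSpot's vtbl_value mechanism; names and structure are illustrative only:

#include <iostream>
#include <memory>
#include <string>

struct Behavior {                       // stand-in for the Klass behavior part
    virtual ~Behavior() = default;
    virtual const char* kind() const { return "instanceKlass"; }
};
struct MirrorBehavior : Behavior {      // stand-in for instanceMirrorKlass
    const char* kind() const override { return "instanceMirrorKlass"; }
};
struct RefBehavior : Behavior {         // stand-in for instanceRefKlass
    const char* kind() const override { return "instanceRefKlass"; }
};

// Mirrors the new selection logic: reference types keep their own klass, and
// java.lang.Class alone gets the mirror-specialized klass.
std::unique_ptr<Behavior> allocate_instance_klass(const std::string& name,
                                                  bool is_reference_type) {
    if (is_reference_type)         return std::make_unique<RefBehavior>();
    if (name == "java/lang/Class") return std::make_unique<MirrorBehavior>();
    return std::make_unique<Behavior>();
}

int main() {
    std::cout << allocate_instance_klass("java/lang/Class", false)->kind() << "\n";
    std::cout << allocate_instance_klass("java/lang/String", false)->kind() << "\n";
    return 0;
}
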
--- a/src/share/vm/oops/instanceKlassKlass.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/instanceKlassKlass.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,7 +41,8 @@ // Allocation DEFINE_ALLOCATE_PERMANENT(instanceKlassKlass); static klassOop create_klass(TRAPS); - klassOop allocate_instance_klass(int vtable_len, + klassOop allocate_instance_klass(Symbol* name, + int vtable_len, int itable_len, int static_field_size, unsigned int nonstatic_oop_map_count,
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/oops/instanceMirrorKlass.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -0,0 +1,313 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "classfile/javaClasses.hpp" +#include "classfile/systemDictionary.hpp" +#include "gc_implementation/shared/markSweep.inline.hpp" +#include "gc_interface/collectedHeap.inline.hpp" +#include "memory/genOopClosures.inline.hpp" +#include "memory/oopFactory.hpp" +#include "memory/permGen.hpp" +#include "oops/instanceKlass.hpp" +#include "oops/instanceMirrorKlass.hpp" +#include "oops/instanceOop.hpp" +#include "oops/oop.inline.hpp" +#include "oops/symbol.hpp" +#include "runtime/handles.inline.hpp" +#ifndef SERIALGC +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" +#include "gc_implementation/g1/g1OopClosures.inline.hpp" +#include "gc_implementation/g1/g1RemSet.inline.hpp" +#include "gc_implementation/g1/heapRegionSeq.inline.hpp" +#include "gc_implementation/parNew/parOopClosures.inline.hpp" +#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" +#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" +#include "oops/oop.pcgc.inline.hpp" +#endif + +int instanceMirrorKlass::_offset_of_static_fields = 0; + +#ifdef ASSERT +template <class T> void assert_is_in(T *p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop o = oopDesc::decode_heap_oop_not_null(heap_oop); + assert(Universe::heap()->is_in(o), "should be in heap"); + } +} +template <class T> void assert_is_in_closed_subset(T *p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop o = oopDesc::decode_heap_oop_not_null(heap_oop); + assert(Universe::heap()->is_in_closed_subset(o), "should be in closed"); + } +} +template <class T> void assert_is_in_reserved(T *p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop o = oopDesc::decode_heap_oop_not_null(heap_oop); + assert(Universe::heap()->is_in_reserved(o), "should be in reserved"); + } +} +template <class T> void assert_nothing(T *p) {} + +#else +template <class T> void assert_is_in(T *p) {} +template <class T> void assert_is_in_closed_subset(T *p) {} +template <class T> void assert_is_in_reserved(T *p) {} +template <class T> void assert_nothing(T *p) {} +#endif // ASSERT + +#define InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE( \ + T, start_p, count, do_oop, \ + assert_fn) \ +{ \ + T* p = (T*)(start_p); \ + T* 
const end = p + (count); \ + while (p < end) { \ + (assert_fn)(p); \ + do_oop; \ + ++p; \ + } \ +} + +#define InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \ + T, start_p, count, low, high, \ + do_oop, assert_fn) \ +{ \ + T* const l = (T*)(low); \ + T* const h = (T*)(high); \ + assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \ + mask_bits((intptr_t)h, sizeof(T)-1) == 0, \ + "bounded region must be properly aligned"); \ + T* p = (T*)(start_p); \ + T* end = p + (count); \ + if (p < l) p = l; \ + if (end > h) end = h; \ + while (p < end) { \ + (assert_fn)(p); \ + do_oop; \ + ++p; \ + } \ +} + + +#define InstanceMirrorKlass_OOP_ITERATE(start_p, count, \ + do_oop, assert_fn) \ +{ \ + if (UseCompressedOops) { \ + InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \ + start_p, count, \ + do_oop, assert_fn) \ + } else { \ + InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE(oop, \ + start_p, count, \ + do_oop, assert_fn) \ + } \ +} + +// The following macros call specialized macros, passing either oop or +// narrowOop as the specialization type. These test the UseCompressedOops +// flag. +#define InstanceMirrorKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high, \ + do_oop, assert_fn) \ +{ \ + if (UseCompressedOops) { \ + InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ + start_p, count, \ + low, high, \ + do_oop, assert_fn) \ + } else { \ + InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ + start_p, count, \ + low, high, \ + do_oop, assert_fn) \ + } \ +} + + +void instanceMirrorKlass::oop_follow_contents(oop obj) { + instanceKlass::oop_follow_contents(obj); + InstanceMirrorKlass_OOP_ITERATE( \ + start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \ + MarkSweep::mark_and_push(p), \ + assert_is_in_closed_subset) +} + +#ifndef SERIALGC +void instanceMirrorKlass::oop_follow_contents(ParCompactionManager* cm, + oop obj) { + instanceKlass::oop_follow_contents(cm, obj); + InstanceMirrorKlass_OOP_ITERATE( \ + start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \ + PSParallelCompact::mark_and_push(cm, p), \ + assert_is_in) +} +#endif // SERIALGC + +int instanceMirrorKlass::oop_adjust_pointers(oop obj) { + int size = oop_size(obj); + instanceKlass::oop_adjust_pointers(obj); + InstanceMirrorKlass_OOP_ITERATE( \ + start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \ + MarkSweep::adjust_pointer(p), \ + assert_nothing) + return size; +} + +#define InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(T, nv_suffix) \ + InstanceMirrorKlass_OOP_ITERATE( \ + start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \ + (closure)->do_oop##nv_suffix(p), \ + assert_is_in_closed_subset) \ + return oop_size(obj); \ + +#define InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(T, nv_suffix, mr) \ + InstanceMirrorKlass_BOUNDED_OOP_ITERATE( \ + start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \ + mr.start(), mr.end(), \ + (closure)->do_oop##nv_suffix(p), \ + assert_is_in_closed_subset) \ + return oop_size(obj); \ + + +// Macro to define instanceMirrorKlass::oop_oop_iterate for virtual/nonvirtual for +// all closures. Macros calling macros above for each oop size. 
+ +#define InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ + \ +int instanceMirrorKlass:: \ +oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \ + /* Get size before changing pointers */ \ + SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk); \ + \ + instanceKlass::oop_oop_iterate##nv_suffix(obj, closure); \ + \ + if (UseCompressedOops) { \ + InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(narrowOop, nv_suffix); \ + } else { \ + InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(oop, nv_suffix); \ + } \ +} + +#ifndef SERIALGC +#define InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ + \ +int instanceMirrorKlass:: \ +oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \ + /* Get size before changing pointers */ \ + SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk); \ + \ + instanceKlass::oop_oop_iterate_backwards##nv_suffix(obj, closure); \ + \ + if (UseCompressedOops) { \ + InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(narrowOop, nv_suffix); \ + } else { \ + InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(oop, nv_suffix); \ + } \ +} +#endif // !SERIALGC + + +#define InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ + \ +int instanceMirrorKlass:: \ +oop_oop_iterate##nv_suffix##_m(oop obj, \ + OopClosureType* closure, \ + MemRegion mr) { \ + SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk); \ + \ + instanceKlass::oop_oop_iterate##nv_suffix##_m(obj, closure, mr); \ + if (UseCompressedOops) { \ + InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, mr); \ + } else { \ + InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, mr); \ + } \ +} + +ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN) +ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN) +#ifndef SERIALGC +ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) +ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) +#endif // SERIALGC +ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m) +ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m) + +#ifndef SERIALGC +void instanceMirrorKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { + instanceKlass::oop_push_contents(pm, obj); + InstanceMirrorKlass_OOP_ITERATE( \ + start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj),\ + if (PSScavenge::should_scavenge(p)) { \ + pm->claim_or_forward_depth(p); \ + }, \ + assert_nothing ) +} + +int instanceMirrorKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { + instanceKlass::oop_update_pointers(cm, obj); + InstanceMirrorKlass_OOP_ITERATE( \ + start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj),\ + PSParallelCompact::adjust_pointer(p), \ + assert_nothing) + return oop_size(obj); +} +#endif // SERIALGC + +int instanceMirrorKlass::instance_size(KlassHandle k) { + if (k() != NULL && k->oop_is_instance()) { + return align_object_size(size_helper() + instanceKlass::cast(k())->static_field_size()); + } + return size_helper(); +} + +instanceOop instanceMirrorKlass::allocate_instance(KlassHandle k, TRAPS) { + // Query before forming handle. 
+ int size = instance_size(k); + KlassHandle h_k(THREAD, as_klassOop()); + instanceOop i; + + if (JavaObjectsInPerm) { + i = (instanceOop) CollectedHeap::permanent_obj_allocate(h_k, size, CHECK_NULL); + } else { + assert(ScavengeRootsInCode > 0, "must be"); + i = (instanceOop) CollectedHeap::obj_allocate(h_k, size, CHECK_NULL); + } + + return i; +} + +int instanceMirrorKlass::oop_size(oop obj) const { + return java_lang_Class::oop_size(obj); +} + +int instanceMirrorKlass::compute_static_oop_field_count(oop obj) { + klassOop k = java_lang_Class::as_klassOop(obj); + if (k != NULL && k->klass_part()->oop_is_instance()) { + return instanceKlass::cast(k)->static_oop_field_count(); + } + return 0; +}
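
instanceMirrorKlass::instance_size above makes java.lang.Class instances variable-sized: a mirror is the fixed Class layout plus the mirrored class's static-field block, rounded up to the heap's object alignment. A small sketch of that computation in words; the alignment granule and sizes are example values, not HotSpot constants:

#include <cstdio>

static const int kObjectAlignmentWords = 2;   // example alignment granule

static int align_object_size(int words) {
    return (words + kObjectAlignmentWords - 1)
           / kObjectAlignmentWords * kObjectAlignmentWords;
}

// Models instanceMirrorKlass::instance_size: base Class size plus the static
// fields of the class the mirror represents (0 for primitive/array mirrors).
static int mirror_instance_size(int class_size_helper_words,
                                int static_field_size_words) {
    return align_object_size(class_size_helper_words + static_field_size_words);
}

int main() {
    std::printf("mirror with 3 words of statics: %d words\n",
                mirror_instance_size(12, 3));
    std::printf("mirror of a primitive type:     %d words\n",
                mirror_instance_size(12, 0));
    return 0;
}
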
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/oops/instanceMirrorKlass.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_OOPS_INSTANCEMIRRORKLASS_HPP +#define SHARE_VM_OOPS_INSTANCEMIRRORKLASS_HPP + +#include "oops/instanceKlass.hpp" + +// An instanceMirrorKlass is a specialized instanceKlass for +// java.lang.Class instances. These instances are special because +// they contain the static fields of the class in addition to the +// normal fields of Class. This means they are variable sized +// instances and need special logic for computing their size and for +// iteration of their oops. + + +class instanceMirrorKlass: public instanceKlass { + friend class VMStructs; + + private: + static int _offset_of_static_fields; + + public: + // Type testing + bool oop_is_instanceMirror() const { return true; } + + // Casting from klassOop + static instanceMirrorKlass* cast(klassOop k) { + assert(k->klass_part()->oop_is_instanceMirror(), "cast to instanceMirrorKlass"); + return (instanceMirrorKlass*) k->klass_part(); + } + + // Returns the size of the instance including the extra static fields. 
+ virtual int oop_size(oop obj) const; + + // Static field offset is an offset into the Heap, should be converted by + // based on UseCompressedOop for traversal + static HeapWord* start_of_static_fields(oop obj) { + return (HeapWord*)((intptr_t)obj + offset_of_static_fields()); + } + + static void init_offset_of_static_fields() { + // Cache the offset of the static fields in the Class instance + assert(_offset_of_static_fields == 0, "once"); + _offset_of_static_fields = instanceMirrorKlass::cast(SystemDictionary::Class_klass())->size_helper() << LogHeapWordSize; + } + + static int offset_of_static_fields() { + return _offset_of_static_fields; + } + + int compute_static_oop_field_count(oop obj); + + // Given a Klass return the size of the instance + int instance_size(KlassHandle k); + + // allocation + DEFINE_ALLOCATE_PERMANENT(instanceMirrorKlass); + instanceOop allocate_instance(KlassHandle k, TRAPS); + + // Garbage collection + int oop_adjust_pointers(oop obj); + void oop_follow_contents(oop obj); + + // Parallel Scavenge and Parallel Old + PARALLEL_GC_DECLS + + int oop_oop_iterate(oop obj, OopClosure* blk) { + return oop_oop_iterate_v(obj, blk); + } + int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr) { + return oop_oop_iterate_v_m(obj, blk, mr); + } + +#define InstanceMirrorKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ + int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \ + int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, MemRegion mr); + + ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_DECL) + +#ifndef SERIALGC +#define InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \ + int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk); + + ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) + ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) +#endif // !SERIALGC +}; + +#endif // SHARE_VM_OOPS_INSTANCEMIRRORKLASS_HPP
--- a/src/share/vm/oops/klass.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/klass.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -501,7 +501,7 @@ if (oop_is_instance()) { instanceKlass* ik = (instanceKlass*) this; if (ik->is_anonymous()) { - assert(AnonymousClasses, ""); + assert(EnableInvokeDynamic, ""); intptr_t hash = ik->java_mirror()->identity_hash(); char hash_buf[40]; sprintf(hash_buf, "/" UINTX_FORMAT, (uintx)hash);
--- a/src/share/vm/oops/klass.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/klass.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -584,6 +584,7 @@ public: // type testing operations virtual bool oop_is_instance_slow() const { return false; } + virtual bool oop_is_instanceMirror() const { return false; } virtual bool oop_is_instanceRef() const { return false; } virtual bool oop_is_array() const { return false; } virtual bool oop_is_objArray_slow() const { return false; } @@ -818,4 +819,8 @@ #endif }; + +inline oop klassOopDesc::java_mirror() const { return klass_part()->java_mirror(); } + + #endif // SHARE_VM_OOPS_KLASS_HPP
--- a/src/share/vm/oops/klassKlass.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/klassKlass.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -41,6 +41,10 @@ #include "oops/typeArrayKlass.hpp" #include "runtime/handles.inline.hpp" #ifndef SERIALGC +#include "gc_implementation/parNew/parOopClosures.inline.hpp" +#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" +#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" +#include "memory/cardTableRS.hpp" #include "oops/oop.pcgc.inline.hpp" #endif @@ -181,6 +185,12 @@ #ifndef SERIALGC void klassKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { + Klass* k = Klass::cast(klassOop(obj)); + + oop* p = k->adr_java_mirror(); + if (PSScavenge::should_scavenge(p)) { + pm->claim_or_forward_depth(p); + } } int klassKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { @@ -239,7 +249,7 @@ if (k->java_mirror() != NULL || (k->oop_is_instance() && instanceKlass::cast(klassOop(obj))->is_loaded())) { guarantee(k->java_mirror() != NULL, "should be allocated"); - guarantee(k->java_mirror()->is_perm(), "should be in permspace"); + guarantee(k->java_mirror()->is_perm() || !JavaObjectsInPerm, "should be in permspace"); guarantee(k->java_mirror()->is_instance(), "should be instance"); } }
--- a/src/share/vm/oops/klassOop.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/klassOop.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,7 +45,73 @@ static int klass_part_offset_in_bytes() { return sizeof(klassOopDesc); } // returns the Klass part containing dispatching behavior - Klass* klass_part() { return (Klass*)((address)this + klass_part_offset_in_bytes()); } + Klass* klass_part() const { return (Klass*)((address)this + klass_part_offset_in_bytes()); } + + // Convenience wrapper + inline oop java_mirror() const; + + private: + // These have no implementation since klassOop should never be accessed in this fashion + oop obj_field(int offset) const; + void obj_field_put(int offset, oop value); + void obj_field_raw_put(int offset, oop value); + + jbyte byte_field(int offset) const; + void byte_field_put(int offset, jbyte contents); + + jchar char_field(int offset) const; + void char_field_put(int offset, jchar contents); + + jboolean bool_field(int offset) const; + void bool_field_put(int offset, jboolean contents); + + jint int_field(int offset) const; + void int_field_put(int offset, jint contents); + + jshort short_field(int offset) const; + void short_field_put(int offset, jshort contents); + + jlong long_field(int offset) const; + void long_field_put(int offset, jlong contents); + + jfloat float_field(int offset) const; + void float_field_put(int offset, jfloat contents); + + jdouble double_field(int offset) const; + void double_field_put(int offset, jdouble contents); + + address address_field(int offset) const; + void address_field_put(int offset, address contents); + + oop obj_field_acquire(int offset) const; + void release_obj_field_put(int offset, oop value); + + jbyte byte_field_acquire(int offset) const; + void release_byte_field_put(int offset, jbyte contents); + + jchar char_field_acquire(int offset) const; + void release_char_field_put(int offset, jchar contents); + + jboolean bool_field_acquire(int offset) const; + void release_bool_field_put(int offset, jboolean contents); + + jint int_field_acquire(int offset) const; + void release_int_field_put(int offset, jint contents); + + jshort short_field_acquire(int offset) const; + void release_short_field_put(int offset, jshort contents); + + jlong long_field_acquire(int offset) const; + void release_long_field_put(int offset, jlong contents); + + jfloat float_field_acquire(int offset) const; + void release_float_field_put(int offset, jfloat contents); + + jdouble double_field_acquire(int offset) const; + void release_double_field_put(int offset, jdouble contents); + + address address_field_acquire(int offset) const; + void release_address_field_put(int offset, address contents); }; #endif // SHARE_VM_OOPS_KLASSOOP_HPP
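
The klassOop.hpp hunk above relies on a deliberate idiom: the oopDesc-style field accessors are redeclared private and left without bodies, so any code that tries to treat a klassOop like an ordinary Java object fails to compile (or link) instead of silently reading garbage. A minimal illustration of the same pattern outside HotSpot, with invented names:

#include <iostream>

class Handle {
public:
    int id() const { return _id; }              // the supported access path
private:
    // Poisoned accessors: declared but never defined, and private, so outside
    // callers fail to compile and accidental internal uses fail to link.
    int  raw_field(int offset) const;
    void raw_field_put(int offset, int value);
    int _id = 42;
};

int main() {
    Handle h;
    std::cout << h.id() << "\n";
    // h.raw_field(0);   // would not compile: raw_field is private and undefined
    return 0;
}
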
--- a/src/share/vm/oops/klassVtable.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/klassVtable.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -883,7 +883,7 @@ int ime_num = 0; // Skip first methodOop if it is a class initializer - int i = ((methodOop)methods()->obj_at(0))->name() != vmSymbols::class_initializer_name() ? 0 : 1; + int i = ((methodOop)methods()->obj_at(0))->is_static_initializer() ? 1 : 0; // m, method_name, method_signature, klass reset each loop so they // don't need preserving across check_signature_loaders call @@ -1095,7 +1095,7 @@ itableOffsetEntry* ioe = (itableOffsetEntry*)klass->start_of_itable(); itableMethodEntry* ime = (itableMethodEntry*)(ioe + nof_interfaces); intptr_t* end = klass->end_of_itable(); - assert((oop*)(ime + nof_methods) <= (oop*)klass->start_of_static_fields(), "wrong offset calculation (1)"); + assert((oop*)(ime + nof_methods) <= (oop*)klass->start_of_nonstatic_oop_maps(), "wrong offset calculation (1)"); assert((oop*)(end) == (oop*)(ime + nof_methods), "wrong offset calculation (2)"); // Visit all interfaces and initialize itable offset table @@ -1121,7 +1121,7 @@ assert(index < methods->length(), "should find index for resolve_invoke"); } // Adjust for <clinit>, which is left out of table if first method - if (methods->length() > 0 && ((methodOop)methods->obj_at(0))->name() == vmSymbols::class_initializer_name()) { + if (methods->length() > 0 && ((methodOop)methods->obj_at(0))->is_static_initializer()) { index--; } return index; @@ -1135,7 +1135,7 @@ int index = itable_index; // Adjust for <clinit>, which is left out of table if first method - if (methods->length() > 0 && ((methodOop)methods->obj_at(0))->name() == vmSymbols::class_initializer_name()) { + if (methods->length() > 0 && ((methodOop)methods->obj_at(0))->is_static_initializer()) { index++; }
--- a/src/share/vm/oops/markOop.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/markOop.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/methodDataOop.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/methodDataOop.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -228,7 +228,7 @@ return byte_offset_of(DataLayout, _header._struct._bci); } static ByteSize cell_offset(int index) { - return byte_offset_of(DataLayout, _cells[index]); + return byte_offset_of(DataLayout, _cells) + in_ByteSize(index * cell_size); } // Return a value which, when or-ed as a byte into _flags, sets the flag. static int flag_number_to_byte_constant(int flag_number) {
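
The cell_offset change above computes the offset of the index-th profiling cell as the offset of the _cells array plus index times the cell size, instead of applying the offset-of macro to an indexed element; with a runtime index the indexed form is not a valid member designator. A hedged sketch of the same computation with a plain struct whose field names are illustrative:

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct DataLayoutLike {
    unsigned char _header[8];
    std::intptr_t _cells[1];   // tail of profiling cells, indexed at runtime
};

// Offset (in bytes) of cell `index`, computed from the offset of the array
// itself plus a scaled index -- the same shape as the fixed cell_offset().
static std::size_t cell_offset(int index) {
    return offsetof(DataLayoutLike, _cells) + index * sizeof(std::intptr_t);
}

int main() {
    std::printf("cell 0 at byte %zu, cell 3 at byte %zu\n",
                cell_offset(0), cell_offset(3));
    return 0;
}
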
--- a/src/share/vm/oops/methodKlass.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/methodKlass.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -103,6 +103,12 @@ m->backedge_counter()->init(); m->clear_number_of_breakpoints(); +#ifdef TIERED + m->set_rate(0); + m->set_prev_event_count(0); + m->set_prev_time(0); +#endif + assert(m->is_parsable(), "must be parsable here."); assert(m->size() == size, "wrong size for object"); // We should not publish an uprasable object's reference
--- a/src/share/vm/oops/methodOop.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/methodOop.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -466,7 +466,20 @@ bool methodOopDesc::is_initializer() const { - return name() == vmSymbols::object_initializer_name() || name() == vmSymbols::class_initializer_name(); + return name() == vmSymbols::object_initializer_name() || is_static_initializer(); +} + +bool methodOopDesc::has_valid_initializer_flags() const { + return (is_static() || + instanceKlass::cast(method_holder())->major_version() < 51); +} + +bool methodOopDesc::is_static_initializer() const { + // For classfiles version 51 or greater, ensure that the clinit method is + // static. Non-static methods with the name "<clinit>" are not static + // initializers. (older classfiles exempted for backward compatibility) + return name() == vmSymbols::class_initializer_name() && + has_valid_initializer_flags(); } @@ -839,11 +852,11 @@ bool methodOopDesc::is_method_handle_invoke_name(vmSymbols::SID name_sid) { switch (name_sid) { case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name): - case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name): + case vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name): return true; } - if (AllowTransitionalJSR292 - && name_sid == vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name)) + if (AllowInvokeGeneric + && name_sid == vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name)) return true; return false; } @@ -852,7 +865,7 @@ enum { _imcp_invoke_name = 1, // utf8: 'invokeExact' or 'invokeGeneric' _imcp_invoke_signature, // utf8: (variable Symbol*) - _imcp_method_type_value, // string: (variable java/dyn/MethodType, sic) + _imcp_method_type_value, // string: (variable java/lang/invoke/MethodType, sic) _imcp_limit }; @@ -908,6 +921,10 @@ tty->cr(); } + // invariant: cp->symbol_at_put is preceded by a refcount increment (more usually a lookup) + name->increment_refcount(); + signature->increment_refcount(); + constantPoolHandle cp; { constantPoolOop cp_oop = oopFactory::new_constantPool(_imcp_limit, IsSafeConc, CHECK_(empty)); @@ -1078,7 +1095,7 @@ vmSymbols::SID name_id = vmSymbols::find_sid(name()); if (name_id == vmSymbols::NO_SID) return; vmSymbols::SID sig_id = vmSymbols::find_sid(signature()); - if (klass_id != vmSymbols::VM_SYMBOL_ENUM_NAME(java_dyn_MethodHandle) + if (klass_id != vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_MethodHandle) && sig_id == vmSymbols::NO_SID) return; jshort flags = access_flags().as_short(); @@ -1104,21 +1121,20 @@ break; // Signature-polymorphic methods: MethodHandle.invoke*, InvokeDynamic.*. - case vmSymbols::VM_SYMBOL_ENUM_NAME(java_dyn_MethodHandle): + case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_MethodHandle): if (is_static() || !is_native()) break; switch (name_id) { case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name): + if (!AllowInvokeGeneric) break; + case vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name): id = vmIntrinsics::_invokeGeneric; break; case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name): id = vmIntrinsics::_invokeExact; break; - case vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name): - if (AllowTransitionalJSR292) id = vmIntrinsics::_invokeExact; - break; } break; - case vmSymbols::VM_SYMBOL_ENUM_NAME(java_dyn_InvokeDynamic): + case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_InvokeDynamic): if (!is_static() || !is_native()) break; id = vmIntrinsics::_invokeDynamic; break;
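
The is_static_initializer/has_valid_initializer_flags pair added above encodes a class-file rule: from major version 51 onward a method named <clinit> only counts as the static initializer when it is also static, while older class files keep the lenient behaviour for backward compatibility. A small sketch of the predicate; the struct and constants are illustrative:

#include <cstdio>
#include <string>

struct MethodInfo {
    std::string name;
    bool        is_static;
    int         holder_major_version;   // class-file major version of the holder
};

// From version 51 on, <clinit> must carry the static flag to be the class
// initializer; older class files are exempted.
static bool has_valid_initializer_flags(const MethodInfo& m) {
    return m.is_static || m.holder_major_version < 51;
}

static bool is_static_initializer(const MethodInfo& m) {
    return m.name == "<clinit>" && has_valid_initializer_flags(m);
}

int main() {
    MethodInfo ok    = { "<clinit>", true,  51 };
    MethodInfo bad   = { "<clinit>", false, 51 };
    MethodInfo old51 = { "<clinit>", false, 50 };
    std::printf("%d %d %d\n", is_static_initializer(ok),
                is_static_initializer(bad), is_static_initializer(old51));
    return 0;
}
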
--- a/src/share/vm/oops/methodOop.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/methodOop.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -84,6 +84,11 @@ // | invocation_counter | // | backedge_counter | // |------------------------------------------------------| +// | prev_time (tiered only, 64 bit wide) | +// | | +// |------------------------------------------------------| +// | rate (tiered) | +// |------------------------------------------------------| // | code (pointer) | // | i2i (pointer) | // | adapter (pointer) | @@ -124,6 +129,11 @@ InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations InvocationCounter _backedge_counter; // Incremented before each backedge taken - used to trigger frequencey-based optimizations +#ifdef TIERED + jlong _prev_time; // Previous time the rate was acquired + float _rate; // Events (invocation and backedge counter increments) per millisecond +#endif + #ifndef PRODUCT int _compiled_invocation_count; // Number of nmethod invocations so far (for perf. debugging) #endif @@ -304,6 +314,17 @@ InvocationCounter* invocation_counter() { return &_invocation_counter; } InvocationCounter* backedge_counter() { return &_backedge_counter; } +#ifdef TIERED + // We are reusing interpreter_invocation_count as a holder for the previous event count! + // We can do that since interpreter_invocation_count is not used in tiered. + int prev_event_count() const { return _interpreter_invocation_count; } + void set_prev_event_count(int count) { _interpreter_invocation_count = count; } + jlong prev_time() const { return _prev_time; } + void set_prev_time(jlong time) { _prev_time = time; } + float rate() const { return _rate; } + void set_rate(float rate) { _rate = rate; } +#endif + int invocation_count(); int backedge_count(); @@ -497,6 +518,13 @@ // returns true if the method is an initializer (<init> or <clinit>). bool is_initializer() const; + // returns true if the method is static OR if the classfile version < 51 + bool has_valid_initializer_flags() const; + + // returns true if the method name is <clinit> and the method has + // valid static initializer flags. + bool is_static_initializer() const; + // compiled code support // NOTE: code() is inherently racy as deopt can be clearing code // simultaneously. Use with caution. @@ -579,7 +607,7 @@ // method handles want to be able to push a few extra values (e.g., a bound receiver), and // invokedynamic sometimes needs to push a bootstrap method, call site, and arglist, // all without checking for a stack overflow - static int extra_stack_entries() { return (EnableMethodHandles ? (int)MethodHandlePushLimit : 0) + (EnableInvokeDynamic ? 3 : 0); } + static int extra_stack_entries() { return EnableInvokeDynamic ? (int) MethodHandlePushLimit + 3 : 0; } static int extra_stack_words(); // = extra_stack_entries() * Interpreter::stackElementSize() // RedefineClasses() support:
--- a/src/share/vm/oops/objArrayKlassKlass.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/objArrayKlassKlass.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -31,6 +31,13 @@ #include "oops/objArrayKlassKlass.hpp" #include "oops/oop.inline.hpp" #include "oops/oop.inline2.hpp" +#ifndef SERIALGC +#include "gc_implementation/parNew/parOopClosures.inline.hpp" +#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" +#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" +#include "memory/cardTableRS.hpp" +#include "oops/oop.pcgc.inline.hpp" +#endif klassOop objArrayKlassKlass::create_klass(TRAPS) { objArrayKlassKlass o; @@ -236,12 +243,23 @@ addr = oak->bottom_klass_addr(); if (mr.contains(addr)) blk->do_oop(addr); - return arrayKlassKlass::oop_oop_iterate(obj, blk); + return arrayKlassKlass::oop_oop_iterate_m(obj, blk, mr); } #ifndef SERIALGC void objArrayKlassKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { assert(obj->blueprint()->oop_is_objArrayKlass(),"must be an obj array klass"); + objArrayKlass* oak = objArrayKlass::cast((klassOop)obj); + oop* p = oak->element_klass_addr(); + if (PSScavenge::should_scavenge(p)) { + pm->claim_or_forward_depth(p); + } + p = oak->bottom_klass_addr(); + if (PSScavenge::should_scavenge(p)) { + pm->claim_or_forward_depth(p); + } + + arrayKlassKlass::oop_push_contents(pm, obj); } int objArrayKlassKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { @@ -287,7 +305,7 @@ // Verification void objArrayKlassKlass::oop_verify_on(oop obj, outputStream* st) { - klassKlass::oop_verify_on(obj, st); + arrayKlassKlass::oop_verify_on(obj, st); objArrayKlass* oak = objArrayKlass::cast((klassOop)obj); guarantee(oak->element_klass()->is_perm(), "should be in permspace"); guarantee(oak->element_klass()->is_klass(), "should be klass");
--- a/src/share/vm/oops/oop.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/oop.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -129,6 +129,7 @@ // type test operations (inlined in oop.inline.h) bool is_instance() const; + bool is_instanceMirror() const; bool is_instanceRef() const; bool is_array() const; bool is_objArray() const;
--- a/src/share/vm/oops/oop.inline.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/oop.inline.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -141,6 +141,7 @@ inline bool oopDesc::is_a(klassOop k) const { return blueprint()->is_subtype_of(k); } inline bool oopDesc::is_instance() const { return blueprint()->oop_is_instance(); } +inline bool oopDesc::is_instanceMirror() const { return blueprint()->oop_is_instanceMirror(); } inline bool oopDesc::is_instanceRef() const { return blueprint()->oop_is_instanceRef(); } inline bool oopDesc::is_array() const { return blueprint()->oop_is_array(); } inline bool oopDesc::is_objArray() const { return blueprint()->oop_is_objArray(); } @@ -399,7 +400,7 @@ inline int oopDesc::size_given_klass(Klass* klass) { int lh = klass->layout_helper(); - int s = lh >> LogHeapWordSize; // deliver size scaled by wordSize + int s; // lh is now a value computed at class initialization that may hint // at the size. For instances, this is positive and equal to the @@ -412,7 +413,13 @@ // alive or dead. So the speed here is equal in importance to the // speed of allocation. - if (lh <= Klass::_lh_neutral_value) { + if (lh > Klass::_lh_neutral_value) { + if (!Klass::layout_helper_needs_slow_path(lh)) { + s = lh >> LogHeapWordSize; // deliver size scaled by wordSize + } else { + s = klass->oop_size(this); + } + } else if (lh <= Klass::_lh_neutral_value) { // The most common case is instances; fall through if so. if (lh < Klass::_lh_neutral_value) { // Second most common case is arrays. We have to fetch the
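
The size_given_klass hunk above exists because java.lang.Class instances are no longer a fixed size: a positive layout helper may now be tagged as needing the slow path, in which case the size must come from the klass's virtual oop_size() rather than being decoded from the helper. A simplified sketch of that dispatch; the encoding below is invented for illustration and is not HotSpot's real layout-helper format:

#include <cstdio>

static const int kLogHeapWordSize = 3;    // 64-bit heap words (example)
static const int kSlowPathBit     = 1;    // invented tag bit for this sketch

struct KlassLike {
    int layout_helper;                    // > 0: instance size information
    int variable_size_words;              // what oop_size() would report
    int oop_size() const { return variable_size_words; }
};

static int size_given_klass(const KlassLike& k) {
    int lh = k.layout_helper;
    if (lh > 0) {
        if ((lh & kSlowPathBit) == 0) {
            return lh >> kLogHeapWordSize;   // fast path: size encoded in helper
        }
        return k.oop_size();                 // slow path: e.g. Class mirrors
    }
    return 0;                                // arrays and other cases elided
}

int main() {
    KlassLike plain  = { 16 << kLogHeapWordSize, 0 };
    KlassLike mirror = { (16 << kLogHeapWordSize) | kSlowPathBit, 21 };
    std::printf("plain=%d mirror=%d\n",
                size_given_klass(plain), size_given_klass(mirror));
    return 0;
}
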
--- a/src/share/vm/oops/oopsHierarchy.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/oopsHierarchy.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -174,6 +174,7 @@ class Klass; class instanceKlass; +class instanceMirrorKlass; class instanceRefKlass; class methodKlass; class constMethodKlass;
--- a/src/share/vm/oops/symbol.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/symbol.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/symbol.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/symbol.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/typeArrayOop.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/oops/typeArrayOop.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/buildOopMap.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/buildOopMap.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/bytecodeInfo.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/bytecodeInfo.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "classfile/systemDictionary.hpp" #include "classfile/vmSymbols.hpp" +#include "compiler/compileBroker.hpp" #include "compiler/compileLog.hpp" #include "interpreter/linkResolver.hpp" #include "oops/objArrayKlass.hpp" @@ -75,13 +76,6 @@ assert(!UseOldInlining, "do not use for old stuff"); } - - -static void print_indent(int depth) { - tty->print(" "); - for (int i = depth; i != 0; --i) tty->print(" "); -} - static bool is_init_with_ea(ciMethod* callee_method, ciMethod* caller_method, Compile* C) { // True when EA is ON and a java constructor is called or @@ -100,7 +94,7 @@ if(callee_method->should_inline()) { *wci_result = *(WarmCallInfo::always_hot()); if (PrintInlining && Verbose) { - print_indent(inline_depth()); + CompileTask::print_inline_indent(inline_depth()); tty->print_cr("Inlined method is hot: "); } return NULL; @@ -116,7 +110,7 @@ size < InlineThrowMaxSize ) { wci_result->set_profit(wci_result->profit() * 100); if (PrintInlining && Verbose) { - print_indent(inline_depth()); + CompileTask::print_inline_indent(inline_depth()); tty->print_cr("Inlined method with many throws (throws=%d):", callee_method->interpreter_throwout_count()); } return NULL; @@ -138,9 +132,9 @@ max_size = C->freq_inline_size(); if (size <= max_size && TraceFrequencyInlining) { - print_indent(inline_depth()); + CompileTask::print_inline_indent(inline_depth()); tty->print_cr("Inlined frequent method (freq=%d count=%d):", freq, call_site_count); - print_indent(inline_depth()); + CompileTask::print_inline_indent(inline_depth()); callee_method->print(); tty->cr(); } @@ -315,8 +309,25 @@ if( inline_depth() > MaxInlineLevel ) { return "inlining too deep"; } - if( method() == callee_method && - inline_depth() > MaxRecursiveInlineLevel ) { + + // We need to detect recursive inlining of method handle targets: if + // the current method is a method handle adapter and one of the + // callers is the same method as the callee, we bail out if + // MaxRecursiveInlineLevel is hit. + if (method()->is_method_handle_adapter()) { + JVMState* jvms = caller_jvms(); + int inline_level = 0; + while (jvms != NULL && jvms->has_method()) { + if (jvms->method() == callee_method) { + inline_level++; + if (inline_level > MaxRecursiveInlineLevel) + return "recursively inlining too deep"; + } + jvms = jvms->caller(); + } + } + + if (method() == callee_method && inline_depth() > MaxRecursiveInlineLevel) { return "recursively inlining too deep"; } @@ -368,18 +379,14 @@ #ifndef PRODUCT //------------------------------print_inlining--------------------------------- // Really, the failure_msg can be a success message also. -void InlineTree::print_inlining(ciMethod *callee_method, int caller_bci, const char *failure_msg) const { - print_indent(inline_depth()); - tty->print("@ %d ", caller_bci); - if( callee_method ) callee_method->print_short_name(); - else tty->print(" callee not monotonic or profiled"); - tty->print(" %s", (failure_msg ? 
failure_msg : "inline")); - if( Verbose && callee_method ) { +void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, const char* failure_msg) const { + CompileTask::print_inlining(callee_method, inline_depth(), caller_bci, failure_msg ? failure_msg : "inline"); + if (callee_method == NULL) tty->print(" callee not monotonic or profiled"); + if (Verbose && callee_method) { const InlineTree *top = this; while( top->caller_tree() != NULL ) { top = top->caller_tree(); } tty->print(" bcs: %d+%d invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count()); } - tty->cr(); } #endif @@ -487,7 +494,7 @@ if (caller_jvms->method()->is_method_handle_adapter()) new_depth_adjust -= 1; // don't count actions in MH or indy adapter frames else if (callee_method->is_method_handle_invoke()) { - new_depth_adjust -= 1; // don't count method handle calls from java.dyn implem + new_depth_adjust -= 1; // don't count method handle calls from java.lang.invoke implem } if (new_depth_adjust != 0 && PrintInlining) { stringStream nm1; caller_jvms->method()->print_name(&nm1);
--- a/src/share/vm/opto/c2_globals.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/c2_globals.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -180,6 +180,9 @@ develop(bool, TraceLoopPredicate, false, \ "Trace generation of loop predicates") \ \ + develop(bool, TraceLoopOpts, false, \ + "Trace executed loop optimizations") \ + \ product(bool, OptimizeFill, false, \ "convert fill/copy loops into intrinsic") \ \
--- a/src/share/vm/opto/c2compiler.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/c2compiler.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/callGenerator.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/callGenerator.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -199,7 +199,7 @@ Node* call_site = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw); // Load the target MethodHandle from the CallSite object. - Node* target_mh_adr = kit.basic_plus_adr(call_site, call_site, java_dyn_CallSite::target_offset_in_bytes()); + Node* target_mh_adr = kit.basic_plus_adr(call_site, call_site, java_lang_invoke_CallSite::target_offset_in_bytes()); Node* target_mh = kit.make_load(kit.control(), target_mh_adr, TypeInstPtr::BOTTOM, T_OBJECT); address resolve_stub = SharedRuntime::get_resolve_opt_virtual_call_stub(); @@ -725,7 +725,7 @@ Node* call_site = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw); // Load the target MethodHandle from the CallSite object. - Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_dyn_CallSite::target_offset_in_bytes()); + Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_lang_invoke_CallSite::target_offset_in_bytes()); Node* target_mh = kit.make_load(kit.control(), target_adr, TypeInstPtr::BOTTOM, T_OBJECT); // Check if the MethodHandle is still the same. @@ -978,31 +978,19 @@ return head; } -WarmCallInfo* WarmCallInfo::_always_hot = NULL; -WarmCallInfo* WarmCallInfo::_always_cold = NULL; +WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(), + WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE()); +WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(), + WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE()); WarmCallInfo* WarmCallInfo::always_hot() { - if (_always_hot == NULL) { - static double bits[sizeof(WarmCallInfo) / sizeof(double) + 1] = {0}; - WarmCallInfo* ci = (WarmCallInfo*) bits; - ci->_profit = ci->_count = MAX_VALUE(); - ci->_work = ci->_size = MIN_VALUE(); - _always_hot = ci; - } - assert(_always_hot->is_hot(), "must always be hot"); - return _always_hot; + assert(_always_hot.is_hot(), "must always be hot"); + return &_always_hot; } WarmCallInfo* WarmCallInfo::always_cold() { - if (_always_cold == NULL) { - static double bits[sizeof(WarmCallInfo) / sizeof(double) + 1] = {0}; - WarmCallInfo* ci = (WarmCallInfo*) bits; - ci->_profit = ci->_count = MIN_VALUE(); - ci->_work = ci->_size = MAX_VALUE(); - _always_cold = ci; - } - assert(_always_cold->is_cold(), "must always be cold"); - return _always_cold; + assert(_always_cold.is_cold(), "must always be cold"); + return &_always_cold; }
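The WarmCallInfo change above drops the lazily built, reinterpret-cast static buffers for the always-hot/always-cold sentinels in favour of ordinary statically constructed objects. A rough sketch of the resulting pattern, with the Sentinel type and its field values made up for this note:

struct Sentinel {
  float count, profit, work, size;
  Sentinel(float c, float p, float w, float s)
    : count(c), profit(p), work(w), size(s) {}
};

// The sentinels are now plain static objects built by a constructor, so the
// accessors only hand out their addresses instead of initializing on first use.
static Sentinel always_hot ( 1e30f,  1e30f, -1e30f, -1e30f);
static Sentinel always_cold(-1e30f, -1e30f,  1e30f,  1e30f);

const Sentinel* hot()  { return &always_hot;  }
const Sentinel* cold() { return &always_cold; }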
--- a/src/share/vm/opto/callGenerator.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/callGenerator.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -215,8 +215,20 @@ WarmCallInfo* next() const { return _next; } void set_next(WarmCallInfo* n) { _next = n; } - static WarmCallInfo* _always_hot; - static WarmCallInfo* _always_cold; + static WarmCallInfo _always_hot; + static WarmCallInfo _always_cold; + + // Constructor initialization of always_hot and always_cold + WarmCallInfo(float c, float p, float w, float s) { + _call = NULL; + _hot_cg = NULL; + _next = NULL; + _count = c; + _profit = p; + _work = w; + _size = s; + _heat = 0; + } public: // Because WarmInfo objects live over the entire lifetime of the
--- a/src/share/vm/opto/cfgnode.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/cfgnode.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1349,9 +1349,17 @@ static void split_once(PhaseIterGVN *igvn, Node *phi, Node *val, Node *n, Node *newn) { igvn->hash_delete(n); // Remove from hash before hacking edges + Node* predicate_proj = NULL; uint j = 1; - for( uint i = phi->req()-1; i > 0; i-- ) { - if( phi->in(i) == val ) { // Found a path with val? + for (uint i = phi->req()-1; i > 0; i--) { + if (phi->in(i) == val) { // Found a path with val? + if (n->is_Region()) { + Node* proj = PhaseIdealLoop::find_predicate(n->in(i)); + if (proj != NULL) { + assert(predicate_proj == NULL, "only one predicate entry expected"); + predicate_proj = proj; + } + } // Add to NEW Region/Phi, no DU info newn->set_req( j++, n->in(i) ); // Remove from OLD Region/Phi @@ -1362,6 +1370,12 @@ // Register the new node but do not transform it. Cannot transform until the // entire Region/Phi conglomerate has been hacked as a single huge transform. igvn->register_new_node_with_optimizer( newn ); + + // Clone loop predicates + if (predicate_proj != NULL) { + newn = igvn->clone_loop_predicates(predicate_proj, newn); + } + // Now I can point to the new node. n->add_req(newn); igvn->_worklist.push(n);
--- a/src/share/vm/opto/chaitin.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/chaitin.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -673,7 +673,7 @@ case Op_RegD: lrg.set_num_regs(2); // Define platform specific register pressure -#ifdef SPARC +#if defined(SPARC) || defined(ARM) lrg.set_reg_pressure(2); #elif defined(IA32) if( ireg == Op_RegL ) {
--- a/src/share/vm/opto/compile.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/compile.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1202,11 +1202,15 @@ // Oop pointers need some flattening const TypeInstPtr *to = tj->isa_instptr(); if( to && _AliasLevel >= 2 && to != TypeOopPtr::BOTTOM ) { + ciInstanceKlass *k = to->klass()->as_instance_klass(); if( ptr == TypePtr::Constant ) { - // No constant oop pointers (such as Strings); they alias with - // unknown strings. - assert(!is_known_inst, "not scalarizable allocation"); - tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset); + if (to->klass() != ciEnv::current()->Class_klass() || + offset < k->size_helper() * wordSize) { + // No constant oop pointers (such as Strings); they alias with + // unknown strings. + assert(!is_known_inst, "not scalarizable allocation"); + tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset); + } } else if( is_known_inst ) { tj = to; // Keep NotNull and klass_is_exact for instance type } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) { @@ -1216,7 +1220,6 @@ tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset); } // Canonicalize the holder of this field - ciInstanceKlass *k = to->klass()->as_instance_klass(); if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) { // First handle header references such as a LoadKlassNode, even if the // object's klass is unloaded at compile time (4965979). @@ -1224,9 +1227,13 @@ tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset); } } else if (offset < 0 || offset >= k->size_helper() * wordSize) { - to = NULL; - tj = TypeOopPtr::BOTTOM; - offset = tj->offset(); + // Static fields are in the space above the normal instance + // fields in the java.lang.Class instance. + if (to->klass() != ciEnv::current()->Class_klass()) { + to = NULL; + tj = TypeOopPtr::BOTTOM; + offset = tj->offset(); + } } else { ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset); if (!k->equals(canonical_holder) || tj->offset() != offset) { @@ -1399,7 +1406,7 @@ //--------------------------------find_alias_type------------------------------ -Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create) { +Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field) { if (_AliasLevel == 0) return alias_type(AliasIdxBot); @@ -1464,22 +1471,28 @@ // but the base pointer type is not distinctive enough to identify // references into JavaThread.) - // Check for final instance fields. + // Check for final fields. 
const TypeInstPtr* tinst = flat->isa_instptr(); if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) { - ciInstanceKlass *k = tinst->klass()->as_instance_klass(); - ciField* field = k->get_field_by_offset(tinst->offset(), false); + ciField* field; + if (tinst->const_oop() != NULL && + tinst->klass() == ciEnv::current()->Class_klass() && + tinst->offset() >= (tinst->klass()->as_instance_klass()->size_helper() * wordSize)) { + // static field + ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass(); + field = k->get_field_by_offset(tinst->offset(), true); + } else { + ciInstanceKlass *k = tinst->klass()->as_instance_klass(); + field = k->get_field_by_offset(tinst->offset(), false); + } + assert(field == NULL || + original_field == NULL || + (field->holder() == original_field->holder() && + field->offset() == original_field->offset() && + field->is_static() == original_field->is_static()), "wrong field?"); // Set field() and is_rewritable() attributes. if (field != NULL) alias_type(idx)->set_field(field); } - const TypeKlassPtr* tklass = flat->isa_klassptr(); - // Check for final static fields. - if (tklass && tklass->klass()->is_instance_klass()) { - ciInstanceKlass *k = tklass->klass()->as_instance_klass(); - ciField* field = k->get_field_by_offset(tklass->offset(), true); - // Set field() and is_rewritable() attributes. - if (field != NULL) alias_type(idx)->set_field(field); - } } // Fill the cache for next time. @@ -1502,10 +1515,10 @@ Compile::AliasType* Compile::alias_type(ciField* field) { const TypeOopPtr* t; if (field->is_static()) - t = TypeKlassPtr::make(field->holder()); + t = TypeInstPtr::make(field->holder()->java_mirror()); else t = TypeOopPtr::make_from_klass_raw(field->holder()); - AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes())); + AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field); assert(field->is_final() == !atp->is_rewritable(), "must get the rewritable bits correct"); return atp; } @@ -1522,7 +1535,7 @@ if (adr_type == NULL) return true; if (adr_type == TypePtr::BOTTOM) return true; - return find_alias_type(adr_type, true) != NULL; + return find_alias_type(adr_type, true, NULL) != NULL; } //-----------------------------must_alias-------------------------------------- @@ -1619,7 +1632,6 @@ igvn.replace_node(n, n->in(1)); } assert(predicate_count()==0, "should be clean!"); - igvn.optimize(); } //------------------------------Optimize--------------------------------------- @@ -1676,7 +1688,7 @@ if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) { { TracePhase t2("idealLoop", &_t_idealLoop, true); - PhaseIdealLoop ideal_loop( igvn, true, UseLoopPredicate); + PhaseIdealLoop ideal_loop( igvn, true ); loop_opts_cnt--; if (major_progress()) print_method("PhaseIdealLoop 1", 2); if (failing()) return; @@ -1684,7 +1696,7 @@ // Loop opts pass if partial peeling occurred in previous pass if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) { TracePhase t3("idealLoop", &_t_idealLoop, true); - PhaseIdealLoop ideal_loop( igvn, false, UseLoopPredicate); + PhaseIdealLoop ideal_loop( igvn, false ); loop_opts_cnt--; if (major_progress()) print_method("PhaseIdealLoop 2", 2); if (failing()) return; @@ -1692,7 +1704,7 @@ // Loop opts pass for loop-unrolling before CCP if(major_progress() && (loop_opts_cnt > 0)) { TracePhase t4("idealLoop", &_t_idealLoop, true); - PhaseIdealLoop ideal_loop( igvn, false, UseLoopPredicate); + PhaseIdealLoop ideal_loop( igvn, false ); 
loop_opts_cnt--; if (major_progress()) print_method("PhaseIdealLoop 3", 2); } @@ -1730,21 +1742,13 @@ // peeling, unrolling, etc. if(loop_opts_cnt > 0) { debug_only( int cnt = 0; ); - bool loop_predication = UseLoopPredicate; while(major_progress() && (loop_opts_cnt > 0)) { TracePhase t2("idealLoop", &_t_idealLoop, true); assert( cnt++ < 40, "infinite cycle in loop optimization" ); - PhaseIdealLoop ideal_loop( igvn, true, loop_predication); + PhaseIdealLoop ideal_loop( igvn, true); loop_opts_cnt--; if (major_progress()) print_method("PhaseIdealLoop iterations", 2); if (failing()) return; - // Perform loop predication optimization during first iteration after CCP. - // After that switch it off and cleanup unused loop predicates. - if (loop_predication) { - loop_predication = false; - cleanup_loop_predicates(igvn); - if (failing()) return; - } } } @@ -2531,6 +2535,36 @@ frc.inc_inner_loop_count(); } break; + case Op_LShiftI: + case Op_RShiftI: + case Op_URShiftI: + case Op_LShiftL: + case Op_RShiftL: + case Op_URShiftL: + if (Matcher::need_masked_shift_count) { + // The cpu's shift instructions don't restrict the count to the + // lower 5/6 bits. We need to do the masking ourselves. + Node* in2 = n->in(2); + juint mask = (n->bottom_type() == TypeInt::INT) ? (BitsPerInt - 1) : (BitsPerLong - 1); + const TypeInt* t = in2->find_int_type(); + if (t != NULL && t->is_con()) { + juint shift = t->get_con(); + if (shift > mask) { // Unsigned cmp + Compile* C = Compile::current(); + n->set_req(2, ConNode::make(C, TypeInt::make(shift & mask))); + } + } else { + if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) { + Compile* C = Compile::current(); + Node* shift = new (C, 3) AndINode(in2, ConNode::make(C, TypeInt::make(mask))); + n->set_req(2, shift); + } + } + if (in2->outcnt() == 0) { // Remove dead node + in2->disconnect_inputs(NULL); + } + } + break; default: assert( !n->is_Call(), "" ); assert( !n->is_Mem(), "" );
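The new Op_LShiftI/Op_RShiftL/... case in compile.cpp masks shift counts to the low 5 bits (int) or 6 bits (long) on CPUs whose shift instructions do not truncate the count themselves, either by folding a constant count or by inserting an AndI node; where the hardware already truncates, the flag is false and nothing is added. The Java-level semantics being enforced can be sketched in plain C++ (helper names invented here):

#include <cstdint>

int32_t java_ishl(int32_t value, int32_t count) {
  // Java uses only the low 5 bits of an int shift count (BitsPerInt - 1).
  return (int32_t)((uint32_t)value << (count & 31));
}

int64_t java_lshl(int64_t value, int32_t count) {
  // Java uses only the low 6 bits of a long shift count (BitsPerLong - 1).
  return (int64_t)((uint64_t)value << (count & 63));
}

With this masking, for example, an int shift by 33 behaves as a shift by 1, which is what the bytecode semantics require regardless of what the underlying instruction would do with an oversized count.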
--- a/src/share/vm/opto/compile.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/compile.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -489,6 +489,9 @@ // remove the opaque nodes that protect the predicates so that the unused checks and // uncommon traps will be eliminated from the graph. void cleanup_loop_predicates(PhaseIterGVN &igvn); + bool is_predicate_opaq(Node * n) { + return _predicate_opaqs->contains(n); + } // Compilation environment. Arena* comp_arena() { return &_comp_arena; } @@ -596,7 +599,7 @@ } AliasType* alias_type(int idx) { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; } - AliasType* alias_type(const TypePtr* adr_type) { return find_alias_type(adr_type, false); } + AliasType* alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); } bool have_alias_type(const TypePtr* adr_type); AliasType* alias_type(ciField* field); @@ -835,7 +838,7 @@ void grow_alias_types(); AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type); const TypePtr *flatten_alias_type(const TypePtr* adr_type) const; - AliasType* find_alias_type(const TypePtr* adr_type, bool no_create); + AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field); void verify_top(Node*) const PRODUCT_RETURN;
--- a/src/share/vm/opto/doCall.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/doCall.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "ci/ciCallSite.hpp" #include "ci/ciMethodHandle.hpp" #include "classfile/vmSymbols.hpp" +#include "compiler/compileBroker.hpp" #include "compiler/compileLog.hpp" #include "interpreter/linkResolver.hpp" #include "opto/addnode.hpp" @@ -43,17 +44,17 @@ #ifndef PRODUCT void trace_type_profile(ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) { if (TraceTypeProfile || PrintInlining || PrintOptoInlining) { - tty->print(" "); - for( int i = 0; i < depth; i++ ) tty->print(" "); - if (!PrintOpto) { - method->print_short_name(); - tty->print(" ->"); + if (!PrintInlining) { + if (!PrintOpto && !PrintCompilation) { + method->print_short_name(); + tty->cr(); + } + CompileTask::print_inlining(prof_method, depth, bci); } - tty->print(" @ %d ", bci); - prof_method->print_short_name(); - tty->print(" >>TypeProfile (%d/%d counts) = ", receiver_count, site_count); + CompileTask::print_inline_indent(depth); + tty->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count); prof_klass->name()->print_symbol(); - tty->print_cr(" (%d bytes)", prof_method->code_size()); + tty->cr(); } } #endif @@ -62,6 +63,7 @@ JVMState* jvms, bool allow_inline, float prof_factor) { CallGenerator* cg; + guarantee(call_method != NULL, "failed method resolution"); // Dtrace currently doesn't work unless all calls are vanilla if (env()->dtrace_method_probes()) { @@ -129,8 +131,9 @@ // Get an adapter for the MethodHandle. ciMethod* target_method = method_handle->get_method_handle_adapter(); - - CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor); + CallGenerator* hit_cg = NULL; + if (target_method != NULL) + hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor); if (hit_cg != NULL && hit_cg->is_inline()) return hit_cg; } @@ -151,8 +154,9 @@ // Get an adapter for the MethodHandle. ciMethod* target_method = method_handle->get_invokedynamic_adapter(); - - CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor); + CallGenerator* hit_cg = NULL; + if (target_method != NULL) + hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor); if (hit_cg != NULL && hit_cg->is_inline()) { CallGenerator* miss_cg = CallGenerator::for_dynamic_call(call_method); return CallGenerator::for_predicted_dynamic_call(method_handle, miss_cg, hit_cg, prof_factor); @@ -269,13 +273,13 @@ } if (miss_cg != NULL) { if (next_hit_cg != NULL) { - NOT_PRODUCT(trace_type_profile(jvms->method(), jvms->depth(), jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1))); + NOT_PRODUCT(trace_type_profile(jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1))); // We don't need to record dependency on a receiver here and below. // Whenever we inline, the dependency is added by Parse::Parse(). 
miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX); } if (miss_cg != NULL) { - NOT_PRODUCT(trace_type_profile(jvms->method(), jvms->depth(), jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count)); + NOT_PRODUCT(trace_type_profile(jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count)); cg = CallGenerator::for_predicted_call(profile.receiver(0), miss_cg, hit_cg, profile.receiver_prob(0)); if (cg != NULL) return cg; }
--- a/src/share/vm/opto/escape.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/escape.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -594,7 +594,7 @@ // // Create a new version of orig_phi if necessary. Returns either the newly -// created phi or an existing phi. Sets create_new to indicate wheter a new +// created phi or an existing phi. Sets create_new to indicate whether a new // phi was created. Cache the last newly created phi in the node map. // PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn, bool &new_created) { @@ -649,7 +649,7 @@ } // -// Return a new version of Memory Phi "orig_phi" with the inputs having the +// Return a new version of Memory Phi "orig_phi" with the inputs having the // specified alias index. // PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn) { @@ -828,11 +828,15 @@ break; // hit one of our sentinels if (result->is_Mem()) { const Type *at = phase->type(result->in(MemNode::Address)); - if (at != Type::TOP) { - assert (at->isa_ptr() != NULL, "pointer type required."); - int idx = C->get_alias_index(at->is_ptr()); - if (idx == alias_idx) - break; + if (at == Type::TOP) + break; // Dead + assert (at->isa_ptr() != NULL, "pointer type required."); + int idx = C->get_alias_index(at->is_ptr()); + if (idx == alias_idx) + break; // Found + if (!is_instance && (at->isa_oopptr() == NULL || + !at->is_oopptr()->is_known_instance())) { + break; // Do not skip store to general memory slice. } result = result->in(MemNode::Memory); } @@ -902,13 +906,13 @@ PhiNode *mphi = result->as_Phi(); assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); const TypePtr *t = mphi->adr_type(); - if (C->get_alias_index(t) != alias_idx) { - // Create a new Phi with the specified alias index type. - result = split_memory_phi(mphi, alias_idx, orig_phis, phase); - } else if (!is_instance) { + if (!is_instance) { // Push all non-instance Phis on the orig_phis worklist to update inputs // during Phase 4 if needed. orig_phis.append_if_missing(mphi); + } else if (C->get_alias_index(t) != alias_idx) { + // Create a new Phi with the specified alias index type. + result = split_memory_phi(mphi, alias_idx, orig_phis, phase); } } // the result is either MemNode, PhiNode, InitializeNode.
--- a/src/share/vm/opto/gcm.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/gcm.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/graphKit.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/graphKit.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -3338,14 +3338,62 @@ return NULL; } +//----------------------------- loop predicates --------------------------- + +//------------------------------add_predicate_impl---------------------------- +void GraphKit::add_predicate_impl(Deoptimization::DeoptReason reason, int nargs) { + // Too many traps seen? + if (too_many_traps(reason)) { +#ifdef ASSERT + if (TraceLoopPredicate) { + int tc = C->trap_count(reason); + tty->print("too many traps=%s tcount=%d in ", + Deoptimization::trap_reason_name(reason), tc); + method()->print(); // which method has too many predicate traps + tty->cr(); + } +#endif + // We cannot afford to take more traps here, + // do not generate predicate. + return; + } + + Node *cont = _gvn.intcon(1); + Node* opq = _gvn.transform(new (C, 2) Opaque1Node(C, cont)); + Node *bol = _gvn.transform(new (C, 2) Conv2BNode(opq)); + IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN); + Node* iffalse = _gvn.transform(new (C, 1) IfFalseNode(iff)); + C->add_predicate_opaq(opq); + { + PreserveJVMState pjvms(this); + set_control(iffalse); + _sp += nargs; + uncommon_trap(reason, Deoptimization::Action_maybe_recompile); + } + Node* iftrue = _gvn.transform(new (C, 1) IfTrueNode(iff)); + set_control(iftrue); +} + +//------------------------------add_predicate--------------------------------- +void GraphKit::add_predicate(int nargs) { + if (UseLoopPredicate) { + add_predicate_impl(Deoptimization::Reason_predicate, nargs); + } +} + //----------------------------- store barriers ---------------------------- #define __ ideal. void GraphKit::sync_kit(IdealKit& ideal) { + set_all_memory(__ merged_memory()); + set_i_o(__ i_o()); + set_control(__ ctrl()); +} + +void GraphKit::final_sync(IdealKit& ideal) { // Final sync IdealKit and graphKit. __ drain_delay_transform(); - set_all_memory(__ merged_memory()); - set_control(__ ctrl()); + sync_kit(ideal); } // vanilla/CMS post barrier @@ -3392,7 +3440,7 @@ // (Else it's an array (or unknown), and we want more precise card marks.) assert(adr != NULL, ""); - IdealKit ideal(gvn(), control(), merged_memory(), true); + IdealKit ideal(this, true); // Convert the pointer to an int prior to doing math on it Node* cast = __ CastPX(__ ctrl(), adr); @@ -3418,7 +3466,7 @@ } // Final sync IdealKit and GraphKit. - sync_kit(ideal); + final_sync(ideal); } // G1 pre/post barriers @@ -3428,7 +3476,7 @@ Node* val, const TypeOopPtr* val_type, BasicType bt) { - IdealKit ideal(gvn(), control(), merged_memory(), true); + IdealKit ideal(this, true); Node* tls = __ thread(); // ThreadLocalStorage @@ -3505,7 +3553,7 @@ } __ end_if(); // (!marking) // Final sync IdealKit and GraphKit. - sync_kit(ideal); + final_sync(ideal); } // @@ -3571,7 +3619,7 @@ // (Else it's an array (or unknown), and we want more precise card marks.) assert(adr != NULL, ""); - IdealKit ideal(gvn(), control(), merged_memory(), true); + IdealKit ideal(this, true); Node* tls = __ thread(); // ThreadLocalStorage @@ -3645,6 +3693,6 @@ } // Final sync IdealKit and GraphKit. - sync_kit(ideal); + final_sync(ideal); } #undef __
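GraphKit::add_predicate() above plants a placeholder If guarded by an Opaque1 node whose false branch leads to an uncommon trap with Reason_predicate; loop predication can later hoist invariant checks up to that entry-path test. Conceptually (not compiler code, names invented), the transformation it enables looks like this:

#include <stdexcept>

int sum_range(const int* a, int a_len, int from, int to) {
  // Hoisted "predicate": a single range test on the loop entry path...
  if (from < 0 || to > a_len)
    throw std::out_of_range("range check failed");   // analogue of the uncommon trap

  int s = 0;
  for (int i = from; i < to; i++)
    s += a[i];              // ...so no per-iteration bounds check is needed here
  return s;
}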
--- a/src/share/vm/opto/graphKit.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/graphKit.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -662,7 +662,9 @@ && Universe::heap()->can_elide_tlab_store_barriers()); } + // Sync Ideal and Graph kits. void sync_kit(IdealKit& ideal); + void final_sync(IdealKit& ideal); // vanilla/CMS post barrier void write_barrier_post(Node *store, Node* obj, @@ -793,6 +795,10 @@ if (!tst->is_Con()) record_for_igvn(iff); // Range-check and Null-check removal is later return iff; } + + // Insert a loop predicate into the graph + void add_predicate(int nargs = 0); + void add_predicate_impl(Deoptimization::DeoptReason reason, int nargs); }; // Helper class to support building of control flow branches. Upon
--- a/src/share/vm/opto/idealGraphPrinter.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/idealGraphPrinter.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -599,11 +599,35 @@ if (caller != NULL) { stringStream bciStream; + ciMethod* last = NULL; + int last_bci; while(caller) { + if (caller->has_method()) { + last = caller->method(); + last_bci = caller->bci(); + } bciStream.print("%d ", caller->bci()); caller = caller->caller(); } print_prop("bci", bciStream.as_string()); + if (last != NULL && last->has_linenumber_table() && last_bci >= 0) { + print_prop("line", last->line_number_from_bci(last_bci)); + } + } + + if (node->debug_orig() != NULL) { + stringStream dorigStream; + Node* dorig = node->debug_orig(); + if (dorig) { + dorigStream.print("%d ", dorig->_idx); + Node* first = dorig; + dorig = first->debug_orig(); + while (dorig && dorig != first) { + dorigStream.print("%d ", dorig->_idx); + dorig = dorig->debug_orig(); + } + } + print_prop("debug_orig", dorigStream.as_string()); } if (_chaitin && _chaitin != (PhaseChaitin *)0xdeadbeef) { @@ -628,6 +652,17 @@ GrowableArray<Node *> nodeStack(Thread::current()->resource_area(), 0, 0, NULL); nodeStack.push(start); visited.test_set(start->_idx); + if (C->cfg() != NULL) { + // once we have a CFG there are some nodes that aren't really + // reachable but are in the CFG so add them here. + for (uint i = 0; i < C->cfg()->_blocks.size(); i++) { + Block *b = C->cfg()->_blocks[i]; + for (uint s = 0; s < b->_nodes.size(); s++) { + nodeStack.push(b->_nodes[s]); + } + } + } + while(nodeStack.length() > 0) { Node *n = nodeStack.pop(); @@ -686,16 +721,23 @@ end_head(); head(SUCCESSORS_ELEMENT); - for (uint s = 0; s < C->cfg()->_blocks[i]->_num_succs; s++) { + for (uint s = 0; s < b->_num_succs; s++) { begin_elem(SUCCESSOR_ELEMENT); print_attr(BLOCK_NAME_PROPERTY, b->_succs[s]->_pre_order); end_elem(); } tail(SUCCESSORS_ELEMENT); + head(NODES_ELEMENT); + for (uint s = 0; s < b->_nodes.size(); s++) { + begin_elem(NODE_ELEMENT); + print_attr(NODE_ID_PROPERTY, get_node_id(b->_nodes[s])); + end_elem(); + } + tail(NODES_ELEMENT); + tail(BLOCK_ELEMENT); } - tail(CONTROL_FLOW_ELEMENT); } tail(GRAPH_ELEMENT);
--- a/src/share/vm/opto/idealGraphPrinter.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/idealGraphPrinter.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,15 +45,6 @@ { private: - enum State - { - Invalid, - Valid, - New - }; - -private: - static const char *INDENT; static const char *TOP_ELEMENT; static const char *GROUP_ELEMENT;
--- a/src/share/vm/opto/idealKit.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/idealKit.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,15 +38,16 @@ const uint IdealKit::first_var = TypeFunc::Parms + 1; //----------------------------IdealKit----------------------------------------- -IdealKit::IdealKit(PhaseGVN &gvn, Node* control, Node* mem, bool delay_all_transforms, bool has_declarations) : - _gvn(gvn), C(gvn.C) { - _initial_ctrl = control; - _initial_memory = mem; +IdealKit::IdealKit(GraphKit* gkit, bool delay_all_transforms, bool has_declarations) : + _gvn(gkit->gvn()), C(gkit->C) { + _initial_ctrl = gkit->control(); + _initial_memory = gkit->merged_memory(); + _initial_i_o = gkit->i_o(); _delay_all_transforms = delay_all_transforms; _var_ct = 0; _cvstate = NULL; // We can go memory state free or else we need the entire memory state - assert(mem == NULL || mem->Opcode() == Op_MergeMem, "memory must be pre-split"); + assert(_initial_memory == NULL || _initial_memory->Opcode() == Op_MergeMem, "memory must be pre-split"); int init_size = 5; _pending_cvstates = new (C->node_arena()) GrowableArray<Node*>(C->node_arena(), init_size, 0, 0); _delay_transform = new (C->node_arena()) GrowableArray<Node*>(C->node_arena(), init_size, 0, 0); @@ -56,6 +57,13 @@ } } +//----------------------------sync_kit----------------------------------------- +void IdealKit::sync_kit(GraphKit* gkit) { + set_all_memory(gkit->merged_memory()); + set_i_o(gkit->i_o()); + set_ctrl(gkit->control()); +} + //-------------------------------if_then------------------------------------- // Create: if(left relop right) // / \ @@ -154,8 +162,16 @@ // // Pushes the loop top cvstate first, then the else (loop exit) cvstate // onto the stack. -void IdealKit::loop(IdealVariable& iv, Node* init, BoolTest::mask relop, Node* limit, float prob, float cnt) { +void IdealKit::loop(GraphKit* gkit, int nargs, IdealVariable& iv, Node* init, BoolTest::mask relop, Node* limit, float prob, float cnt) { assert((state() & (BlockS|LoopS|IfThenS|ElseS)), "bad state for new loop"); + if (UseLoopPredicate) { + // Sync IdealKit and graphKit. + gkit->sync_kit(*this); + // Add loop predicate. + gkit->add_predicate(nargs); + // Update IdealKit memory. 
+ sync_kit(gkit); + } set(iv, init); Node* head = make_label(1); bind(head); @@ -270,6 +286,7 @@ _cvstate = new_cvstate(); // initialize current cvstate set_ctrl(_initial_ctrl); // initialize control in current cvstate set_all_memory(_initial_memory);// initialize memory in current cvstate + set_i_o(_initial_i_o); // initialize i_o in current cvstate DEBUG_ONLY(_state->push(BlockS)); } @@ -411,6 +428,9 @@ // Get the region for the join state Node* join_region = join->in(TypeFunc::Control); assert(join_region != NULL, "join region must exist"); + if (join->in(TypeFunc::I_O) == NULL ) { + join->set_req(TypeFunc::I_O, merging->in(TypeFunc::I_O)); + } if (join->in(TypeFunc::Memory) == NULL ) { join->set_req(TypeFunc::Memory, merging->in(TypeFunc::Memory)); return; @@ -457,6 +477,20 @@ mms.set_memory(phi); } } + + Node* join_io = join->in(TypeFunc::I_O); + Node* merging_io = merging->in(TypeFunc::I_O); + if (join_io != merging_io) { + PhiNode* phi; + if (join_io->is_Phi() && join_io->as_Phi()->region() == join_region) { + phi = join_io->as_Phi(); + } else { + phi = PhiNode::make(join_region, join_io, Type::ABIO); + phi = (PhiNode*) delay_transform(phi); + join->set_req(TypeFunc::I_O, phi); + } + phi->set_req(slot, merging_io); + } } @@ -467,7 +501,8 @@ const char *leaf_name, Node* parm0, Node* parm1, - Node* parm2) { + Node* parm2, + Node* parm3) { // We only handle taking in RawMem and modifying RawMem const TypePtr* adr_type = TypeRawPtr::BOTTOM; @@ -488,6 +523,7 @@ if (parm0 != NULL) call->init_req(TypeFunc::Parms+0, parm0); if (parm1 != NULL) call->init_req(TypeFunc::Parms+1, parm1); if (parm2 != NULL) call->init_req(TypeFunc::Parms+2, parm2); + if (parm3 != NULL) call->init_req(TypeFunc::Parms+3, parm3); // Node *c = _gvn.transform(call); call = (CallNode *) _gvn.transform(call); @@ -506,3 +542,51 @@ assert(C->alias_type(call->adr_type()) == C->alias_type(adr_type), "call node must be constructed correctly"); } + + +void IdealKit::make_leaf_call_no_fp(const TypeFunc *slow_call_type, + address slow_call, + const char *leaf_name, + const TypePtr* adr_type, + Node* parm0, + Node* parm1, + Node* parm2, + Node* parm3) { + + // We only handle taking in RawMem and modifying RawMem + uint adr_idx = C->get_alias_index(adr_type); + + // Slow-path leaf call + int size = slow_call_type->domain()->cnt(); + CallNode *call = (CallNode*)new (C, size) CallLeafNoFPNode( slow_call_type, slow_call, leaf_name, adr_type); + + // Set fixed predefined input arguments + call->init_req( TypeFunc::Control, ctrl() ); + call->init_req( TypeFunc::I_O , top() ) ; // does no i/o + // Narrow memory as only memory input + call->init_req( TypeFunc::Memory , memory(adr_idx)); + call->init_req( TypeFunc::FramePtr, top() /* frameptr() */ ); + call->init_req( TypeFunc::ReturnAdr, top() ); + + if (parm0 != NULL) call->init_req(TypeFunc::Parms+0, parm0); + if (parm1 != NULL) call->init_req(TypeFunc::Parms+1, parm1); + if (parm2 != NULL) call->init_req(TypeFunc::Parms+2, parm2); + if (parm3 != NULL) call->init_req(TypeFunc::Parms+3, parm3); + + // Node *c = _gvn.transform(call); + call = (CallNode *) _gvn.transform(call); + Node *c = call; // dbx gets confused with call call->dump() + + // Slow leaf call has no side-effects, sets few values + + set_ctrl(transform( new (C, 1) ProjNode(call,TypeFunc::Control) )); + + // Make memory for the call + Node* mem = _gvn.transform( new (C, 1) ProjNode(call, TypeFunc::Memory) ); + + // Set the RawPtr memory state only. 
+ set_memory(mem, adr_idx); + + assert(C->alias_type(call->adr_type()) == C->alias_type(adr_type), + "call node must be constructed correctly"); +}
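Alongside control and memory, IdealKit now carries the i_o slice: the constructor seeds it from the GraphKit, joins merge it with a Phi when the two paths disagree, and the new IdealKit::sync_kit() plus GraphKit::final_sync() pair copies the state back and forth between the kits. A very reduced sketch of that two-way hand-off; the State fields and kit names are invented, the real kits exchange Node* values:

struct State { int control, memory, io; };   // stand-ins for the real graph nodes

struct GraphKitish { State s; };
struct IdealKitish { State s; };

void sync_graphkit(GraphKitish& g, const IdealKitish& i) { g.s = i.s; }  // cf. GraphKit::sync_kit
void sync_idealkit(IdealKitish& i, const GraphKitish& g) { i.s = g.s; }  // cf. IdealKit::sync_kit

// Typical use around a region built with the other kit:
//   sync_idealkit(ideal, kit);   // hand the current state to IdealKit
//   ... build with ideal ...
//   sync_graphkit(kit, ideal);   // final_sync: read the combined state back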
--- a/src/share/vm/opto/idealKit.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/idealKit.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ #include "opto/cfgnode.hpp" #include "opto/connode.hpp" #include "opto/divnode.hpp" +#include "opto/graphKit.hpp" #include "opto/mulnode.hpp" #include "opto/phaseX.hpp" #include "opto/subnode.hpp" @@ -107,6 +108,7 @@ bool _delay_all_transforms; // flag forcing all transforms to be delayed Node* _initial_ctrl; // saves initial control until variables declared Node* _initial_memory; // saves initial memory until variables declared + Node* _initial_i_o; // saves initial i_o until variables declared PhaseGVN& gvn() const { return _gvn; } // Create a new cvstate filled with nulls @@ -141,17 +143,21 @@ Node* memory(uint alias_idx); public: - IdealKit(PhaseGVN &gvn, Node* control, Node* memory, bool delay_all_transforms = false, bool has_declarations = false); + IdealKit(GraphKit* gkit, bool delay_all_transforms = false, bool has_declarations = false); ~IdealKit() { stop(); drain_delay_transform(); } + void sync_kit(GraphKit* gkit); + // Control Node* ctrl() { return _cvstate->in(TypeFunc::Control); } void set_ctrl(Node* ctrl) { _cvstate->set_req(TypeFunc::Control, ctrl); } Node* top() { return C->top(); } MergeMemNode* merged_memory() { return _cvstate->in(TypeFunc::Memory)->as_MergeMem(); } void set_all_memory(Node* mem) { _cvstate->set_req(TypeFunc::Memory, mem); } + Node* i_o() { return _cvstate->in(TypeFunc::I_O); } + void set_i_o(Node* c) { _cvstate->set_req(TypeFunc::I_O, c); } void set(IdealVariable& v, Node* rhs) { _cvstate->set_req(first_var + v.id(), rhs); } Node* value(IdealVariable& v) { return _cvstate->in(first_var + v.id()); } void dead(IdealVariable& v) { set(v, (Node*)NULL); } @@ -160,7 +166,7 @@ bool push_new_state = true); void else_(); void end_if(); - void loop(IdealVariable& iv, Node* init, BoolTest::mask cmp, Node* limit, + void loop(GraphKit* gkit, int nargs, IdealVariable& iv, Node* init, BoolTest::mask cmp, Node* limit, float prob = PROB_LIKELY(0.9), float cnt = COUNT_UNKNOWN); void end_loop(); Node* make_label(int goto_ct); @@ -238,7 +244,18 @@ const char *leaf_name, Node* parm0, Node* parm1 = NULL, - Node* parm2 = NULL); + Node* parm2 = NULL, + Node* parm3 = NULL); + + void make_leaf_call_no_fp(const TypeFunc *slow_call_type, + address slow_call, + const char *leaf_name, + const TypePtr* adr_type, + Node* parm0, + Node* parm1, + Node* parm2, + Node* parm3); + }; #endif // SHARE_VM_OPTO_IDEALKIT_HPP
--- a/src/share/vm/opto/ifnode.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/ifnode.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -27,6 +27,7 @@ #include "opto/addnode.hpp" #include "opto/cfgnode.hpp" #include "opto/connode.hpp" +#include "opto/loopnode.hpp" #include "opto/phaseX.hpp" #include "opto/runtime.hpp" #include "opto/subnode.hpp" @@ -222,22 +223,35 @@ // Make a region merging constants and a region merging the rest uint req_c = 0; + Node* predicate_proj = NULL; for (uint ii = 1; ii < r->req(); ii++) { - if( phi->in(ii) == con1 ) { + if (phi->in(ii) == con1) { req_c++; } + Node* proj = PhaseIdealLoop::find_predicate(r->in(ii)); + if (proj != NULL) { + assert(predicate_proj == NULL, "only one predicate entry expected"); + predicate_proj = proj; + } } + Node* predicate_c = NULL; + Node* predicate_x = NULL; + Node *region_c = new (igvn->C, req_c + 1) RegionNode(req_c + 1); Node *phi_c = con1; uint len = r->req(); - Node *region_x = new (igvn->C, len - req_c + 1) RegionNode(len - req_c + 1); + Node *region_x = new (igvn->C, len - req_c) RegionNode(len - req_c); Node *phi_x = PhiNode::make_blank(region_x, phi); for (uint i = 1, i_c = 1, i_x = 1; i < len; i++) { - if( phi->in(i) == con1 ) { + if (phi->in(i) == con1) { region_c->init_req( i_c++, r ->in(i) ); + if (r->in(i) == predicate_proj) + predicate_c = predicate_proj; } else { region_x->init_req( i_x, r ->in(i) ); phi_x ->init_req( i_x++, phi->in(i) ); + if (r->in(i) == predicate_proj) + predicate_x = predicate_proj; } } @@ -277,8 +291,20 @@ // Make the true/false arms Node *iff_c_t = phase->transform(new (igvn->C, 1) IfTrueNode (iff_c)); Node *iff_c_f = phase->transform(new (igvn->C, 1) IfFalseNode(iff_c)); + if (predicate_c != NULL) { + assert(predicate_x == NULL, "only one predicate entry expected"); + // Clone loop predicates to each path + iff_c_t = igvn->clone_loop_predicates(predicate_c, iff_c_t); + iff_c_f = igvn->clone_loop_predicates(predicate_c, iff_c_f); + } Node *iff_x_t = phase->transform(new (igvn->C, 1) IfTrueNode (iff_x)); Node *iff_x_f = phase->transform(new (igvn->C, 1) IfFalseNode(iff_x)); + if (predicate_x != NULL) { + assert(predicate_c == NULL, "only one predicate entry expected"); + // Clone loop predicates to each path + iff_x_t = igvn->clone_loop_predicates(predicate_x, iff_x_t); + iff_x_f = igvn->clone_loop_predicates(predicate_x, iff_x_f); + } // Merge the TRUE paths Node *region_s = new (igvn->C, 3) RegionNode(3);
--- a/src/share/vm/opto/lcm.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/lcm.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,6 +42,9 @@ #ifdef TARGET_ARCH_MODEL_zero # include "adfiles/ad_zero.hpp" #endif +#ifdef TARGET_ARCH_MODEL_arm +# include "adfiles/ad_arm.hpp" +#endif // Optimization - Graph Style
--- a/src/share/vm/opto/library_call.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/library_call.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "classfile/systemDictionary.hpp" #include "classfile/vmSymbols.hpp" +#include "compiler/compileBroker.hpp" #include "compiler/compileLog.hpp" #include "oops/objArrayKlass.hpp" #include "opto/addnode.hpp" @@ -97,7 +98,7 @@ RegionNode* region); Node* generate_current_thread(Node* &tls_output); address basictype2arraycopy(BasicType t, Node *src_offset, Node *dest_offset, - bool disjoint_bases, const char* &name); + bool disjoint_bases, const char* &name, bool dest_uninitialized); Node* load_mirror_from_klass(Node* klass); Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null, int nargs, @@ -212,26 +213,26 @@ AllocateNode* alloc, Node* src, Node* src_offset, Node* dest, Node* dest_offset, - Node* dest_size); + Node* dest_size, bool dest_uninitialized); void generate_slow_arraycopy(const TypePtr* adr_type, Node* src, Node* src_offset, Node* dest, Node* dest_offset, - Node* copy_length); + Node* copy_length, bool dest_uninitialized); Node* generate_checkcast_arraycopy(const TypePtr* adr_type, Node* dest_elem_klass, Node* src, Node* src_offset, Node* dest, Node* dest_offset, - Node* copy_length); + Node* copy_length, bool dest_uninitialized); Node* generate_generic_arraycopy(const TypePtr* adr_type, Node* src, Node* src_offset, Node* dest, Node* dest_offset, - Node* copy_length); + Node* copy_length, bool dest_uninitialized); void generate_unchecked_arraycopy(const TypePtr* adr_type, BasicType basic_elem_type, bool disjoint_bases, Node* src, Node* src_offset, Node* dest, Node* dest_offset, - Node* copy_length); + Node* copy_length, bool dest_uninitialized); bool inline_unsafe_CAS(BasicType type); bool inline_unsafe_ordered_store(BasicType type); bool inline_fp_conversions(vmIntrinsics::ID id); @@ -388,11 +389,7 @@ #endif if (kit.try_to_inline()) { if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) { - tty->print("Inlining intrinsic %s%s at bci:%d in", - vmIntrinsics::name_at(intrinsic_id()), - (is_virtual() ? " (virtual)" : ""), kit.bci()); - kit.caller()->print_short_name(tty); - tty->print_cr(" (%d bytes)", kit.caller()->code_size()); + CompileTask::print_inlining(kit.callee(), jvms->depth() - 1, kit.bci(), is_virtual() ? 
"(intrinsic, virtual)" : "(intrinsic)"); } C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked); if (C->log()) { @@ -1101,6 +1098,8 @@ float likely = PROB_LIKELY(0.9); float unlikely = PROB_UNLIKELY(0.9); + const int nargs = 2; // number of arguments to push back for uncommon trap in predicate + const int value_offset = java_lang_String::value_offset_in_bytes(); const int count_offset = java_lang_String::count_offset_in_bytes(); const int offset_offset = java_lang_String::offset_offset_in_bytes(); @@ -1116,12 +1115,12 @@ Node* sourcea = basic_plus_adr(string_object, string_object, value_offset); Node* source = make_load(no_ctrl, sourcea, source_type, T_OBJECT, string_type->add_offset(value_offset)); - Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array)) ); + Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array, true)) ); jint target_length = target_array->length(); const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin)); const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot); - IdealKit kit(gvn(), control(), merged_memory(), false, true); + IdealKit kit(this, false, true); #define __ kit. Node* zero = __ ConI(0); Node* one = __ ConI(1); @@ -1138,12 +1137,12 @@ Node* return_ = __ make_label(1); __ set(rtn,__ ConI(-1)); - __ loop(i, sourceOffset, BoolTest::lt, sourceEnd); { + __ loop(this, nargs, i, sourceOffset, BoolTest::lt, sourceEnd); { Node* i2 = __ AddI(__ value(i), targetCountLess1); // pin to prohibit loading of "next iteration" value which may SEGV (rare) Node* src = load_array_element(__ ctrl(), source, i2, TypeAryPtr::CHARS); __ if_then(src, BoolTest::eq, lastChar, unlikely); { - __ loop(j, zero, BoolTest::lt, targetCountLess1); { + __ loop(this, nargs, j, zero, BoolTest::lt, targetCountLess1); { Node* tpj = __ AddI(targetOffset, __ value(j)); Node* targ = load_array_element(no_ctrl, target, tpj, target_type); Node* ipj = __ AddI(__ value(i), __ value(j)); @@ -1172,7 +1171,7 @@ __ bind(return_); // Final sync IdealKit and GraphKit. - sync_kit(kit); + final_sync(kit); Node* result = __ value(rtn); #undef __ C->set_has_loops(true); @@ -1193,7 +1192,7 @@ Node* result; // Disable the use of pcmpestri until it can be guaranteed that // the load doesn't cross into the uncommited space. 
- if (false && Matcher::has_match_rule(Op_StrIndexOf) && + if (Matcher::has_match_rule(Op_StrIndexOf) && UseSSE42Intrinsics) { // Generate SSE4.2 version of indexOf // We currently only have match rules that use SSE4.2 @@ -1211,14 +1210,14 @@ return true; } + ciInstanceKlass* str_klass = env()->String_klass(); + const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(str_klass); + // Make the merge point - RegionNode* result_rgn = new (C, 3) RegionNode(3); - Node* result_phi = new (C, 3) PhiNode(result_rgn, TypeInt::INT); + RegionNode* result_rgn = new (C, 4) RegionNode(4); + Node* result_phi = new (C, 4) PhiNode(result_rgn, TypeInt::INT); Node* no_ctrl = NULL; - ciInstanceKlass* klass = env()->String_klass(); - const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(klass); - // Get counts for string and substr Node* source_cnta = basic_plus_adr(receiver, receiver, count_offset); Node* source_cnt = make_load(no_ctrl, source_cnta, TypeInt::INT, T_INT, string_type->add_offset(count_offset)); @@ -1236,6 +1235,17 @@ } if (!stopped()) { + // Check for substr count == 0 + cmp = _gvn.transform( new(C, 3) CmpINode(substr_cnt, intcon(0)) ); + bol = _gvn.transform( new(C, 2) BoolNode(cmp, BoolTest::eq) ); + Node* if_zero = generate_slow_guard(bol, NULL); + if (if_zero != NULL) { + result_phi->init_req(3, intcon(0)); + result_rgn->init_req(3, if_zero); + } + } + + if (!stopped()) { result = make_string_method_node(Op_StrIndexOf, receiver, source_cnt, argument, substr_cnt); result_phi->init_req(1, result); result_rgn->init_req(1, control()); @@ -1244,8 +1254,8 @@ record_for_igvn(result_rgn); result = _gvn.transform(result_phi); - } else { //Use LibraryCallKit::string_indexOf - // don't intrinsify is argument isn't a constant string. + } else { // Use LibraryCallKit::string_indexOf + // don't intrinsify if argument isn't a constant string. if (!argument->is_Con()) { return false; } @@ -1281,7 +1291,7 @@ // No null check on the argument is needed since it's a constant String oop. _sp -= 2; if (stopped()) { - return true; + return true; } // The null string as a pattern always returns 0 (match at beginning of string) @@ -2308,22 +2318,20 @@ // of it. So we need to emit code to conditionally do the proper type of // store. - IdealKit ideal(gvn(), control(), merged_memory()); + IdealKit ideal(this); #define __ ideal. // QQQ who knows what probability is here?? __ if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); { // Sync IdealKit and graphKit. - set_all_memory( __ merged_memory()); - set_control(__ ctrl()); + sync_kit(ideal); Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type); // Update IdealKit memory. - __ set_all_memory(merged_memory()); - __ set_ctrl(control()); + __ sync_kit(this); } __ else_(); { __ store(__ ctrl(), adr, val, type, alias_type->index(), is_volatile); } __ end_if(); // Final sync IdealKit and GraphKit. - sync_kit(ideal); + final_sync(ideal); #undef __ } } @@ -4081,7 +4089,8 @@ const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM; bool disjoint_bases = true; generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases, - src, NULL, dest, NULL, countx); + src, NULL, dest, NULL, countx, + /*dest_uninitialized*/true); // If necessary, emit some card marks afterwards. (Non-arrays only.) 
if (card_mark) { @@ -4283,82 +4292,13 @@ return true; } - -// constants for computing the copy function -enum { - COPYFUNC_UNALIGNED = 0, - COPYFUNC_ALIGNED = 1, // src, dest aligned to HeapWordSize - COPYFUNC_CONJOINT = 0, - COPYFUNC_DISJOINT = 2 // src != dest, or transfer can descend -}; - -// Note: The condition "disjoint" applies also for overlapping copies -// where an descending copy is permitted (i.e., dest_offset <= src_offset). -static address -select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name) { - int selector = - (aligned ? COPYFUNC_ALIGNED : COPYFUNC_UNALIGNED) + - (disjoint ? COPYFUNC_DISJOINT : COPYFUNC_CONJOINT); - -#define RETURN_STUB(xxx_arraycopy) { \ - name = #xxx_arraycopy; \ - return StubRoutines::xxx_arraycopy(); } - - switch (t) { - case T_BYTE: - case T_BOOLEAN: - switch (selector) { - case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jbyte_arraycopy); - case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jbyte_arraycopy); - case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jbyte_disjoint_arraycopy); - case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jbyte_disjoint_arraycopy); - } - case T_CHAR: - case T_SHORT: - switch (selector) { - case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jshort_arraycopy); - case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jshort_arraycopy); - case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jshort_disjoint_arraycopy); - case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jshort_disjoint_arraycopy); - } - case T_INT: - case T_FLOAT: - switch (selector) { - case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jint_arraycopy); - case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jint_arraycopy); - case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jint_disjoint_arraycopy); - case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jint_disjoint_arraycopy); - } - case T_DOUBLE: - case T_LONG: - switch (selector) { - case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jlong_arraycopy); - case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jlong_arraycopy); - case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jlong_disjoint_arraycopy); - case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jlong_disjoint_arraycopy); - } - case T_ARRAY: - case T_OBJECT: - switch (selector) { - case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(oop_arraycopy); - case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_oop_arraycopy); - case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(oop_disjoint_arraycopy); - case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_oop_disjoint_arraycopy); - } - default: - ShouldNotReachHere(); - return NULL; - } - -#undef RETURN_STUB -} - //------------------------------basictype2arraycopy---------------------------- address LibraryCallKit::basictype2arraycopy(BasicType t, Node* src_offset, Node* dest_offset, bool disjoint_bases, - const char* &name) { + const char* &name, + bool dest_uninitialized) { const TypeInt* src_offset_inttype = gvn().find_int_type(src_offset);; const TypeInt* dest_offset_inttype = gvn().find_int_type(dest_offset);; @@ -4384,7 +4324,7 @@ disjoint = true; } - return select_arraycopy_function(t, aligned, disjoint, name); + return StubRoutines::select_arraycopy_function(t, aligned, disjoint, name, dest_uninitialized); } @@ -4440,7 +4380,8 @@ // The component types are not the same or are not recognized. 
Punt. // (But, avoid the native method wrapper to JVM_ArrayCopy.) generate_slow_arraycopy(TypePtr::BOTTOM, - src, src_offset, dest, dest_offset, length); + src, src_offset, dest, dest_offset, length, + /*dest_uninitialized*/false); return true; } @@ -4553,7 +4494,7 @@ Node* original_dest = dest; AllocateArrayNode* alloc = NULL; // used for zeroing, if needed - bool must_clear_dest = false; + bool dest_uninitialized = false; // See if this is the initialization of a newly-allocated array. // If so, we will take responsibility here for initializing it to zero. @@ -4576,12 +4517,14 @@ adr_type = TypeRawPtr::BOTTOM; // all initializations are into raw memory // From this point on, every exit path is responsible for // initializing any non-copied parts of the object to zero. - must_clear_dest = true; + // Also, if this flag is set we make sure that arraycopy interacts properly + // with G1, eliding pre-barriers. See CR 6627983. + dest_uninitialized = true; } else { // No zeroing elimination here. alloc = NULL; //original_dest = dest; - //must_clear_dest = false; + //dest_uninitialized = false; } // Results are placed here: @@ -4613,10 +4556,10 @@ Node* checked_value = NULL; if (basic_elem_type == T_CONFLICT) { - assert(!must_clear_dest, ""); + assert(!dest_uninitialized, ""); Node* cv = generate_generic_arraycopy(adr_type, src, src_offset, dest, dest_offset, - copy_length); + copy_length, dest_uninitialized); if (cv == NULL) cv = intcon(-1); // failure (no stub available) checked_control = control(); checked_i_o = i_o(); @@ -4636,7 +4579,7 @@ } // copy_length is 0. - if (!stopped() && must_clear_dest) { + if (!stopped() && dest_uninitialized) { Node* dest_length = alloc->in(AllocateNode::ALength); if (_gvn.eqv_uncast(copy_length, dest_length) || _gvn.find_int_con(dest_length, 1) <= 0) { @@ -4662,7 +4605,7 @@ result_memory->init_req(zero_path, memory(adr_type)); } - if (!stopped() && must_clear_dest) { + if (!stopped() && dest_uninitialized) { // We have to initialize the *uncopied* part of the array to zero. // The copy destination is the slice dest[off..off+len]. The other slices // are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length]. @@ -4698,7 +4641,7 @@ { PreserveJVMState pjvms(this); didit = generate_block_arraycopy(adr_type, basic_elem_type, alloc, src, src_offset, dest, dest_offset, - dest_size); + dest_size, dest_uninitialized); if (didit) { // Present the results of the block-copying fast call. result_region->init_req(bcopy_path, control()); @@ -4774,7 +4717,7 @@ Node* cv = generate_checkcast_arraycopy(adr_type, dest_elem_klass, src, src_offset, dest, dest_offset, - ConvI2X(copy_length)); + ConvI2X(copy_length), dest_uninitialized); if (cv == NULL) cv = intcon(-1); // failure (no stub available) checked_control = control(); checked_i_o = i_o(); @@ -4797,7 +4740,7 @@ PreserveJVMState pjvms(this); generate_unchecked_arraycopy(adr_type, copy_type, disjoint_bases, src, src_offset, dest, dest_offset, - ConvI2X(copy_length)); + ConvI2X(copy_length), dest_uninitialized); // Present the results of the fast call. 
result_region->init_req(fast_path, control()); @@ -4876,7 +4819,7 @@ set_memory(slow_mem, adr_type); set_i_o(slow_i_o); - if (must_clear_dest) { + if (dest_uninitialized) { generate_clear_array(adr_type, dest, basic_elem_type, intcon(0), NULL, alloc->in(AllocateNode::AllocSize)); @@ -4884,7 +4827,7 @@ generate_slow_arraycopy(adr_type, src, src_offset, dest, dest_offset, - copy_length); + copy_length, /*dest_uninitialized*/false); result_region->init_req(slow_call_path, control()); result_i_o ->init_req(slow_call_path, i_o()); @@ -5128,7 +5071,7 @@ AllocateNode* alloc, Node* src, Node* src_offset, Node* dest, Node* dest_offset, - Node* dest_size) { + Node* dest_size, bool dest_uninitialized) { // See if there is an advantage from block transfer. int scale = exact_log2(type2aelembytes(basic_elem_type)); if (scale >= LogBytesPerLong) @@ -5173,7 +5116,7 @@ bool disjoint_bases = true; // since alloc != NULL generate_unchecked_arraycopy(adr_type, T_LONG, disjoint_bases, - sptr, NULL, dptr, NULL, countx); + sptr, NULL, dptr, NULL, countx, dest_uninitialized); return true; } @@ -5186,7 +5129,8 @@ LibraryCallKit::generate_slow_arraycopy(const TypePtr* adr_type, Node* src, Node* src_offset, Node* dest, Node* dest_offset, - Node* copy_length) { + Node* copy_length, bool dest_uninitialized) { + assert(!dest_uninitialized, "Invariant"); Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON, OptoRuntime::slow_arraycopy_Type(), OptoRuntime::slow_arraycopy_Java(), @@ -5204,10 +5148,10 @@ Node* dest_elem_klass, Node* src, Node* src_offset, Node* dest, Node* dest_offset, - Node* copy_length) { + Node* copy_length, bool dest_uninitialized) { if (stopped()) return NULL; - address copyfunc_addr = StubRoutines::checkcast_arraycopy(); + address copyfunc_addr = StubRoutines::checkcast_arraycopy(dest_uninitialized); if (copyfunc_addr == NULL) { // Stub was not generated, go slow path. return NULL; } @@ -5245,9 +5189,9 @@ LibraryCallKit::generate_generic_arraycopy(const TypePtr* adr_type, Node* src, Node* src_offset, Node* dest, Node* dest_offset, - Node* copy_length) { + Node* copy_length, bool dest_uninitialized) { + assert(!dest_uninitialized, "Invariant"); if (stopped()) return NULL; - address copyfunc_addr = StubRoutines::generic_arraycopy(); if (copyfunc_addr == NULL) { // Stub was not generated, go slow path. return NULL; @@ -5268,7 +5212,7 @@ bool disjoint_bases, Node* src, Node* src_offset, Node* dest, Node* dest_offset, - Node* copy_length) { + Node* copy_length, bool dest_uninitialized) { if (stopped()) return; // nothing to do Node* src_start = src; @@ -5283,7 +5227,7 @@ const char* copyfunc_name = "arraycopy"; address copyfunc_addr = basictype2arraycopy(basic_elem_type, src_offset, dest_offset, - disjoint_bases, copyfunc_name); + disjoint_bases, copyfunc_name, dest_uninitialized); // Call it. Note that the count_ix value is not scaled to a byte-size. make_runtime_call(RC_LEAF|RC_NO_FP,
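The hunk above removes the local stub-selection helper in favor of StubRoutines::select_arraycopy_function(), threading a new dest_uninitialized flag through every arraycopy path so copies into freshly allocated arrays can skip G1 pre-barriers (CR 6627983). The removed helper packed two independent properties, alignment and disjointness, into a small selector before switching on the element type. Below is a minimal standalone C++ sketch of that flag-packing idea; the names and the printed stub strings are illustrative only, not HotSpot entry points.

// Standalone illustration (hypothetical names, not part of HotSpot).
// Two independent properties are packed into a small integer so a switch
// can pick one of four stub variants per element type.
#include <cstdio>

enum CopyFlags {
  COPY_UNALIGNED = 0,
  COPY_ALIGNED   = 1,  // src and dest are HeapWord-aligned
  COPY_CONJOINT  = 0,
  COPY_DISJOINT  = 2   // src != dest, or a descending copy is legal
};

static const char* pick_stub(bool aligned, bool disjoint) {
  int selector = (aligned  ? COPY_ALIGNED  : COPY_UNALIGNED) |
                 (disjoint ? COPY_DISJOINT : COPY_CONJOINT);
  switch (selector) {
    case COPY_CONJOINT | COPY_UNALIGNED: return "jbyte_arraycopy";
    case COPY_CONJOINT | COPY_ALIGNED:   return "arrayof_jbyte_arraycopy";
    case COPY_DISJOINT | COPY_UNALIGNED: return "jbyte_disjoint_arraycopy";
    case COPY_DISJOINT | COPY_ALIGNED:   return "arrayof_jbyte_disjoint_arraycopy";
  }
  return "unreachable";
}

int main() {
  std::printf("%s\n", pick_stub(true,  false));  // arrayof_jbyte_arraycopy
  std::printf("%s\n", pick_stub(false, true));   // jbyte_disjoint_arraycopy
  return 0;
}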
--- a/src/share/vm/opto/locknode.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/locknode.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/opto/loopPredicate.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -0,0 +1,960 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "opto/loopnode.hpp" +#include "opto/addnode.hpp" +#include "opto/callnode.hpp" +#include "opto/connode.hpp" +#include "opto/loopnode.hpp" +#include "opto/mulnode.hpp" +#include "opto/rootnode.hpp" +#include "opto/subnode.hpp" + +/* + * The general idea of Loop Predication is to insert a predicate on the entry + * path to a loop, and raise a uncommon trap if the check of the condition fails. + * The condition checks are promoted from inside the loop body, and thus + * the checks inside the loop could be eliminated. Currently, loop predication + * optimization has been applied to remove array range check and loop invariant + * checks (such as null checks). +*/ + +//-------------------------------is_uncommon_trap_proj---------------------------- +// Return true if proj is the form of "proj->[region->..]call_uct" +bool PhaseIdealLoop::is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason) { + int path_limit = 10; + assert(proj, "invalid argument"); + Node* out = proj; + for (int ct = 0; ct < path_limit; ct++) { + out = out->unique_ctrl_out(); + if (out == NULL) + return false; + if (out->is_CallStaticJava()) { + int req = out->as_CallStaticJava()->uncommon_trap_request(); + if (req != 0) { + Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req); + if (trap_reason == reason || reason == Deoptimization::Reason_none) { + return true; + } + } + return false; // don't do further after call + } + if (out->Opcode() != Op_Region) + return false; + } + return false; +} + +//-------------------------------is_uncommon_trap_if_pattern------------------------- +// Return true for "if(test)-> proj -> ... +// | +// V +// other_proj->[region->..]call_uct" +// +// "must_reason_predicate" means the uct reason must be Reason_predicate +bool PhaseIdealLoop::is_uncommon_trap_if_pattern(ProjNode *proj, Deoptimization::DeoptReason reason) { + Node *in0 = proj->in(0); + if (!in0->is_If()) return false; + // Variation of a dead If node. 
+ if (in0->outcnt() < 2) return false; + IfNode* iff = in0->as_If(); + + // we need "If(Conv2B(Opaque1(...)))" pattern for reason_predicate + if (reason != Deoptimization::Reason_none) { + if (iff->in(1)->Opcode() != Op_Conv2B || + iff->in(1)->in(1)->Opcode() != Op_Opaque1) { + return false; + } + } + + ProjNode* other_proj = iff->proj_out(1-proj->_con)->as_Proj(); + if (is_uncommon_trap_proj(other_proj, reason)) { + assert(reason == Deoptimization::Reason_none || + Compile::current()->is_predicate_opaq(iff->in(1)->in(1)), "should be on the list"); + return true; + } + return false; +} + +//-------------------------------register_control------------------------- +void PhaseIdealLoop::register_control(Node* n, IdealLoopTree *loop, Node* pred) { + assert(n->is_CFG(), "must be control node"); + _igvn.register_new_node_with_optimizer(n); + loop->_body.push(n); + set_loop(n, loop); + // When called from beautify_loops() idom is not constructed yet. + if (_idom != NULL) { + set_idom(n, pred, dom_depth(pred)); + } +} + +//------------------------------create_new_if_for_predicate------------------------ +// create a new if above the uct_if_pattern for the predicate to be promoted. +// +// before after +// ---------- ---------- +// ctrl ctrl +// | | +// | | +// v v +// iff new_iff +// / \ / \ +// / \ / \ +// v v v v +// uncommon_proj cont_proj if_uct if_cont +// \ | | | | +// \ | | | | +// v v v | v +// rgn loop | iff +// | | / \ +// | | / \ +// v | v v +// uncommon_trap | uncommon_proj cont_proj +// \ \ | | +// \ \ | | +// v v v v +// rgn loop +// | +// | +// v +// uncommon_trap +// +// +// We will create a region to guard the uct call if there is no one there. +// The true projecttion (if_cont) of the new_iff is returned. +// This code is also used to clone predicates to clonned loops. +ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry, + Deoptimization::DeoptReason reason) { + assert(is_uncommon_trap_if_pattern(cont_proj, reason), "must be a uct if pattern!"); + IfNode* iff = cont_proj->in(0)->as_If(); + + ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con); + Node *rgn = uncommon_proj->unique_ctrl_out(); + assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct"); + + uint proj_index = 1; // region's edge corresponding to uncommon_proj + if (!rgn->is_Region()) { // create a region to guard the call + assert(rgn->is_Call(), "must be call uct"); + CallNode* call = rgn->as_Call(); + IdealLoopTree* loop = get_loop(call); + rgn = new (C, 1) RegionNode(1); + rgn->add_req(uncommon_proj); + register_control(rgn, loop, uncommon_proj); + _igvn.hash_delete(call); + call->set_req(0, rgn); + // When called from beautify_loops() idom is not constructed yet. + if (_idom != NULL) { + set_idom(call, rgn, dom_depth(rgn)); + } + } else { + // Find region's edge corresponding to uncommon_proj + for (; proj_index < rgn->req(); proj_index++) + if (rgn->in(proj_index) == uncommon_proj) break; + assert(proj_index < rgn->req(), "sanity"); + } + + Node* entry = iff->in(0); + if (new_entry != NULL) { + // Clonning the predicate to new location. 
+ entry = new_entry; + } + // Create new_iff + IdealLoopTree* lp = get_loop(entry); + IfNode *new_iff = iff->clone()->as_If(); + new_iff->set_req(0, entry); + register_control(new_iff, lp, entry); + Node *if_cont = new (C, 1) IfTrueNode(new_iff); + Node *if_uct = new (C, 1) IfFalseNode(new_iff); + if (cont_proj->is_IfFalse()) { + // Swap + Node* tmp = if_uct; if_uct = if_cont; if_cont = tmp; + } + register_control(if_cont, lp, new_iff); + register_control(if_uct, get_loop(rgn), new_iff); + + // if_uct to rgn + _igvn.hash_delete(rgn); + rgn->add_req(if_uct); + // When called from beautify_loops() idom is not constructed yet. + if (_idom != NULL) { + Node* ridom = idom(rgn); + Node* nrdom = dom_lca(ridom, new_iff); + set_idom(rgn, nrdom, dom_depth(rgn)); + } + + // If rgn has phis add new edges which has the same + // value as on original uncommon_proj pass. + assert(rgn->in(rgn->req() -1) == if_uct, "new edge should be last"); + bool has_phi = false; + for (DUIterator_Fast imax, i = rgn->fast_outs(imax); i < imax; i++) { + Node* use = rgn->fast_out(i); + if (use->is_Phi() && use->outcnt() > 0) { + assert(use->in(0) == rgn, ""); + _igvn.hash_delete(use); + use->add_req(use->in(proj_index)); + _igvn._worklist.push(use); + has_phi = true; + } + } + assert(!has_phi || rgn->req() > 3, "no phis when region is created"); + + if (new_entry == NULL) { + // Attach if_cont to iff + _igvn.hash_delete(iff); + iff->set_req(0, if_cont); + if (_idom != NULL) { + set_idom(iff, if_cont, dom_depth(iff)); + } + } + return if_cont->as_Proj(); +} + +//------------------------------create_new_if_for_predicate------------------------ +// Create a new if below new_entry for the predicate to be cloned (IGVN optimization) +ProjNode* PhaseIterGVN::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry, + Deoptimization::DeoptReason reason) { + assert(new_entry != 0, "only used for clone predicate"); + assert(PhaseIdealLoop::is_uncommon_trap_if_pattern(cont_proj, reason), "must be a uct if pattern!"); + IfNode* iff = cont_proj->in(0)->as_If(); + + ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con); + Node *rgn = uncommon_proj->unique_ctrl_out(); + assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct"); + + uint proj_index = 1; // region's edge corresponding to uncommon_proj + if (!rgn->is_Region()) { // create a region to guard the call + assert(rgn->is_Call(), "must be call uct"); + CallNode* call = rgn->as_Call(); + rgn = new (C, 1) RegionNode(1); + register_new_node_with_optimizer(rgn); + rgn->add_req(uncommon_proj); + hash_delete(call); + call->set_req(0, rgn); + } else { + // Find region's edge corresponding to uncommon_proj + for (; proj_index < rgn->req(); proj_index++) + if (rgn->in(proj_index) == uncommon_proj) break; + assert(proj_index < rgn->req(), "sanity"); + } + + // Create new_iff in new location. + IfNode *new_iff = iff->clone()->as_If(); + new_iff->set_req(0, new_entry); + + register_new_node_with_optimizer(new_iff); + Node *if_cont = new (C, 1) IfTrueNode(new_iff); + Node *if_uct = new (C, 1) IfFalseNode(new_iff); + if (cont_proj->is_IfFalse()) { + // Swap + Node* tmp = if_uct; if_uct = if_cont; if_cont = tmp; + } + register_new_node_with_optimizer(if_cont); + register_new_node_with_optimizer(if_uct); + + // if_uct to rgn + hash_delete(rgn); + rgn->add_req(if_uct); + + // If rgn has phis add corresponding new edges which has the same + // value as on original uncommon_proj pass. 
+ assert(rgn->in(rgn->req() -1) == if_uct, "new edge should be last"); + bool has_phi = false; + for (DUIterator_Fast imax, i = rgn->fast_outs(imax); i < imax; i++) { + Node* use = rgn->fast_out(i); + if (use->is_Phi() && use->outcnt() > 0) { + hash_delete(use); + use->add_req(use->in(proj_index)); + _worklist.push(use); + has_phi = true; + } + } + assert(!has_phi || rgn->req() > 3, "no phis when region is created"); + + return if_cont->as_Proj(); +} + +//--------------------------clone_predicate----------------------- +ProjNode* PhaseIdealLoop::clone_predicate(ProjNode* predicate_proj, Node* new_entry, + Deoptimization::DeoptReason reason, + PhaseIdealLoop* loop_phase, + PhaseIterGVN* igvn) { + ProjNode* new_predicate_proj; + if (loop_phase != NULL) { + new_predicate_proj = loop_phase->create_new_if_for_predicate(predicate_proj, new_entry, reason); + } else { + new_predicate_proj = igvn->create_new_if_for_predicate(predicate_proj, new_entry, reason); + } + IfNode* iff = new_predicate_proj->in(0)->as_If(); + Node* ctrl = iff->in(0); + + // Match original condition since predicate's projections could be swapped. + assert(predicate_proj->in(0)->in(1)->in(1)->Opcode()==Op_Opaque1, "must be"); + Node* opq = new (igvn->C, 2) Opaque1Node(igvn->C, predicate_proj->in(0)->in(1)->in(1)->in(1)); + igvn->C->add_predicate_opaq(opq); + + Node* bol = new (igvn->C, 2) Conv2BNode(opq); + if (loop_phase != NULL) { + loop_phase->register_new_node(opq, ctrl); + loop_phase->register_new_node(bol, ctrl); + } else { + igvn->register_new_node_with_optimizer(opq); + igvn->register_new_node_with_optimizer(bol); + } + igvn->hash_delete(iff); + iff->set_req(1, bol); + return new_predicate_proj; +} + +//--------------------------move_predicate----------------------- +// Cut predicate from old place and move it to new. +ProjNode* PhaseIdealLoop::move_predicate(ProjNode* predicate_proj, Node* new_entry, + Deoptimization::DeoptReason reason, + PhaseIdealLoop* loop_phase, + PhaseIterGVN* igvn) { + assert(new_entry != NULL, "must be"); + assert(predicate_proj->in(0)->in(1)->in(1)->Opcode()==Op_Opaque1, "must be"); + IfNode* iff = predicate_proj->in(0)->as_If(); + Node* old_entry = iff->in(0); + + // Cut predicate from old place. + Node* old = predicate_proj; + igvn->_worklist.push(old); + for (DUIterator_Last imin, i = old->last_outs(imin); i >= imin; ) { + Node* use = old->last_out(i); // for each use... + igvn->hash_delete(use); + igvn->_worklist.push(use); + // Update use-def info + uint uses_found = 0; + for (uint j = 0; j < use->req(); j++) { + if (use->in(j) == old) { + use->set_req(j, old_entry); + uses_found++; + if (loop_phase != NULL) { + if (use->is_CFG()) { + // When called from beautify_loops() idom is not constructed yet. + if (loop_phase->_idom != NULL) + loop_phase->set_idom(use, old_entry, loop_phase->dom_depth(use)); + } else { + loop_phase->set_ctrl(use, old_entry); + } + } + } + } + i -= uses_found; // we deleted 1 or more copies of this edge + } + + // Move predicate. + igvn->hash_delete(iff); + iff->set_req(0, new_entry); + igvn->_worklist.push(iff); + + if (loop_phase != NULL) { + // Fix up idom and ctrl. + loop_phase->set_ctrl(iff->in(1), new_entry); + loop_phase->set_ctrl(iff->in(1)->in(1), new_entry); + // When called from beautify_loops() idom is not constructed yet. 
+ if (loop_phase->_idom != NULL) + loop_phase->set_idom(iff, new_entry, loop_phase->dom_depth(iff)); + } + + return predicate_proj; +} + +//--------------------------clone_loop_predicates----------------------- +// Interface from IGVN +Node* PhaseIterGVN::clone_loop_predicates(Node* old_entry, Node* new_entry) { + return PhaseIdealLoop::clone_loop_predicates(old_entry, new_entry, false, NULL, this); +} +Node* PhaseIterGVN::move_loop_predicates(Node* old_entry, Node* new_entry) { + return PhaseIdealLoop::clone_loop_predicates(old_entry, new_entry, true, NULL, this); +} + +// Interface from PhaseIdealLoop +Node* PhaseIdealLoop::clone_loop_predicates(Node* old_entry, Node* new_entry) { + return clone_loop_predicates(old_entry, new_entry, false, this, &this->_igvn); +} +Node* PhaseIdealLoop::move_loop_predicates(Node* old_entry, Node* new_entry) { + return clone_loop_predicates(old_entry, new_entry, true, this, &this->_igvn); +} + +// Clone loop predicates to cloned loops (peeled, unswitched, split_if). +Node* PhaseIdealLoop::clone_loop_predicates(Node* old_entry, Node* new_entry, + bool move_predicates, + PhaseIdealLoop* loop_phase, + PhaseIterGVN* igvn) { +#ifdef ASSERT + if (new_entry == NULL || !(new_entry->is_Proj() || new_entry->is_Region() || new_entry->is_SafePoint())) { + if (new_entry != NULL) + new_entry->dump(); + assert(false, "not IfTrue, IfFalse, Region or SafePoint"); + } +#endif + // Search original predicates + Node* entry = old_entry; + if (UseLoopPredicate) { + ProjNode* predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); + if (predicate_proj != NULL) { // right pattern that can be used by loop predication + assert(entry->in(0)->in(1)->in(1)->Opcode()==Op_Opaque1, "must be"); + if (move_predicates) { + new_entry = move_predicate(predicate_proj, new_entry, + Deoptimization::Reason_predicate, + loop_phase, igvn); + assert(new_entry == predicate_proj, "old predicate fall through projection"); + } else { + // clone predicate + new_entry = clone_predicate(predicate_proj, new_entry, + Deoptimization::Reason_predicate, + loop_phase, igvn); + assert(new_entry != NULL && new_entry->is_Proj(), "IfTrue or IfFalse after clone predicate"); + } + if (TraceLoopPredicate) { + tty->print_cr("Loop Predicate %s: ", move_predicates ? "moved" : "cloned"); + debug_only( new_entry->in(0)->dump(); ) + } + } + } + return new_entry; +} + +//--------------------------eliminate_loop_predicates----------------------- +void PhaseIdealLoop::eliminate_loop_predicates(Node* entry) { + if (UseLoopPredicate) { + ProjNode* predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); + if (predicate_proj != NULL) { // right pattern that can be used by loop predication + Node* n = entry->in(0)->in(1)->in(1); + assert(n->Opcode()==Op_Opaque1, "must be"); + // Remove Opaque1 node from predicates list. + // IGVN will remove this predicate check. + _igvn.replace_node(n, n->in(1)); + } + } +} + +//--------------------------skip_loop_predicates------------------------------ +// Skip related predicates. 
+Node* PhaseIdealLoop::skip_loop_predicates(Node* entry) { + Node* predicate = NULL; + if (UseLoopPredicate) { + predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); + if (predicate != NULL) { // right pattern that can be used by loop predication + assert(entry->is_Proj() && entry->in(0)->in(1)->in(1)->Opcode()==Op_Opaque1, "must be"); + IfNode* iff = entry->in(0)->as_If(); + ProjNode* uncommon_proj = iff->proj_out(1 - entry->as_Proj()->_con); + Node* rgn = uncommon_proj->unique_ctrl_out(); + assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct"); + entry = entry->in(0)->in(0); + while (entry != NULL && entry->is_Proj() && entry->in(0)->is_If()) { + uncommon_proj = entry->in(0)->as_If()->proj_out(1 - entry->as_Proj()->_con); + if (uncommon_proj->unique_ctrl_out() != rgn) + break; + entry = entry->in(0)->in(0); + } + } + } + return entry; +} + +//--------------------------find_predicate_insertion_point------------------- +// Find a good location to insert a predicate +ProjNode* PhaseIdealLoop::find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason) { + if (start_c == NULL || !start_c->is_Proj()) + return NULL; + if (is_uncommon_trap_if_pattern(start_c->as_Proj(), reason)) { + return start_c->as_Proj(); + } + return NULL; +} + +//--------------------------find_predicate------------------------------------ +// Find a predicate +Node* PhaseIdealLoop::find_predicate(Node* entry) { + Node* predicate = NULL; + if (UseLoopPredicate) { + predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); + if (predicate != NULL) { // right pattern that can be used by loop predication + assert(entry->in(0)->in(1)->in(1)->Opcode()==Op_Opaque1, "must be"); + return entry; + } + } + return NULL; +} + +//------------------------------Invariance----------------------------------- +// Helper class for loop_predication_impl to compute invariance on the fly and +// clone invariants. +class Invariance : public StackObj { + VectorSet _visited, _invariant; + Node_Stack _stack; + VectorSet _clone_visited; + Node_List _old_new; // map of old to new (clone) + IdealLoopTree* _lpt; + PhaseIdealLoop* _phase; + + // Helper function to set up the invariance for invariance computation + // If n is a known invariant, set up directly. Otherwise, look up the + // the possibility to push n onto the stack for further processing. + void visit(Node* use, Node* n) { + if (_lpt->is_invariant(n)) { // known invariant + _invariant.set(n->_idx); + } else if (!n->is_CFG()) { + Node *n_ctrl = _phase->ctrl_or_self(n); + Node *u_ctrl = _phase->ctrl_or_self(use); // self if use is a CFG + if (_phase->is_dominator(n_ctrl, u_ctrl)) { + _stack.push(n, n->in(0) == NULL ? 
1 : 0); + } + } + } + + // Compute invariance for "the_node" and (possibly) all its inputs recursively + // on the fly + void compute_invariance(Node* n) { + assert(_visited.test(n->_idx), "must be"); + visit(n, n); + while (_stack.is_nonempty()) { + Node* n = _stack.node(); + uint idx = _stack.index(); + if (idx == n->req()) { // all inputs are processed + _stack.pop(); + // n is invariant if it's inputs are all invariant + bool all_inputs_invariant = true; + for (uint i = 0; i < n->req(); i++) { + Node* in = n->in(i); + if (in == NULL) continue; + assert(_visited.test(in->_idx), "must have visited input"); + if (!_invariant.test(in->_idx)) { // bad guy + all_inputs_invariant = false; + break; + } + } + if (all_inputs_invariant) { + _invariant.set(n->_idx); // I am a invariant too + } + } else { // process next input + _stack.set_index(idx + 1); + Node* m = n->in(idx); + if (m != NULL && !_visited.test_set(m->_idx)) { + visit(n, m); + } + } + } + } + + // Helper function to set up _old_new map for clone_nodes. + // If n is a known invariant, set up directly ("clone" of n == n). + // Otherwise, push n onto the stack for real cloning. + void clone_visit(Node* n) { + assert(_invariant.test(n->_idx), "must be invariant"); + if (_lpt->is_invariant(n)) { // known invariant + _old_new.map(n->_idx, n); + } else { // to be cloned + assert(!n->is_CFG(), "should not see CFG here"); + _stack.push(n, n->in(0) == NULL ? 1 : 0); + } + } + + // Clone "n" and (possibly) all its inputs recursively + void clone_nodes(Node* n, Node* ctrl) { + clone_visit(n); + while (_stack.is_nonempty()) { + Node* n = _stack.node(); + uint idx = _stack.index(); + if (idx == n->req()) { // all inputs processed, clone n! + _stack.pop(); + // clone invariant node + Node* n_cl = n->clone(); + _old_new.map(n->_idx, n_cl); + _phase->register_new_node(n_cl, ctrl); + for (uint i = 0; i < n->req(); i++) { + Node* in = n_cl->in(i); + if (in == NULL) continue; + n_cl->set_req(i, _old_new[in->_idx]); + } + } else { // process next input + _stack.set_index(idx + 1); + Node* m = n->in(idx); + if (m != NULL && !_clone_visited.test_set(m->_idx)) { + clone_visit(m); // visit the input + } + } + } + } + + public: + Invariance(Arena* area, IdealLoopTree* lpt) : + _lpt(lpt), _phase(lpt->_phase), + _visited(area), _invariant(area), _stack(area, 10 /* guess */), + _clone_visited(area), _old_new(area) + {} + + // Map old to n for invariance computation and clone + void map_ctrl(Node* old, Node* n) { + assert(old->is_CFG() && n->is_CFG(), "must be"); + _old_new.map(old->_idx, n); // "clone" of old is n + _invariant.set(old->_idx); // old is invariant + _clone_visited.set(old->_idx); + } + + // Driver function to compute invariance + bool is_invariant(Node* n) { + if (!_visited.test_set(n->_idx)) + compute_invariance(n); + return (_invariant.test(n->_idx) != 0); + } + + // Driver function to clone invariant + Node* clone(Node* n, Node* ctrl) { + assert(ctrl->is_CFG(), "must be"); + assert(_invariant.test(n->_idx), "must be an invariant"); + if (!_clone_visited.test(n->_idx)) + clone_nodes(n, ctrl); + return _old_new[n->_idx]; + } +}; + +//------------------------------is_range_check_if ----------------------------------- +// Returns true if the predicate of iff is in "scale*iv + offset u< load_range(ptr)" format +// Note: this function is particularly designed for loop predication. 
We require load_range +// and offset to be loop invariant computed on the fly by "invar" +bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const { + if (!is_loop_exit(iff)) { + return false; + } + if (!iff->in(1)->is_Bool()) { + return false; + } + const BoolNode *bol = iff->in(1)->as_Bool(); + if (bol->_test._test != BoolTest::lt) { + return false; + } + if (!bol->in(1)->is_Cmp()) { + return false; + } + const CmpNode *cmp = bol->in(1)->as_Cmp(); + if (cmp->Opcode() != Op_CmpU) { + return false; + } + Node* range = cmp->in(2); + if (range->Opcode() != Op_LoadRange) { + const TypeInt* tint = phase->_igvn.type(range)->isa_int(); + if (!OptimizeFill || tint == NULL || tint->empty() || tint->_lo < 0) { + // Allow predication on positive values that aren't LoadRanges. + // This allows optimization of loops where the length of the + // array is a known value and doesn't need to be loaded back + // from the array. + return false; + } + } + if (!invar.is_invariant(range)) { + return false; + } + Node *iv = _head->as_CountedLoop()->phi(); + int scale = 0; + Node *offset = NULL; + if (!phase->is_scaled_iv_plus_offset(cmp->in(1), iv, &scale, &offset)) { + return false; + } + if (offset && !invar.is_invariant(offset)) { // offset must be invariant + return false; + } + return true; +} + +//------------------------------rc_predicate----------------------------------- +// Create a range check predicate +// +// for (i = init; i < limit; i += stride) { +// a[scale*i+offset] +// } +// +// Compute max(scale*i + offset) for init <= i < limit and build the predicate +// as "max(scale*i + offset) u< a.length". +// +// There are two cases for max(scale*i + offset): +// (1) stride*scale > 0 +// max(scale*i + offset) = scale*(limit-stride) + offset +// (2) stride*scale < 0 +// max(scale*i + offset) = scale*init + offset +BoolNode* PhaseIdealLoop::rc_predicate(Node* ctrl, + int scale, Node* offset, + Node* init, Node* limit, Node* stride, + Node* range, bool upper) { + DEBUG_ONLY(ttyLocker ttyl); + if (TraceLoopPredicate) tty->print("rc_predicate "); + + Node* max_idx_expr = init; + int stride_con = stride->get_int(); + if ((stride_con > 0) == (scale > 0) == upper) { + max_idx_expr = new (C, 3) SubINode(limit, stride); + register_new_node(max_idx_expr, ctrl); + if (TraceLoopPredicate) tty->print("(limit - stride) "); + } else { + if (TraceLoopPredicate) tty->print("init "); + } + + if (scale != 1) { + ConNode* con_scale = _igvn.intcon(scale); + max_idx_expr = new (C, 3) MulINode(max_idx_expr, con_scale); + register_new_node(max_idx_expr, ctrl); + if (TraceLoopPredicate) tty->print("* %d ", scale); + } + + if (offset && (!offset->is_Con() || offset->get_int() != 0)){ + max_idx_expr = new (C, 3) AddINode(max_idx_expr, offset); + register_new_node(max_idx_expr, ctrl); + if (TraceLoopPredicate) + if (offset->is_Con()) tty->print("+ %d ", offset->get_int()); + else tty->print("+ offset "); + } + + CmpUNode* cmp = new (C, 3) CmpUNode(max_idx_expr, range); + register_new_node(cmp, ctrl); + BoolNode* bol = new (C, 2) BoolNode(cmp, BoolTest::lt); + register_new_node(bol, ctrl); + + if (TraceLoopPredicate) tty->print_cr("<u range"); + return bol; +} + +//------------------------------ loop_predication_impl-------------------------- +// Insert loop predicates for null checks and range checks +bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) { + if (!UseLoopPredicate) return false; + + if (!loop->_head->is_Loop()) { + // Could be a simple region when irreducible 
loops are present. + return false; + } + + if (loop->_head->unique_ctrl_out()->Opcode() == Op_NeverBranch) { + // do nothing for infinite loops + return false; + } + + CountedLoopNode *cl = NULL; + if (loop->_head->is_CountedLoop()) { + cl = loop->_head->as_CountedLoop(); + // do nothing for iteration-splitted loops + if (!cl->is_normal_loop()) return false; + } + + LoopNode *lpn = loop->_head->as_Loop(); + Node* entry = lpn->in(LoopNode::EntryControl); + + ProjNode *predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); + if (!predicate_proj) { +#ifndef PRODUCT + if (TraceLoopPredicate) { + tty->print("missing predicate:"); + loop->dump_head(); + lpn->dump(1); + } +#endif + return false; + } + ConNode* zero = _igvn.intcon(0); + set_ctrl(zero, C->root()); + + ResourceArea *area = Thread::current()->resource_area(); + Invariance invar(area, loop); + + // Create list of if-projs such that a newer proj dominates all older + // projs in the list, and they all dominate loop->tail() + Node_List if_proj_list(area); + LoopNode *head = loop->_head->as_Loop(); + Node *current_proj = loop->tail(); //start from tail + while (current_proj != head) { + if (loop == get_loop(current_proj) && // still in the loop ? + current_proj->is_Proj() && // is a projection ? + current_proj->in(0)->Opcode() == Op_If) { // is a if projection ? + if_proj_list.push(current_proj); + } + current_proj = idom(current_proj); + } + + bool hoisted = false; // true if at least one proj is promoted + while (if_proj_list.size() > 0) { + // Following are changed to nonnull when a predicate can be hoisted + ProjNode* new_predicate_proj = NULL; + + ProjNode* proj = if_proj_list.pop()->as_Proj(); + IfNode* iff = proj->in(0)->as_If(); + + if (!is_uncommon_trap_if_pattern(proj, Deoptimization::Reason_none)) { + if (loop->is_loop_exit(iff)) { + // stop processing the remaining projs in the list because the execution of them + // depends on the condition of "iff" (iff->in(1)). + break; + } else { + // Both arms are inside the loop. There are two cases: + // (1) there is one backward branch. In this case, any remaining proj + // in the if_proj list post-dominates "iff". So, the condition of "iff" + // does not determine the execution the remining projs directly, and we + // can safely continue. + // (2) both arms are forwarded, i.e. a diamond shape. In this case, "proj" + // does not dominate loop->tail(), so it can not be in the if_proj list. + continue; + } + } + + Node* test = iff->in(1); + if (!test->is_Bool()){ //Conv2B, ... + continue; + } + BoolNode* bol = test->as_Bool(); + if (invar.is_invariant(bol)) { + // Invariant test + new_predicate_proj = create_new_if_for_predicate(predicate_proj, NULL, + Deoptimization::Reason_predicate); + Node* ctrl = new_predicate_proj->in(0)->as_If()->in(0); + BoolNode* new_predicate_bol = invar.clone(bol, ctrl)->as_Bool(); + + // Negate test if necessary + bool negated = false; + if (proj->_con != predicate_proj->_con) { + new_predicate_bol = new (C, 2) BoolNode(new_predicate_bol->in(1), new_predicate_bol->_test.negate()); + register_new_node(new_predicate_bol, ctrl); + negated = true; + } + IfNode* new_predicate_iff = new_predicate_proj->in(0)->as_If(); + _igvn.hash_delete(new_predicate_iff); + new_predicate_iff->set_req(1, new_predicate_bol); +#ifndef PRODUCT + if (TraceLoopPredicate) { + tty->print("Predicate invariant if%s: %d ", negated ? 
" negated" : "", new_predicate_iff->_idx); + loop->dump_head(); + } else if (TraceLoopOpts) { + tty->print("Predicate IC "); + loop->dump_head(); + } +#endif + } else if (cl != NULL && loop->is_range_check_if(iff, this, invar)) { + assert(proj->_con == predicate_proj->_con, "must match"); + + // Range check for counted loops + const Node* cmp = bol->in(1)->as_Cmp(); + Node* idx = cmp->in(1); + assert(!invar.is_invariant(idx), "index is variant"); + assert(cmp->in(2)->Opcode() == Op_LoadRange || OptimizeFill, "must be"); + Node* rng = cmp->in(2); + assert(invar.is_invariant(rng), "range must be invariant"); + int scale = 1; + Node* offset = zero; + bool ok = is_scaled_iv_plus_offset(idx, cl->phi(), &scale, &offset); + assert(ok, "must be index expression"); + + Node* init = cl->init_trip(); + Node* limit = cl->limit(); + Node* stride = cl->stride(); + + // Build if's for the upper and lower bound tests. The + // lower_bound test will dominate the upper bound test and all + // cloned or created nodes will use the lower bound test as + // their declared control. + ProjNode* lower_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, Deoptimization::Reason_predicate); + ProjNode* upper_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, Deoptimization::Reason_predicate); + assert(upper_bound_proj->in(0)->as_If()->in(0) == lower_bound_proj, "should dominate"); + Node *ctrl = lower_bound_proj->in(0)->as_If()->in(0); + + // Perform cloning to keep Invariance state correct since the + // late schedule will place invariant things in the loop. + rng = invar.clone(rng, ctrl); + if (offset && offset != zero) { + assert(invar.is_invariant(offset), "offset must be loop invariant"); + offset = invar.clone(offset, ctrl); + } + + // Test the lower bound + Node* lower_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, rng, false); + IfNode* lower_bound_iff = lower_bound_proj->in(0)->as_If(); + _igvn.hash_delete(lower_bound_iff); + lower_bound_iff->set_req(1, lower_bound_bol); + if (TraceLoopPredicate) tty->print_cr("lower bound check if: %d", lower_bound_iff->_idx); + + // Test the upper bound + Node* upper_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, rng, true); + IfNode* upper_bound_iff = upper_bound_proj->in(0)->as_If(); + _igvn.hash_delete(upper_bound_iff); + upper_bound_iff->set_req(1, upper_bound_bol); + if (TraceLoopPredicate) tty->print_cr("upper bound check if: %d", lower_bound_iff->_idx); + + // Fall through into rest of the clean up code which will move + // any dependent nodes onto the upper bound test. + new_predicate_proj = upper_bound_proj; + +#ifndef PRODUCT + if (TraceLoopOpts && !TraceLoopPredicate) { + tty->print("Predicate RC "); + loop->dump_head(); + } +#endif + } else { + // Loop variant check (for example, range check in non-counted loop) + // with uncommon trap. 
+ continue; + } + assert(new_predicate_proj != NULL, "sanity"); + // Success - attach condition (new_predicate_bol) to predicate if + invar.map_ctrl(proj, new_predicate_proj); // so that invariance test can be appropriate + + // Eliminate the old If in the loop body + dominated_by( new_predicate_proj, iff, proj->_con != new_predicate_proj->_con ); + + hoisted = true; + C->set_major_progress(); + } // end while + +#ifndef PRODUCT + // report that the loop predication has been actually performed + // for this loop + if (TraceLoopPredicate && hoisted) { + tty->print("Loop Predication Performed:"); + loop->dump_head(); + } +#endif + + return hoisted; +} + +//------------------------------loop_predication-------------------------------- +// driver routine for loop predication optimization +bool IdealLoopTree::loop_predication( PhaseIdealLoop *phase) { + bool hoisted = false; + // Recursively promote predicates + if (_child) { + hoisted = _child->loop_predication( phase); + } + + // self + if (!_irreducible && !tail()->is_top()) { + hoisted |= phase->loop_predication_impl(this); + } + + if (_next) { //sibling + hoisted |= _next->loop_predication( phase); + } + + return hoisted; +} +
--- a/src/share/vm/opto/loopTransform.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/loopTransform.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -63,6 +63,46 @@ } } +//------------------------------compute_exact_trip_count----------------------- +// Compute loop exact trip count if possible. Do not recalculate trip count for +// split loops (pre-main-post) which have their limits and inits behind Opaque node. +void IdealLoopTree::compute_exact_trip_count( PhaseIdealLoop *phase ) { + if (!_head->as_Loop()->is_valid_counted_loop()) { + return; + } + CountedLoopNode* cl = _head->as_CountedLoop(); + // Trip count may become nonexact for iteration split loops since + // RCE modifies limits. Note, _trip_count value is not reset since + // it is used to limit unrolling of main loop. + cl->set_nonexact_trip_count(); + + // Loop's test should be part of loop. + if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue)))) + return; // Infinite loop + +#ifdef ASSERT + BoolTest::mask bt = cl->loopexit()->test_trip(); + assert(bt == BoolTest::lt || bt == BoolTest::gt || + bt == BoolTest::ne, "canonical test is expected"); +#endif + + Node* init_n = cl->init_trip(); + Node* limit_n = cl->limit(); + if (init_n != NULL && init_n->is_Con() && + limit_n != NULL && limit_n->is_Con()) { + // Use longs to avoid integer overflow. + int stride_con = cl->stride_con(); + long init_con = cl->init_trip()->get_int(); + long limit_con = cl->limit()->get_int(); + int stride_m = stride_con - (stride_con > 0 ? 1 : -1); + long trip_count = (limit_con - init_con + stride_m)/stride_con; + if (trip_count > 0 && (julong)trip_count < (julong)max_juint) { + // Set exact trip count. + cl->set_exact_trip_count((uint)trip_count); + } + } +} + //------------------------------compute_profile_trip_cnt---------------------------- // Compute loop trip count from profile data as // (backedge_count + loop_exit_count) / loop_exit_count @@ -205,6 +245,8 @@ } phase->register_new_node(addx, phase->get_ctrl(x)); phase->_igvn.replace_node(n1, addx); + assert(phase->get_loop(phase->get_ctrl(n1)) == this, ""); + _body.yank(n1); return addx; } @@ -299,6 +341,132 @@ // peeled-loop backedge has 2 users. // Step 3: Cut the backedge on the clone (so its not a loop) and remove the // extra backedge user. 
+// +// orig +// +// stmt1 +// | +// v +// loop predicate +// | +// v +// loop<----+ +// | | +// stmt2 | +// | | +// v | +// if ^ +// / \ | +// / \ | +// v v | +// false true | +// / \ | +// / ----+ +// | +// v +// exit +// +// +// after clone loop +// +// stmt1 +// | +// v +// loop predicate +// / \ +// clone / \ orig +// / \ +// / \ +// v v +// +---->loop clone loop<----+ +// | | | | +// | stmt2 clone stmt2 | +// | | | | +// | v v | +// ^ if clone If ^ +// | / \ / \ | +// | / \ / \ | +// | v v v v | +// | true false false true | +// | / \ / \ | +// +---- \ / ----+ +// \ / +// 1v v2 +// region +// | +// v +// exit +// +// +// after peel and predicate move +// +// stmt1 +// / +// / +// clone / orig +// / +// / +----------+ +// / | | +// / loop predicate | +// / | | +// v v | +// TOP-->loop clone loop<----+ | +// | | | | +// stmt2 clone stmt2 | | +// | | | ^ +// v v | | +// if clone If ^ | +// / \ / \ | | +// / \ / \ | | +// v v v v | | +// true false false true | | +// | \ / \ | | +// | \ / ----+ ^ +// | \ / | +// | 1v v2 | +// v region | +// | | | +// | v | +// | exit | +// | | +// +--------------->-----------------+ +// +// +// final graph +// +// stmt1 +// | +// v +// stmt2 clone +// | +// v +// if clone +// / | +// / | +// v v +// false true +// | | +// | v +// | loop predicate +// | | +// | v +// | loop<----+ +// | | | +// | stmt2 | +// | | | +// | v | +// v if ^ +// | / \ | +// | / \ | +// | v v | +// | false true | +// | | \ | +// v v --+ +// region +// | +// v +// exit +// void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) { C->set_major_progress(); @@ -307,26 +475,33 @@ // iterations adjusted. Therefore, we need to declare this loop as // no longer a 'main' loop; it will need new pre and post loops before // we can do further RCE. - Node *h = loop->_head; - if( h->is_CountedLoop() ) { - CountedLoopNode *cl = h->as_CountedLoop(); +#ifndef PRODUCT + if (TraceLoopOpts) { + tty->print("Peel "); + loop->dump_head(); + } +#endif + Node* head = loop->_head; + bool counted_loop = head->is_CountedLoop(); + if (counted_loop) { + CountedLoopNode *cl = head->as_CountedLoop(); assert(cl->trip_count() > 0, "peeling a fully unrolled loop"); cl->set_trip_count(cl->trip_count() - 1); - if( cl->is_main_loop() ) { + if (cl->is_main_loop()) { cl->set_normal_loop(); #ifndef PRODUCT - if( PrintOpto && VerifyLoopOptimizations ) { + if (PrintOpto && VerifyLoopOptimizations) { tty->print("Peeling a 'main' loop; resetting to 'normal' "); loop->dump_head(); } #endif } } + Node* entry = head->in(LoopNode::EntryControl); // Step 1: Clone the loop body. The clone becomes the peeled iteration. // The pre-loop illegally has 2 control users (old & new loops). - clone_loop( loop, old_new, dom_depth(loop->_head) ); - + clone_loop( loop, old_new, dom_depth(head) ); // Step 2: Make the old-loop fall-in edges point to the peeled iteration. // Do this by making the old-loop fall-in edges act as if they came @@ -334,12 +509,15 @@ // backedges) and then map to the new peeled iteration. This leaves // the pre-loop with only 1 user (the new peeled iteration), but the // peeled-loop backedge has 2 users. - for (DUIterator_Fast jmax, j = loop->_head->fast_outs(jmax); j < jmax; j++) { - Node* old = loop->_head->fast_out(j); - if( old->in(0) == loop->_head && old->req() == 3 && - (old->is_Loop() || old->is_Phi()) ) { - Node *new_exit_value = old_new[old->in(LoopNode::LoopBackControl)->_idx]; - if( !new_exit_value ) // Backedge value is ALSO loop invariant? 
+ Node* new_exit_value = old_new[head->in(LoopNode::LoopBackControl)->_idx]; + new_exit_value = move_loop_predicates(entry, new_exit_value); + _igvn.hash_delete(head); + head->set_req(LoopNode::EntryControl, new_exit_value); + for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) { + Node* old = head->fast_out(j); + if (old->in(0) == loop->_head && old->req() == 3 && old->is_Phi()) { + new_exit_value = old_new[old->in(LoopNode::LoopBackControl)->_idx]; + if (!new_exit_value ) // Backedge value is ALSO loop invariant? // Then loop body backedge value remains the same. new_exit_value = old->in(LoopNode::LoopBackControl); _igvn.hash_delete(old); @@ -350,12 +528,12 @@ // Step 3: Cut the backedge on the clone (so its not a loop) and remove the // extra backedge user. - Node *nnn = old_new[loop->_head->_idx]; - _igvn.hash_delete(nnn); - nnn->set_req(LoopNode::LoopBackControl, C->top()); - for (DUIterator_Fast j2max, j2 = nnn->fast_outs(j2max); j2 < j2max; j2++) { - Node* use = nnn->fast_out(j2); - if( use->in(0) == nnn && use->req() == 3 && use->is_Phi() ) { + Node* new_head = old_new[head->_idx]; + _igvn.hash_delete(new_head); + new_head->set_req(LoopNode::LoopBackControl, C->top()); + for (DUIterator_Fast j2max, j2 = new_head->fast_outs(j2max); j2 < j2max; j2++) { + Node* use = new_head->fast_out(j2); + if (use->in(0) == new_head && use->req() == 3 && use->is_Phi()) { _igvn.hash_delete(use); use->set_req(LoopNode::LoopBackControl, C->top()); } @@ -363,15 +541,15 @@ // Step 4: Correct dom-depth info. Set to loop-head depth. - int dd = dom_depth(loop->_head); - set_idom(loop->_head, loop->_head->in(1), dd); + int dd = dom_depth(head); + set_idom(head, head->in(1), dd); for (uint j3 = 0; j3 < loop->_body.size(); j3++) { Node *old = loop->_body.at(j3); Node *nnn = old_new[old->_idx]; if (!has_ctrl(nnn)) set_idom(nnn, idom(nnn), dd-1); // While we're at it, remove any SafePoints from the peeled code - if( old->Opcode() == Op_SafePoint ) { + if (old->Opcode() == Op_SafePoint) { Node *nnn = old_new[old->_idx]; lazy_replace(nnn,nnn->in(TypeFunc::Control)); } @@ -384,34 +562,26 @@ loop->record_for_igvn(); } +#define EMPTY_LOOP_SIZE 7 // number of nodes in an empty loop + //------------------------------policy_maximally_unroll------------------------ -// Return exact loop trip count, or 0 if not maximally unrolling +// Calculate exact loop trip count and return true if loop can be maximally +// unrolled. bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const { CountedLoopNode *cl = _head->as_CountedLoop(); - assert( cl->is_normal_loop(), "" ); - - Node *init_n = cl->init_trip(); - Node *limit_n = cl->limit(); + assert(cl->is_normal_loop(), ""); + if (!cl->is_valid_counted_loop()) + return false; // Malformed counted loop - // Non-constant bounds - if( init_n == NULL || !init_n->is_Con() || - limit_n == NULL || !limit_n->is_Con() || - // protect against stride not being a constant - !cl->stride_is_con() ) { + if (!cl->has_exact_trip_count()) { + // Trip count is not exact. return false; } - int init = init_n->get_int(); - int limit = limit_n->get_int(); - int span = limit - init; - int stride = cl->stride_con(); - if (init >= limit || stride > span) { - // return a false (no maximally unroll) and the regular unroll/peel - // route will make a small mess which CCP will fold away. - return false; - } - uint trip_count = span/stride; // trip_count can be greater than 2 Gig. 
- assert( (int)trip_count*stride == span, "must divide evenly" ); + uint trip_count = cl->trip_count(); + // Note, max_juint is used to indicate unknown trip count. + assert(trip_count > 1, "one iteration loop should be optimized out already"); + assert(trip_count < max_juint, "exact trip_count should be less than max_uint."); // Real policy: if we maximally unroll, does it get too big? // Allow the unrolled mess to get larger than standard loop @@ -419,18 +589,46 @@ uint body_size = _body.size(); uint unroll_limit = (uint)LoopUnrollLimit * 4; assert( (intx)unroll_limit == LoopUnrollLimit * 4, "LoopUnrollLimit must fit in 32bits"); - cl->set_trip_count(trip_count); - if( trip_count <= unroll_limit && body_size <= unroll_limit ) { - uint new_body_size = body_size * trip_count; - if (new_body_size <= unroll_limit && - body_size == new_body_size / trip_count && - // Unrolling can result in a large amount of node construction - new_body_size < MaxNodeLimit - phase->C->unique()) { - return true; // maximally unroll - } + if (trip_count > unroll_limit || body_size > unroll_limit) { + return false; + } + + // Take into account that after unroll conjoined heads and tails will fold, + // otherwise policy_unroll() may allow more unrolling than max unrolling. + uint new_body_size = EMPTY_LOOP_SIZE + (body_size - EMPTY_LOOP_SIZE) * trip_count; + uint tst_body_size = (new_body_size - EMPTY_LOOP_SIZE) / trip_count + EMPTY_LOOP_SIZE; + if (body_size != tst_body_size) // Check for int overflow + return false; + if (new_body_size > unroll_limit || + // Unrolling can result in a large amount of node construction + new_body_size >= MaxNodeLimit - phase->C->unique()) { + return false; } - return false; // Do not maximally unroll + // Currently we don't have policy to optimize one iteration loops. + // Maximally unrolling transformation is used for that: + // it is peeled and the original loop become non reachable (dead). + // Also fully unroll a loop with few iterations regardless next + // conditions since following loop optimizations will split + // such loop anyway (pre-main-post). + if (trip_count <= 3) + return true; + + // Do not unroll a loop with String intrinsics code. + // String intrinsics are large and have loops. + for (uint k = 0; k < _body.size(); k++) { + Node* n = _body.at(k); + switch (n->Opcode()) { + case Op_StrComp: + case Op_StrEquals: + case Op_StrIndexOf: + case Op_AryEq: { + return false; + } + } // switch + } + + return true; // Do maximally unroll } @@ -440,13 +638,16 @@ bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const { CountedLoopNode *cl = _head->as_CountedLoop(); - assert( cl->is_normal_loop() || cl->is_main_loop(), "" ); + assert(cl->is_normal_loop() || cl->is_main_loop(), ""); - // protect against stride not being a constant - if( !cl->stride_is_con() ) return false; + if (!cl->is_valid_counted_loop()) + return false; // Malformed counted loop // protect against over-unrolling - if( cl->trip_count() <= 1 ) return false; + if (cl->trip_count() <= 1) return false; + + // Check for stride being a small enough constant + if (abs(cl->stride_con()) > (1<<3)) return false; int future_unroll_ct = cl->unrolled_count() * 2; @@ -477,21 +678,21 @@ // Non-constant bounds. // Protect against over-unrolling when init or/and limit are not constant // (so that trip_count's init value is maxint) but iv range is known. 
- if( init_n == NULL || !init_n->is_Con() || - limit_n == NULL || !limit_n->is_Con() ) { + if (init_n == NULL || !init_n->is_Con() || + limit_n == NULL || !limit_n->is_Con()) { Node* phi = cl->phi(); - if( phi != NULL ) { + if (phi != NULL) { assert(phi->is_Phi() && phi->in(0) == _head, "Counted loop should have iv phi."); const TypeInt* iv_type = phase->_igvn.type(phi)->is_int(); int next_stride = cl->stride_con() * 2; // stride after this unroll - if( next_stride > 0 ) { - if( iv_type->_lo + next_stride <= iv_type->_lo || // overflow - iv_type->_lo + next_stride > iv_type->_hi ) { + if (next_stride > 0) { + if (iv_type->_lo + next_stride <= iv_type->_lo || // overflow + iv_type->_lo + next_stride > iv_type->_hi) { return false; // over-unrolling } - } else if( next_stride < 0 ) { - if( iv_type->_hi + next_stride >= iv_type->_hi || // overflow - iv_type->_hi + next_stride < iv_type->_lo ) { + } else if (next_stride < 0) { + if (iv_type->_hi + next_stride >= iv_type->_hi || // overflow + iv_type->_hi + next_stride < iv_type->_lo) { return false; // over-unrolling } } @@ -503,25 +704,31 @@ // Key test to unroll CaffeineMark's Logic test int xors_in_loop = 0; // Also count ModL, DivL and MulL which expand mightly - for( uint k = 0; k < _body.size(); k++ ) { - switch( _body.at(k)->Opcode() ) { - case Op_XorI: xors_in_loop++; break; // CaffeineMark's Logic test - case Op_ModL: body_size += 30; break; - case Op_DivL: body_size += 30; break; - case Op_MulL: body_size += 10; break; - } + for (uint k = 0; k < _body.size(); k++) { + Node* n = _body.at(k); + switch (n->Opcode()) { + case Op_XorI: xors_in_loop++; break; // CaffeineMark's Logic test + case Op_ModL: body_size += 30; break; + case Op_DivL: body_size += 30; break; + case Op_MulL: body_size += 10; break; + case Op_StrComp: + case Op_StrEquals: + case Op_StrIndexOf: + case Op_AryEq: { + // Do not unroll a loop with String intrinsics code. + // String intrinsics are large and have loops. + return false; + } + } // switch } // Check for being too big - if( body_size > (uint)LoopUnrollLimit ) { - if( xors_in_loop >= 4 && body_size < (uint)LoopUnrollLimit*4) return true; + if (body_size > (uint)LoopUnrollLimit) { + if (xors_in_loop >= 4 && body_size < (uint)LoopUnrollLimit*4) return true; // Normal case: loop too big return false; } - // Check for stride being a small enough constant - if( abs(cl->stride_con()) > (1<<3) ) return false; - // Unroll once! (Each trip will soon do double iterations) return true; } @@ -645,6 +852,15 @@ // alignment. Useful to unroll loops that do no array accesses. void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only ) { +#ifndef PRODUCT + if (TraceLoopOpts) { + if (peel_only) + tty->print("PeelMainPost "); + else + tty->print("PreMainPost "); + loop->dump_head(); + } +#endif C->set_major_progress(); // Find common pieces of the loop being guarded with pre & post loops @@ -897,16 +1113,23 @@ //------------------------------do_unroll-------------------------------------- // Unroll the loop body one step - make each trip do 2 iterations. 
void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip ) { - assert( LoopUnrollLimit, "" ); + assert(LoopUnrollLimit, ""); + CountedLoopNode *loop_head = loop->_head->as_CountedLoop(); + CountedLoopEndNode *loop_end = loop_head->loopexit(); + assert(loop_end, ""); #ifndef PRODUCT - if( PrintOpto && VerifyLoopOptimizations ) { + if (PrintOpto && VerifyLoopOptimizations) { tty->print("Unrolling "); loop->dump_head(); + } else if (TraceLoopOpts) { + if (loop_head->trip_count() < (uint)LoopUnrollLimit) { + tty->print("Unroll %d(%2d) ", loop_head->unrolled_count()*2, loop_head->trip_count()); + } else { + tty->print("Unroll %d ", loop_head->unrolled_count()*2); + } + loop->dump_head(); } #endif - CountedLoopNode *loop_head = loop->_head->as_CountedLoop(); - CountedLoopEndNode *loop_end = loop_head->loopexit(); - assert( loop_end, "" ); // Remember loop node count before unrolling to detect // if rounds of unroll,optimize are making progress @@ -915,7 +1138,7 @@ Node *ctrl = loop_head->in(LoopNode::EntryControl); Node *limit = loop_head->limit(); Node *init = loop_head->init_trip(); - Node *strid = loop_head->stride(); + Node *stride = loop_head->stride(); Node *opaq = NULL; if( adjust_min_trip ) { // If not maximally unrolling, need adjustment @@ -955,13 +1178,13 @@ // odd iteration: (trip_cnt & ~1). Then back compute a new limit. Node *span = new (C, 3) SubINode( limit, init ); register_new_node( span, ctrl ); - Node *trip = new (C, 3) DivINode( 0, span, strid ); + Node *trip = new (C, 3) DivINode( 0, span, stride ); register_new_node( trip, ctrl ); Node *mtwo = _igvn.intcon(-2); set_ctrl(mtwo, C->root()); Node *rond = new (C, 3) AndINode( trip, mtwo ); register_new_node( rond, ctrl ); - Node *spn2 = new (C, 3) MulINode( rond, strid ); + Node *spn2 = new (C, 3) MulINode( rond, stride ); register_new_node( spn2, ctrl ); Node *lim2 = new (C, 3) AddINode( spn2, init ); register_new_node( lim2, ctrl ); @@ -1040,17 +1263,23 @@ void PhaseIdealLoop::do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new ) { CountedLoopNode *cl = loop->_head->as_CountedLoop(); - assert( cl->trip_count() > 0, ""); + assert(cl->trip_count() > 0, ""); +#ifndef PRODUCT + if (TraceLoopOpts) { + tty->print("MaxUnroll %d ", cl->trip_count()); + loop->dump_head(); + } +#endif // If loop is tripping an odd number of times, peel odd iteration - if( (cl->trip_count() & 1) == 1 ) { - do_peeling( loop, old_new ); + if ((cl->trip_count() & 1) == 1) { + do_peeling(loop, old_new); } // Now its tripping an even number of times remaining. Double loop body. // Do not adjust pre-guards; they are not needed and do not exist. - if( cl->trip_count() > 0 ) { - do_unroll( loop, old_new, false ); + if (cl->trip_count() > 0) { + do_unroll(loop, old_new, false); } } @@ -1227,35 +1456,55 @@ // Eliminate range-checks and other trip-counter vs loop-invariant tests. 
void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) { #ifndef PRODUCT - if( PrintOpto && VerifyLoopOptimizations ) { + if (PrintOpto && VerifyLoopOptimizations) { tty->print("Range Check Elimination "); loop->dump_head(); + } else if (TraceLoopOpts) { + tty->print("RangeCheck "); + loop->dump_head(); } #endif - assert( RangeCheckElimination, "" ); + assert(RangeCheckElimination, ""); CountedLoopNode *cl = loop->_head->as_CountedLoop(); - assert( cl->is_main_loop(), "" ); + assert(cl->is_main_loop(), ""); + + // protect against stride not being a constant + if (!cl->stride_is_con()) + return; // Find the trip counter; we are iteration splitting based on it Node *trip_counter = cl->phi(); // Find the main loop limit; we will trim it's iterations // to not ever trip end tests Node *main_limit = cl->limit(); + + // Need to find the main-loop zero-trip guard + Node *ctrl = cl->in(LoopNode::EntryControl); + assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, ""); + Node *iffm = ctrl->in(0); + assert(iffm->Opcode() == Op_If, ""); + Node *bolzm = iffm->in(1); + assert(bolzm->Opcode() == Op_Bool, ""); + Node *cmpzm = bolzm->in(1); + assert(cmpzm->is_Cmp(), ""); + Node *opqzm = cmpzm->in(2); + // Can not optimize a loop if pre-loop Opaque1 node is optimized + // away and then another round of loop opts attempted. + if (opqzm->Opcode() != Op_Opaque1) + return; + assert(opqzm->in(1) == main_limit, "do not understand situation"); + // Find the pre-loop limit; we will expand it's iterations to // not ever trip low tests. - Node *ctrl = cl->in(LoopNode::EntryControl); - assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" ); - Node *iffm = ctrl->in(0); - assert( iffm->Opcode() == Op_If, "" ); Node *p_f = iffm->in(0); - assert( p_f->Opcode() == Op_IfFalse, "" ); + assert(p_f->Opcode() == Op_IfFalse, ""); CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd(); - assert( pre_end->loopnode()->is_pre_loop(), "" ); + assert(pre_end->loopnode()->is_pre_loop(), ""); Node *pre_opaq1 = pre_end->limit(); // Occasionally it's possible for a pre-loop Opaque1 node to be // optimized away and then another round of loop opts attempted. // We can not optimize this particular loop in that case. - if( pre_opaq1->Opcode() != Op_Opaque1 ) + if (pre_opaq1->Opcode() != Op_Opaque1) return; Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1; Node *pre_limit = pre_opaq->in(1); @@ -1266,25 +1515,11 @@ // Ensure the original loop limit is available from the // pre-loop Opaque1 node. Node *orig_limit = pre_opaq->original_loop_limit(); - if( orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP ) + if (orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP) return; - // Need to find the main-loop zero-trip guard - Node *bolzm = iffm->in(1); - assert( bolzm->Opcode() == Op_Bool, "" ); - Node *cmpzm = bolzm->in(1); - assert( cmpzm->is_Cmp(), "" ); - Node *opqzm = cmpzm->in(2); - if( opqzm->Opcode() != Op_Opaque1 ) - return; - assert( opqzm->in(1) == main_limit, "do not understand situation" ); - // Must know if its a count-up or count-down loop - // protect against stride not being a constant - if ( !cl->stride_is_con() ) { - return; - } int stride_con = cl->stride_con(); Node *zero = _igvn.intcon(0); Node *one = _igvn.intcon(1); @@ -1566,17 +1801,17 @@ // have on the last iteration. This will break the loop. 
bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) { // Minimum size must be empty loop - if( _body.size() > 7/*number of nodes in an empty loop*/ ) return false; + if (_body.size() > EMPTY_LOOP_SIZE) + return false; - if( !_head->is_CountedLoop() ) return false; // Dead loop + if (!_head->is_CountedLoop()) + return false; // Dead loop CountedLoopNode *cl = _head->as_CountedLoop(); - if( !cl->loopexit() ) return false; // Malformed loop - if( !phase->is_member(this,phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue)) ) ) + if (!cl->loopexit()) + return false; // Malformed loop + if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue)))) return false; // Infinite loop -#ifndef PRODUCT - if( PrintOpto ) - tty->print_cr("Removing empty loop"); -#endif + #ifdef ASSERT // Ensure only one phi which is the iv. Node* iv = NULL; @@ -1589,6 +1824,54 @@ } assert(iv == cl->phi(), "Wrong phi" ); #endif + + // main and post loops have explicitly created zero trip guard + bool needs_guard = !cl->is_main_loop() && !cl->is_post_loop(); + if (needs_guard) { + // Skip guard if values not overlap. + const TypeInt* init_t = phase->_igvn.type(cl->init_trip())->is_int(); + const TypeInt* limit_t = phase->_igvn.type(cl->limit())->is_int(); + int stride_con = cl->stride_con(); + if (stride_con > 0) { + needs_guard = (init_t->_hi >= limit_t->_lo); + } else { + needs_guard = (init_t->_lo <= limit_t->_hi); + } + } + if (needs_guard) { + // Check for an obvious zero trip guard. + Node* inctrl = PhaseIdealLoop::skip_loop_predicates(cl->in(LoopNode::EntryControl)); + if (inctrl->Opcode() == Op_IfTrue) { + // The test should look like just the backedge of a CountedLoop + Node* iff = inctrl->in(0); + if (iff->is_If()) { + Node* bol = iff->in(1); + if (bol->is_Bool() && bol->as_Bool()->_test._test == cl->loopexit()->test_trip()) { + Node* cmp = bol->in(1); + if (cmp->is_Cmp() && cmp->in(1) == cl->init_trip() && cmp->in(2) == cl->limit()) { + needs_guard = false; + } + } + } + } + } + +#ifndef PRODUCT + if (PrintOpto) { + tty->print("Removing empty loop with%s zero trip guard", needs_guard ? "out" : ""); + this->dump_head(); + } else if (TraceLoopOpts) { + tty->print("Empty with%s zero trip guard ", needs_guard ? "out" : ""); + this->dump_head(); + } +#endif + + if (needs_guard) { + // Peel the loop to ensure there's a zero trip guard + Node_List old_new; + phase->do_peeling(this, old_new); + } + // Replace the phi at loop head with the final value of the last // iteration. Then the CountedLoopEnd will collapse (backedge never // taken) and all loop-invariant uses of the exit values will be correct. @@ -1600,12 +1883,49 @@ return true; } +//------------------------------policy_do_one_iteration_loop------------------- +// Convert one iteration loop into normal code. +bool IdealLoopTree::policy_do_one_iteration_loop( PhaseIdealLoop *phase ) { + if (!_head->as_Loop()->is_valid_counted_loop()) + return false; // Only for counted loop + + CountedLoopNode *cl = _head->as_CountedLoop(); + if (!cl->has_exact_trip_count() || cl->trip_count() != 1) { + return false; + } + +#ifndef PRODUCT + if(TraceLoopOpts) { + tty->print("OneIteration "); + this->dump_head(); + } +#endif + + Node *init_n = cl->init_trip(); +#ifdef ASSERT + // Loop boundaries should be constant since trip count is exact. + assert(init_n->get_int() + cl->stride_con() >= cl->limit()->get_int(), "should be one iteration"); +#endif + // Replace the phi at loop head with the value of the init_trip. 
+ // Then the CountedLoopEnd will collapse (backedge will not be taken) + // and all loop-invariant uses of the exit values will be correct. + phase->_igvn.replace_node(cl->phi(), cl->init_trip()); + phase->C->set_major_progress(); + return true; +} //============================================================================= //------------------------------iteration_split_impl--------------------------- bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) { + // Compute exact loop trip count if possible. + compute_exact_trip_count(phase); + + // Convert one iteration loop into normal code. + if (policy_do_one_iteration_loop(phase)) + return true; + // Check and remove empty loops (spam micro-benchmarks) - if( policy_do_remove_empty_loop(phase) ) + if (policy_do_remove_empty_loop(phase)) return true; // Here we removed an empty loop bool should_peel = policy_peeling(phase); // Should we peel? @@ -1614,40 +1934,40 @@ // Non-counted loops may be peeled; exactly 1 iteration is peeled. // This removes loop-invariant tests (usually null checks). - if( !_head->is_CountedLoop() ) { // Non-counted loop + if (!_head->is_CountedLoop()) { // Non-counted loop if (PartialPeelLoop && phase->partial_peel(this, old_new)) { // Partial peel succeeded so terminate this round of loop opts return false; } - if( should_peel ) { // Should we peel? + if (should_peel) { // Should we peel? #ifndef PRODUCT if (PrintOpto) tty->print_cr("should_peel"); #endif phase->do_peeling(this,old_new); - } else if( should_unswitch ) { + } else if (should_unswitch) { phase->do_unswitching(this, old_new); } return true; } CountedLoopNode *cl = _head->as_CountedLoop(); - if( !cl->loopexit() ) return true; // Ignore various kinds of broken loops + if (!cl->loopexit()) return true; // Ignore various kinds of broken loops // Do nothing special to pre- and post- loops - if( cl->is_pre_loop() || cl->is_post_loop() ) return true; + if (cl->is_pre_loop() || cl->is_post_loop()) return true; // Compute loop trip count from profile data compute_profile_trip_cnt(phase); // Before attempting fancy unrolling, RCE or alignment, see if we want // to completely unroll this loop or do loop unswitching. - if( cl->is_normal_loop() ) { + if (cl->is_normal_loop()) { if (should_unswitch) { phase->do_unswitching(this, old_new); return true; } bool should_maximally_unroll = policy_maximally_unroll(phase); - if( should_maximally_unroll ) { + if (should_maximally_unroll) { // Here we did some unrolling and peeling. Eventually we will // completely unroll this loop and it will no longer be a loop. phase->do_maximally_unroll(this,old_new); @@ -1655,6 +1975,12 @@ } } + // Skip next optimizations if running low on nodes. Note that + // policy_unswitching and policy_maximally_unroll have this check. + uint nodes_left = MaxNodeLimit - phase->C->unique(); + if ((2 * _body.size()) > nodes_left) { + return true; + } // Counted loops may be peeled, may need some iterations run up // front for RCE, and may want to align loop refs to a cache @@ -1685,14 +2011,14 @@ // If we have any of these conditions (RCE, alignment, unrolling) met, then // we switch to the pre-/main-/post-loop model. This model also covers // peeling. 
- if( should_rce || should_align || should_unroll ) { - if( cl->is_normal_loop() ) // Convert to 'pre/main/post' loops + if (should_rce || should_align || should_unroll) { + if (cl->is_normal_loop()) // Convert to 'pre/main/post' loops phase->insert_pre_post_loops(this,old_new, !may_rce_align); // Adjust the pre- and main-loop limits to let the pre and post loops run // with full checks, but the main-loop with no checks. Remove said // checks from the main body. - if( should_rce ) + if (should_rce) phase->do_range_check(this,old_new); // Double loop body for unrolling. Adjust the minimum-trip test (will do @@ -1700,16 +2026,16 @@ // an even number of trips). If we are peeling, we might enable some RCE // and we'd rather unroll the post-RCE'd loop SO... do not unroll if // peeling. - if( should_unroll && !should_peel ) - phase->do_unroll(this,old_new, true); + if (should_unroll && !should_peel) + phase->do_unroll(this,old_new, true); // Adjust the pre-loop limits to align the main body // iterations. - if( should_align ) + if (should_align) Unimplemented(); } else { // Else we have an unchanged counted loop - if( should_peel ) // Might want to peel but do nothing else + if (should_peel) // Might want to peel but do nothing else phase->do_peeling(this,old_new); } return true; @@ -1720,7 +2046,7 @@ //------------------------------iteration_split-------------------------------- bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new ) { // Recursively iteration split nested loops - if( _child && !_child->iteration_split( phase, old_new )) + if (_child && !_child->iteration_split(phase, old_new)) return false; // Clean out prior deadwood @@ -1729,21 +2055,20 @@ // Look for loop-exit tests with my 50/50 guesses from the Parsing stage. // Replace with a 1-in-10 exit guess. - if( _parent /*not the root loop*/ && + if (_parent /*not the root loop*/ && !_irreducible && // Also ignore the occasional dead backedge - !tail()->is_top() ) { + !tail()->is_top()) { adjust_loop_exit_prob(phase); } - // Gate unrolling, RCE and peeling efforts. - if( !_child && // If not an inner loop, do not split + if (!_child && // If not an inner loop, do not split !_irreducible && _allow_optimizations && - !tail()->is_top() ) { // Also ignore the occasional dead backedge + !tail()->is_top()) { // Also ignore the occasional dead backedge if (!_has_call) { - if (!iteration_split_impl( phase, old_new )) { + if (!iteration_split_impl(phase, old_new)) { return false; } } else if (policy_unswitching(phase)) { @@ -1752,639 +2077,16 @@ } // Minor offset re-organization to remove loop-fallout uses of - // trip counter. - if( _head->is_CountedLoop() ) phase->reorg_offsets( this ); - if( _next && !_next->iteration_split( phase, old_new )) + // trip counter when there was no major reshaping. 
+ phase->reorg_offsets(this); + + if (_next && !_next->iteration_split(phase, old_new)) return false; return true; } -//-------------------------------is_uncommon_trap_proj---------------------------- -// Return true if proj is the form of "proj->[region->..]call_uct" -bool PhaseIdealLoop::is_uncommon_trap_proj(ProjNode* proj, bool must_reason_predicate) { - int path_limit = 10; - assert(proj, "invalid argument"); - Node* out = proj; - for (int ct = 0; ct < path_limit; ct++) { - out = out->unique_ctrl_out(); - if (out == NULL || out->is_Root() || out->is_Start()) - return false; - if (out->is_CallStaticJava()) { - int req = out->as_CallStaticJava()->uncommon_trap_request(); - if (req != 0) { - Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(req); - if (!must_reason_predicate || reason == Deoptimization::Reason_predicate){ - return true; - } - } - return false; // don't do further after call - } - } - return false; -} -//-------------------------------is_uncommon_trap_if_pattern------------------------- -// Return true for "if(test)-> proj -> ... -// | -// V -// other_proj->[region->..]call_uct" -// -// "must_reason_predicate" means the uct reason must be Reason_predicate -bool PhaseIdealLoop::is_uncommon_trap_if_pattern(ProjNode *proj, bool must_reason_predicate) { - Node *in0 = proj->in(0); - if (!in0->is_If()) return false; - // Variation of a dead If node. - if (in0->outcnt() < 2) return false; - IfNode* iff = in0->as_If(); - - // we need "If(Conv2B(Opaque1(...)))" pattern for must_reason_predicate - if (must_reason_predicate) { - if (iff->in(1)->Opcode() != Op_Conv2B || - iff->in(1)->in(1)->Opcode() != Op_Opaque1) { - return false; - } - } - - ProjNode* other_proj = iff->proj_out(1-proj->_con)->as_Proj(); - return is_uncommon_trap_proj(other_proj, must_reason_predicate); -} - -//------------------------------create_new_if_for_predicate------------------------ -// create a new if above the uct_if_pattern for the predicate to be promoted. -// -// before after -// ---------- ---------- -// ctrl ctrl -// | | -// | | -// v v -// iff new_iff -// / \ / \ -// / \ / \ -// v v v v -// uncommon_proj cont_proj if_uct if_cont -// \ | | | | -// \ | | | | -// v v v | v -// rgn loop | iff -// | | / \ -// | | / \ -// v | v v -// uncommon_trap | uncommon_proj cont_proj -// \ \ | | -// \ \ | | -// v v v v -// rgn loop -// | -// | -// v -// uncommon_trap -// -// -// We will create a region to guard the uct call if there is no one there. -// The true projecttion (if_cont) of the new_iff is returned. 
-ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj) { - assert(is_uncommon_trap_if_pattern(cont_proj, true), "must be a uct if pattern!"); - IfNode* iff = cont_proj->in(0)->as_If(); - - ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con); - Node *rgn = uncommon_proj->unique_ctrl_out(); - assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct"); - - if (!rgn->is_Region()) { // create a region to guard the call - assert(rgn->is_Call(), "must be call uct"); - CallNode* call = rgn->as_Call(); - rgn = new (C, 1) RegionNode(1); - _igvn.set_type(rgn, rgn->bottom_type()); - rgn->add_req(uncommon_proj); - set_idom(rgn, idom(uncommon_proj), dom_depth(uncommon_proj)+1); - _igvn.hash_delete(call); - call->set_req(0, rgn); - } - - // Create new_iff - uint iffdd = dom_depth(iff); - IdealLoopTree* lp = get_loop(iff); - IfNode *new_iff = new (C, 2) IfNode(iff->in(0), NULL, iff->_prob, iff->_fcnt); - register_node(new_iff, lp, idom(iff), iffdd); - Node *if_cont = new (C, 1) IfTrueNode(new_iff); - Node *if_uct = new (C, 1) IfFalseNode(new_iff); - if (cont_proj->is_IfFalse()) { - // Swap - Node* tmp = if_uct; if_uct = if_cont; if_cont = tmp; - } - register_node(if_cont, lp, new_iff, iffdd); - register_node(if_uct, get_loop(rgn), new_iff, iffdd); - - // if_cont to iff - _igvn.hash_delete(iff); - iff->set_req(0, if_cont); - set_idom(iff, if_cont, dom_depth(iff)); - - // if_uct to rgn - _igvn.hash_delete(rgn); - rgn->add_req(if_uct); - Node* ridom = idom(rgn); - Node* nrdom = dom_lca(ridom, new_iff); - set_idom(rgn, nrdom, dom_depth(rgn)); - - // rgn must have no phis - assert(!rgn->as_Region()->has_phi(), "region must have no phis"); - - return if_cont->as_Proj(); -} - -//------------------------------find_predicate_insertion_point-------------------------- -// Find a good location to insert a predicate -ProjNode* PhaseIdealLoop::find_predicate_insertion_point(Node* start_c) { - if (start_c == C->root() || !start_c->is_Proj()) - return NULL; - if (is_uncommon_trap_if_pattern(start_c->as_Proj(), true/*Reason_Predicate*/)) { - return start_c->as_Proj(); - } - return NULL; -} - -//------------------------------Invariance----------------------------------- -// Helper class for loop_predication_impl to compute invariance on the fly and -// clone invariants. -class Invariance : public StackObj { - VectorSet _visited, _invariant; - Node_Stack _stack; - VectorSet _clone_visited; - Node_List _old_new; // map of old to new (clone) - IdealLoopTree* _lpt; - PhaseIdealLoop* _phase; - - // Helper function to set up the invariance for invariance computation - // If n is a known invariant, set up directly. Otherwise, look up the - // the possibility to push n onto the stack for further processing. - void visit(Node* use, Node* n) { - if (_lpt->is_invariant(n)) { // known invariant - _invariant.set(n->_idx); - } else if (!n->is_CFG()) { - Node *n_ctrl = _phase->ctrl_or_self(n); - Node *u_ctrl = _phase->ctrl_or_self(use); // self if use is a CFG - if (_phase->is_dominator(n_ctrl, u_ctrl)) { - _stack.push(n, n->in(0) == NULL ? 
1 : 0); - } - } - } - - // Compute invariance for "the_node" and (possibly) all its inputs recursively - // on the fly - void compute_invariance(Node* n) { - assert(_visited.test(n->_idx), "must be"); - visit(n, n); - while (_stack.is_nonempty()) { - Node* n = _stack.node(); - uint idx = _stack.index(); - if (idx == n->req()) { // all inputs are processed - _stack.pop(); - // n is invariant if it's inputs are all invariant - bool all_inputs_invariant = true; - for (uint i = 0; i < n->req(); i++) { - Node* in = n->in(i); - if (in == NULL) continue; - assert(_visited.test(in->_idx), "must have visited input"); - if (!_invariant.test(in->_idx)) { // bad guy - all_inputs_invariant = false; - break; - } - } - if (all_inputs_invariant) { - _invariant.set(n->_idx); // I am a invariant too - } - } else { // process next input - _stack.set_index(idx + 1); - Node* m = n->in(idx); - if (m != NULL && !_visited.test_set(m->_idx)) { - visit(n, m); - } - } - } - } - - // Helper function to set up _old_new map for clone_nodes. - // If n is a known invariant, set up directly ("clone" of n == n). - // Otherwise, push n onto the stack for real cloning. - void clone_visit(Node* n) { - assert(_invariant.test(n->_idx), "must be invariant"); - if (_lpt->is_invariant(n)) { // known invariant - _old_new.map(n->_idx, n); - } else{ // to be cloned - assert (!n->is_CFG(), "should not see CFG here"); - _stack.push(n, n->in(0) == NULL ? 1 : 0); - } - } - - // Clone "n" and (possibly) all its inputs recursively - void clone_nodes(Node* n, Node* ctrl) { - clone_visit(n); - while (_stack.is_nonempty()) { - Node* n = _stack.node(); - uint idx = _stack.index(); - if (idx == n->req()) { // all inputs processed, clone n! - _stack.pop(); - // clone invariant node - Node* n_cl = n->clone(); - _old_new.map(n->_idx, n_cl); - _phase->register_new_node(n_cl, ctrl); - for (uint i = 0; i < n->req(); i++) { - Node* in = n_cl->in(i); - if (in == NULL) continue; - n_cl->set_req(i, _old_new[in->_idx]); - } - } else { // process next input - _stack.set_index(idx + 1); - Node* m = n->in(idx); - if (m != NULL && !_clone_visited.test_set(m->_idx)) { - clone_visit(m); // visit the input - } - } - } - } - - public: - Invariance(Arena* area, IdealLoopTree* lpt) : - _lpt(lpt), _phase(lpt->_phase), - _visited(area), _invariant(area), _stack(area, 10 /* guess */), - _clone_visited(area), _old_new(area) - {} - - // Map old to n for invariance computation and clone - void map_ctrl(Node* old, Node* n) { - assert(old->is_CFG() && n->is_CFG(), "must be"); - _old_new.map(old->_idx, n); // "clone" of old is n - _invariant.set(old->_idx); // old is invariant - _clone_visited.set(old->_idx); - } - - // Driver function to compute invariance - bool is_invariant(Node* n) { - if (!_visited.test_set(n->_idx)) - compute_invariance(n); - return (_invariant.test(n->_idx) != 0); - } - - // Driver function to clone invariant - Node* clone(Node* n, Node* ctrl) { - assert(ctrl->is_CFG(), "must be"); - assert(_invariant.test(n->_idx), "must be an invariant"); - if (!_clone_visited.test(n->_idx)) - clone_nodes(n, ctrl); - return _old_new[n->_idx]; - } -}; - -//------------------------------is_range_check_if ----------------------------------- -// Returns true if the predicate of iff is in "scale*iv + offset u< load_range(ptr)" format -// Note: this function is particularly designed for loop predication. 
We require load_range -// and offset to be loop invariant computed on the fly by "invar" -bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const { - if (!is_loop_exit(iff)) { - return false; - } - if (!iff->in(1)->is_Bool()) { - return false; - } - const BoolNode *bol = iff->in(1)->as_Bool(); - if (bol->_test._test != BoolTest::lt) { - return false; - } - if (!bol->in(1)->is_Cmp()) { - return false; - } - const CmpNode *cmp = bol->in(1)->as_Cmp(); - if (cmp->Opcode() != Op_CmpU ) { - return false; - } - Node* range = cmp->in(2); - if (range->Opcode() != Op_LoadRange) { - const TypeInt* tint = phase->_igvn.type(range)->isa_int(); - if (!OptimizeFill || tint == NULL || tint->empty() || tint->_lo < 0) { - // Allow predication on positive values that aren't LoadRanges. - // This allows optimization of loops where the length of the - // array is a known value and doesn't need to be loaded back - // from the array. - return false; - } - } - if (!invar.is_invariant(range)) { - return false; - } - Node *iv = _head->as_CountedLoop()->phi(); - int scale = 0; - Node *offset = NULL; - if (!phase->is_scaled_iv_plus_offset(cmp->in(1), iv, &scale, &offset)) { - return false; - } - if(offset && !invar.is_invariant(offset)) { // offset must be invariant - return false; - } - return true; -} - -//------------------------------rc_predicate----------------------------------- -// Create a range check predicate -// -// for (i = init; i < limit; i += stride) { -// a[scale*i+offset] -// } -// -// Compute max(scale*i + offset) for init <= i < limit and build the predicate -// as "max(scale*i + offset) u< a.length". -// -// There are two cases for max(scale*i + offset): -// (1) stride*scale > 0 -// max(scale*i + offset) = scale*(limit-stride) + offset -// (2) stride*scale < 0 -// max(scale*i + offset) = scale*init + offset -BoolNode* PhaseIdealLoop::rc_predicate(Node* ctrl, - int scale, Node* offset, - Node* init, Node* limit, Node* stride, - Node* range, bool upper) { - DEBUG_ONLY(ttyLocker ttyl); - if (TraceLoopPredicate) tty->print("rc_predicate "); - - Node* max_idx_expr = init; - int stride_con = stride->get_int(); - if ((stride_con > 0) == (scale > 0) == upper) { - max_idx_expr = new (C, 3) SubINode(limit, stride); - register_new_node(max_idx_expr, ctrl); - if (TraceLoopPredicate) tty->print("(limit - stride) "); - } else { - if (TraceLoopPredicate) tty->print("init "); - } - - if (scale != 1) { - ConNode* con_scale = _igvn.intcon(scale); - max_idx_expr = new (C, 3) MulINode(max_idx_expr, con_scale); - register_new_node(max_idx_expr, ctrl); - if (TraceLoopPredicate) tty->print("* %d ", scale); - } - - if (offset && (!offset->is_Con() || offset->get_int() != 0)){ - max_idx_expr = new (C, 3) AddINode(max_idx_expr, offset); - register_new_node(max_idx_expr, ctrl); - if (TraceLoopPredicate) - if (offset->is_Con()) tty->print("+ %d ", offset->get_int()); - else tty->print("+ offset "); - } - - CmpUNode* cmp = new (C, 3) CmpUNode(max_idx_expr, range); - register_new_node(cmp, ctrl); - BoolNode* bol = new (C, 2) BoolNode(cmp, BoolTest::lt); - register_new_node(bol, ctrl); - - if (TraceLoopPredicate) tty->print_cr("<u range"); - return bol; -} - -//------------------------------ loop_predication_impl-------------------------- -// Insert loop predicates for null checks and range checks -bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) { - if (!UseLoopPredicate) return false; - - if (!loop->_head->is_Loop()) { - // Could be a simple region when irreducible 
loops are present. - return false; - } - - CountedLoopNode *cl = NULL; - if (loop->_head->is_CountedLoop()) { - cl = loop->_head->as_CountedLoop(); - // do nothing for iteration-splitted loops - if (!cl->is_normal_loop()) return false; - } - - // Too many traps seen? - bool tmt = C->too_many_traps(C->method(), 0, Deoptimization::Reason_predicate); - int tc = C->trap_count(Deoptimization::Reason_predicate); - if (tmt || tc > 0) { - if (TraceLoopPredicate) { - tty->print_cr("too many predicate traps: %d", tc); - C->method()->print(); // which method has too many predicate traps - tty->print_cr(""); - } - return false; - } - - LoopNode *lpn = loop->_head->as_Loop(); - Node* entry = lpn->in(LoopNode::EntryControl); - - ProjNode *predicate_proj = find_predicate_insertion_point(entry); - if (!predicate_proj){ -#ifndef PRODUCT - if (TraceLoopPredicate) { - tty->print("missing predicate:"); - loop->dump_head(); - } -#endif - return false; - } - - ConNode* zero = _igvn.intcon(0); - set_ctrl(zero, C->root()); - Node *cond_false = new (C, 2) Conv2BNode(zero); - register_new_node(cond_false, C->root()); - ConNode* one = _igvn.intcon(1); - set_ctrl(one, C->root()); - Node *cond_true = new (C, 2) Conv2BNode(one); - register_new_node(cond_true, C->root()); - - ResourceArea *area = Thread::current()->resource_area(); - Invariance invar(area, loop); - - // Create list of if-projs such that a newer proj dominates all older - // projs in the list, and they all dominate loop->tail() - Node_List if_proj_list(area); - LoopNode *head = loop->_head->as_Loop(); - Node *current_proj = loop->tail(); //start from tail - while ( current_proj != head ) { - if (loop == get_loop(current_proj) && // still in the loop ? - current_proj->is_Proj() && // is a projection ? - current_proj->in(0)->Opcode() == Op_If) { // is a if projection ? - if_proj_list.push(current_proj); - } - current_proj = idom(current_proj); - } - - bool hoisted = false; // true if at least one proj is promoted - while (if_proj_list.size() > 0) { - // Following are changed to nonnull when a predicate can be hoisted - ProjNode* new_predicate_proj = NULL; - - ProjNode* proj = if_proj_list.pop()->as_Proj(); - IfNode* iff = proj->in(0)->as_If(); - - if (!is_uncommon_trap_if_pattern(proj)) { - if (loop->is_loop_exit(iff)) { - // stop processing the remaining projs in the list because the execution of them - // depends on the condition of "iff" (iff->in(1)). - break; - } else { - // Both arms are inside the loop. There are two cases: - // (1) there is one backward branch. In this case, any remaining proj - // in the if_proj list post-dominates "iff". So, the condition of "iff" - // does not determine the execution the remining projs directly, and we - // can safely continue. - // (2) both arms are forwarded, i.e. a diamond shape. In this case, "proj" - // does not dominate loop->tail(), so it can not be in the if_proj list. - continue; - } - } - - Node* test = iff->in(1); - if (!test->is_Bool()){ //Conv2B, ... 
- continue; - } - BoolNode* bol = test->as_Bool(); - if (invar.is_invariant(bol)) { - // Invariant test - new_predicate_proj = create_new_if_for_predicate(predicate_proj); - Node* ctrl = new_predicate_proj->in(0)->as_If()->in(0); - BoolNode* new_predicate_bol = invar.clone(bol, ctrl)->as_Bool(); - - // Negate test if necessary - bool negated = false; - if (proj->_con != predicate_proj->_con) { - new_predicate_bol = new (C, 2) BoolNode(new_predicate_bol->in(1), new_predicate_bol->_test.negate()); - register_new_node(new_predicate_bol, ctrl); - negated = true; - } - IfNode* new_predicate_iff = new_predicate_proj->in(0)->as_If(); - _igvn.hash_delete(new_predicate_iff); - new_predicate_iff->set_req(1, new_predicate_bol); - if (TraceLoopPredicate) tty->print_cr("invariant if%s: %d", negated ? " negated" : "", new_predicate_iff->_idx); - - } else if (cl != NULL && loop->is_range_check_if(iff, this, invar)) { - assert(proj->_con == predicate_proj->_con, "must match"); - - // Range check for counted loops - const Node* cmp = bol->in(1)->as_Cmp(); - Node* idx = cmp->in(1); - assert(!invar.is_invariant(idx), "index is variant"); - assert(cmp->in(2)->Opcode() == Op_LoadRange || OptimizeFill, "must be"); - Node* rng = cmp->in(2); - assert(invar.is_invariant(rng), "range must be invariant"); - int scale = 1; - Node* offset = zero; - bool ok = is_scaled_iv_plus_offset(idx, cl->phi(), &scale, &offset); - assert(ok, "must be index expression"); - - Node* init = cl->init_trip(); - Node* limit = cl->limit(); - Node* stride = cl->stride(); - - // Build if's for the upper and lower bound tests. The - // lower_bound test will dominate the upper bound test and all - // cloned or created nodes will use the lower bound test as - // their declared control. - ProjNode* lower_bound_proj = create_new_if_for_predicate(predicate_proj); - ProjNode* upper_bound_proj = create_new_if_for_predicate(predicate_proj); - assert(upper_bound_proj->in(0)->as_If()->in(0) == lower_bound_proj, "should dominate"); - Node *ctrl = lower_bound_proj->in(0)->as_If()->in(0); - - // Perform cloning to keep Invariance state correct since the - // late schedule will place invariant things in the loop. - rng = invar.clone(rng, ctrl); - if (offset && offset != zero) { - assert(invar.is_invariant(offset), "offset must be loop invariant"); - offset = invar.clone(offset, ctrl); - } - - // Test the lower bound - Node* lower_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, rng, false); - IfNode* lower_bound_iff = lower_bound_proj->in(0)->as_If(); - _igvn.hash_delete(lower_bound_iff); - lower_bound_iff->set_req(1, lower_bound_bol); - if (TraceLoopPredicate) tty->print_cr("lower bound check if: %d", lower_bound_iff->_idx); - - // Test the upper bound - Node* upper_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, rng, true); - IfNode* upper_bound_iff = upper_bound_proj->in(0)->as_If(); - _igvn.hash_delete(upper_bound_iff); - upper_bound_iff->set_req(1, upper_bound_bol); - if (TraceLoopPredicate) tty->print_cr("upper bound check if: %d", lower_bound_iff->_idx); - - // Fall through into rest of the clean up code which will move - // any dependent nodes onto the upper bound test. - new_predicate_proj = upper_bound_proj; - } else { - // The other proj of the "iff" is a uncommon trap projection, and we can assume - // the other proj will not be executed ("executed" means uct raised). 
- continue; - } - - // Success - attach condition (new_predicate_bol) to predicate if - invar.map_ctrl(proj, new_predicate_proj); // so that invariance test can be appropriate - - // Eliminate the old if in the loop body - _igvn.hash_delete(iff); - iff->set_req(1, proj->is_IfFalse() ? cond_false : cond_true); - - Node* ctrl = new_predicate_proj; // new control - ProjNode* dp = proj; // old control - assert(get_loop(dp) == loop, "guaranteed at the time of collecting proj"); - // Find nodes (depends only on the test) off the surviving projection; - // move them outside the loop with the control of proj_clone - for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) { - Node* cd = dp->fast_out(i); // Control-dependent node - if (cd->depends_only_on_test()) { - assert(cd->in(0) == dp, ""); - _igvn.hash_delete(cd); - cd->set_req(0, ctrl); // ctrl, not NULL - set_early_ctrl(cd); - _igvn._worklist.push(cd); - IdealLoopTree *new_loop = get_loop(get_ctrl(cd)); - if (new_loop != loop) { - if (!loop->_child) loop->_body.yank(cd); - if (!new_loop->_child ) new_loop->_body.push(cd); - } - --i; - --imax; - } - } - - hoisted = true; - C->set_major_progress(); - } // end while - -#ifndef PRODUCT - // report that the loop predication has been actually performed - // for this loop - if (TraceLoopPredicate && hoisted) { - tty->print("Loop Predication Performed:"); - loop->dump_head(); - } -#endif - - return hoisted; -} - -//------------------------------loop_predication-------------------------------- -// driver routine for loop predication optimization -bool IdealLoopTree::loop_predication( PhaseIdealLoop *phase) { - bool hoisted = false; - // Recursively promote predicates - if ( _child ) { - hoisted = _child->loop_predication( phase); - } - - // self - if (!_irreducible && !tail()->is_top()) { - hoisted |= phase->loop_predication_impl(this); - } - - if ( _next ) { //sibling - hoisted |= _next->loop_predication( phase); - } - - return hoisted; -} - - +//============================================================================= // Process all the loops in the loop tree and replace any fill // patterns with an intrisc version. bool PhaseIdealLoop::do_intrinsify_fill() { @@ -2504,9 +2206,12 @@ if (value != head->phi()) { msg = "unhandled shift in address"; } else { - found_index = true; - shift = n; - assert(type2aelembytes(store->as_Mem()->memory_type(), true) == 1 << shift->in(2)->get_int(), "scale should match"); + if (type2aelembytes(store->as_Mem()->memory_type(), true) != (1 << n->in(2)->get_int())) { + msg = "scale doesn't match"; + } else { + found_index = true; + shift = n; + } } } else if (n->Opcode() == Op_ConvI2L && conv == NULL) { if (n->in(1) == head->phi()) { @@ -2641,6 +2346,13 @@ return false; } +#ifndef PRODUCT + if (TraceLoopOpts) { + tty->print("ArrayFill "); + lpt->dump_head(); + } +#endif + // Now replace the whole loop body by a call to a fill routine that // covers the same region as the loop. Node* base = store->in(MemNode::Address)->as_AddP()->in(AddPNode::Base);
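
The loopTransform.cpp hunks above rework the iteration-splitting driver: do_unroll() and do_maximally_unroll() gain TraceLoopOpts output, do_range_check() now bails out early when the stride is not constant or the zero-trip-guard Opaque1 node has been optimized away, empty-loop removal peels once to materialize a zero-trip guard when one is needed, and the new policy_do_one_iteration_loop() collapses loops with an exact trip count of one. As a reading aid, the sketch below shows the source-level shape that the pre/main/post split plus range-check elimination is aiming for, specialized to the simple case scale == 1, offset == 0, stride == +1. All names in the sketch are hypothetical; the real optimization rewrites C2 ideal-graph nodes, not source code.

#include <algorithm>
#include <cstdio>

// Hypothetical stand-in for the uncommon trap a failed range check would take.
static void range_check_trap() { std::puts("range check failed"); }

// Shape before iteration splitting: every iteration carries the range check.
static void fill_before(int* a, int len, int init, int limit) {
  for (int i = init; i < limit; i++) {
    if (i < 0 || i >= len) { range_check_trap(); return; }
    a[i] = 0;
  }
}

// Shape after insert_pre_post_loops() + do_range_check(), for scale == 1,
// offset == 0, stride == +1: the pre-loop limit is raised until the lower
// bound holds, the main-loop limit is trimmed so the upper bound holds, and
// the main body runs with no checks (do_unroll() may then double it).
static void fill_after(int* a, int len, int init, int limit) {
  int pre_limit  = std::min(limit, std::max(init, 0));        // i >= 0 afterwards
  int main_limit = std::max(pre_limit, std::min(limit, len)); // i < len inside
  for (int i = init; i < pre_limit; i++) {                    // pre-loop: full checks
    if (i < 0 || i >= len) { range_check_trap(); return; }
    a[i] = 0;
  }
  for (int i = pre_limit; i < main_limit; i++)                // main loop: no checks
    a[i] = 0;
  for (int i = main_limit; i < limit; i++) {                  // post-loop: full checks
    if (i < 0 || i >= len) { range_check_trap(); return; }
    a[i] = 0;
  }
}

int main() {
  int a[8] = {0};
  fill_before(a, 8, 0, 8);
  fill_after (a, 8, 0, 8);
  return 0;
}

The actual implementation keeps the adjustable pre-loop and main-loop limits behind Opaque1 nodes, which is why the early bail-outs added in do_range_check() above test for Op_Opaque1 before attempting any limit adjustment.
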
--- a/src/share/vm/opto/loopUnswitch.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/loopUnswitch.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,15 +32,17 @@ // // orig: transformed: // if (invariant-test) then +// predicate predicate // loop loop // stmt1 stmt1 // if (invariant-test) then stmt2 // stmt2 stmt4 // else endloop // stmt3 else -// endif loop [clone] -// stmt4 stmt1 [clone] -// endloop stmt3 +// endif predicate [clone] +// stmt4 loop [clone] +// endloop stmt1 [clone] +// stmt3 // stmt4 [clone] // endloop // endif @@ -110,6 +112,13 @@ IfNode* unswitch_iff = find_unswitching_candidate((const IdealLoopTree *)loop); assert(unswitch_iff != NULL, "should be at least one"); +#ifndef PRODUCT + if (TraceLoopOpts) { + tty->print("Unswitch %d ", head->unswitch_count()+1); + loop->dump_head(); + } +#endif + // Need to revert back to normal loop if (head->is_CountedLoop() && !head->as_CountedLoop()->is_normal_loop()) { head->as_CountedLoop()->set_normal_loop(); @@ -117,8 +126,15 @@ ProjNode* proj_true = create_slow_version_of_loop(loop, old_new); - assert(proj_true->is_IfTrue() && proj_true->unique_ctrl_out() == head, "by construction"); - +#ifdef ASSERT + Node* uniqc = proj_true->unique_ctrl_out(); + Node* entry = head->in(LoopNode::EntryControl); + Node* predicate = find_predicate(entry); + if (predicate != NULL) predicate = predicate->in(0); + assert(proj_true->is_IfTrue() && + (predicate == NULL && uniqc == head || + predicate != NULL && uniqc == predicate), "by construction"); +#endif // Increment unswitch count LoopNode* head_clone = old_new[head->_idx]->as_Loop(); int nct = head->unswitch_count() + 1; @@ -220,21 +236,24 @@ register_node(ifslow, outer_loop, iff, dom_depth(iff)); // Clone the loop body. The clone becomes the fast loop. The - // original pre-header will (illegally) have 2 control users (old & new loops). + // original pre-header will (illegally) have 3 control users + // (old & new loops & new if). clone_loop(loop, old_new, dom_depth(head), iff); assert(old_new[head->_idx]->is_Loop(), "" ); // Fast (true) control + Node* iffast_pred = clone_loop_predicates(entry, iffast); _igvn.hash_delete(head); - head->set_req(LoopNode::EntryControl, iffast); - set_idom(head, iffast, dom_depth(head)); + head->set_req(LoopNode::EntryControl, iffast_pred); + set_idom(head, iffast_pred, dom_depth(head)); _igvn._worklist.push(head); // Slow (false) control + Node* ifslow_pred = move_loop_predicates(entry, ifslow); LoopNode* slow_head = old_new[head->_idx]->as_Loop(); _igvn.hash_delete(slow_head); - slow_head->set_req(LoopNode::EntryControl, ifslow); - set_idom(slow_head, ifslow, dom_depth(slow_head)); + slow_head->set_req(LoopNode::EntryControl, ifslow_pred); + set_idom(slow_head, ifslow_pred, dom_depth(slow_head)); _igvn._worklist.push(slow_head); recompute_dom_depth();
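
The loopUnswitch.cpp hunks adjust the transformation diagrammed at the top of that file: when a loop is unswitched, the parser-inserted predicates are now cloned above the fast (true) copy and moved above the slow (false) copy, and the zero-trip-guard assertion is relaxed to allow a predicate between the new If and the loop head. At the source level, do_unswitching() corresponds roughly to the sketch below; the function names are illustrative only.

// Before unswitching: a loop-invariant test is evaluated on every iteration.
static int sum_before(const int* a, int n, bool flag) {
  int s = 0;
  for (int i = 0; i < n; i++) {
    if (flag) s += a[i];
    else      s -= a[i];
  }
  return s;
}

// After unswitching: the invariant test selects between a fast and a slow
// copy of the loop; with this change each copy also keeps its own loop
// predicates (clone_loop_predicates / move_loop_predicates).
static int sum_after(const int* a, int n, bool flag) {
  int s = 0;
  if (flag) {
    for (int i = 0; i < n; i++) s += a[i];
  } else {
    for (int i = 0; i < n; i++) s -= a[i];
  }
  return s;
}

int main() {
  int a[4] = {1, 2, 3, 4};
  return sum_before(a, 4, true) - sum_after(a, 4, true);  // both compute 10
}

Hoisting the test is only legal because it is loop invariant (find_unswitching_candidate() selects such an If); keeping a predicate copy above each version presumably preserves the ability to apply loop predication to both loops in later rounds.
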
--- a/src/share/vm/opto/loopnode.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/loopnode.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -56,12 +56,32 @@ // Dump special per-node info #ifndef PRODUCT void LoopNode::dump_spec(outputStream *st) const { - if( is_inner_loop () ) st->print( "inner " ); - if( is_partial_peel_loop () ) st->print( "partial_peel " ); - if( partial_peel_has_failed () ) st->print( "partial_peel_failed " ); + if (is_inner_loop()) st->print( "inner " ); + if (is_partial_peel_loop()) st->print( "partial_peel " ); + if (partial_peel_has_failed()) st->print( "partial_peel_failed " ); } #endif +//------------------------------is_valid_counted_loop------------------------- +bool LoopNode::is_valid_counted_loop() const { + if (is_CountedLoop()) { + CountedLoopNode* l = as_CountedLoop(); + CountedLoopEndNode* le = l->loopexit(); + if (le != NULL && + le->proj_out(1 /* true */) == l->in(LoopNode::LoopBackControl)) { + Node* phi = l->phi(); + Node* exit = le->proj_out(0 /* false */); + if (exit != NULL && exit->Opcode() == Op_IfFalse && + phi != NULL && phi->is_Phi() && + phi->in(LoopNode::LoopBackControl) == l->incr() && + le->loopnode() == l && le->stride_is_con()) { + return true; + } + } + } + return false; +} + //------------------------------get_early_ctrl--------------------------------- // Compute earliest legal control Node *PhaseIdealLoop::get_early_ctrl( Node *n ) { @@ -142,43 +162,44 @@ } //------------------------------is_counted_loop-------------------------------- -Node *PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) { +bool PhaseIdealLoop::is_counted_loop( Node *x, IdealLoopTree *loop ) { PhaseGVN *gvn = &_igvn; // Counted loop head must be a good RegionNode with only 3 not NULL // control input edges: Self, Entry, LoopBack. - if ( x->in(LoopNode::Self) == NULL || x->req() != 3 ) - return NULL; + if (x->in(LoopNode::Self) == NULL || x->req() != 3) + return false; Node *init_control = x->in(LoopNode::EntryControl); Node *back_control = x->in(LoopNode::LoopBackControl); - if( init_control == NULL || back_control == NULL ) // Partially dead - return NULL; + if (init_control == NULL || back_control == NULL) // Partially dead + return false; // Must also check for TOP when looking for a dead loop - if( init_control->is_top() || back_control->is_top() ) - return NULL; + if (init_control->is_top() || back_control->is_top()) + return false; // Allow funny placement of Safepoint - if( back_control->Opcode() == Op_SafePoint ) + if (back_control->Opcode() == Op_SafePoint) back_control = back_control->in(TypeFunc::Control); // Controlling test for loop Node *iftrue = back_control; uint iftrue_op = iftrue->Opcode(); - if( iftrue_op != Op_IfTrue && - iftrue_op != Op_IfFalse ) + if (iftrue_op != Op_IfTrue && + iftrue_op != Op_IfFalse) // I have a weird back-control. Probably the loop-exit test is in // the middle of the loop and I am looking at some trailing control-flow // merge point. To fix this I would have to partially peel the loop. 
- return NULL; // Obscure back-control + return false; // Obscure back-control // Get boolean guarding loop-back test Node *iff = iftrue->in(0); - if( get_loop(iff) != loop || !iff->in(1)->is_Bool() ) return NULL; + if (get_loop(iff) != loop || !iff->in(1)->is_Bool()) + return false; BoolNode *test = iff->in(1)->as_Bool(); BoolTest::mask bt = test->_test._test; float cl_prob = iff->as_If()->_prob; - if( iftrue_op == Op_IfFalse ) { + if (iftrue_op == Op_IfFalse) { bt = BoolTest(bt).negate(); cl_prob = 1.0 - cl_prob; } @@ -186,7 +207,7 @@ Node *cmp = test->in(1); int cmp_op = cmp->Opcode(); if( cmp_op != Op_CmpI ) - return NULL; // Avoid pointer & float compares + return false; // Avoid pointer & float compares // Find the trip-counter increment & limit. Limit must be loop invariant. Node *incr = cmp->in(1); @@ -196,55 +217,64 @@ // need 'loop()' test to tell if limit is loop invariant // --------- - if( !is_member( loop, get_ctrl(incr) ) ) { // Swapped trip counter and limit? - Node *tmp = incr; // Then reverse order into the CmpI + if (!is_member(loop, get_ctrl(incr))) { // Swapped trip counter and limit? + Node *tmp = incr; // Then reverse order into the CmpI incr = limit; limit = tmp; bt = BoolTest(bt).commute(); // And commute the exit test } - if( is_member( loop, get_ctrl(limit) ) ) // Limit must loop-invariant - return NULL; + if (is_member(loop, get_ctrl(limit))) // Limit must be loop-invariant + return false; + if (!is_member(loop, get_ctrl(incr))) // Trip counter must be loop-variant + return false; + Node* phi_incr = NULL; // Trip-counter increment must be commutative & associative. - uint incr_op = incr->Opcode(); - if( incr_op == Op_Phi && incr->req() == 3 ) { - incr = incr->in(2); // Assume incr is on backedge of Phi - incr_op = incr->Opcode(); + if (incr->is_Phi()) { + if (incr->as_Phi()->region() != x || incr->req() != 3) + return false; // Not simple trip counter expression + phi_incr = incr; + incr = phi_incr->in(LoopNode::LoopBackControl); // Assume incr is on backedge of Phi + if (!is_member(loop, get_ctrl(incr))) // Trip counter must be loop-variant + return false; } + Node* trunc1 = NULL; Node* trunc2 = NULL; const TypeInt* iv_trunc_t = NULL; if (!(incr = CountedLoopNode::match_incr_with_optional_truncation(incr, &trunc1, &trunc2, &iv_trunc_t))) { - return NULL; // Funny increment opcode + return false; // Funny increment opcode } + assert(incr->Opcode() == Op_AddI, "wrong increment code"); // Get merge point Node *xphi = incr->in(1); Node *stride = incr->in(2); - if( !stride->is_Con() ) { // Oops, swap these - if( !xphi->is_Con() ) // Is the other guy a constant? - return NULL; // Nope, unknown stride, bail out + if (!stride->is_Con()) { // Oops, swap these + if (!xphi->is_Con()) // Is the other guy a constant? + return false; // Nope, unknown stride, bail out Node *tmp = xphi; // 'incr' is commutative, so ok to swap xphi = stride; stride = tmp; } - //if( loop(xphi) != l) return NULL;// Merge point is in inner loop?? 
- if( !xphi->is_Phi() ) return NULL; // Too much math on the trip counter + // Stride must be constant + int stride_con = stride->get_int(); + assert(stride_con != 0, "missed some peephole opt"); + + if (!xphi->is_Phi()) + return false; // Too much math on the trip counter + if (phi_incr != NULL && phi_incr != xphi) + return false; PhiNode *phi = xphi->as_Phi(); - // Stride must be constant - const Type *stride_t = stride->bottom_type(); - int stride_con = stride_t->is_int()->get_con(); - assert( stride_con, "missed some peephole opt" ); - // Phi must be of loop header; backedge must wrap to increment - if( phi->region() != x ) return NULL; - if( trunc1 == NULL && phi->in(LoopNode::LoopBackControl) != incr || - trunc1 != NULL && phi->in(LoopNode::LoopBackControl) != trunc1 ) { - return NULL; + if (phi->region() != x) + return false; + if (trunc1 == NULL && phi->in(LoopNode::LoopBackControl) != incr || + trunc1 != NULL && phi->in(LoopNode::LoopBackControl) != trunc1) { + return false; } Node *init_trip = phi->in(LoopNode::EntryControl); - //if (!init_trip->is_Con()) return NULL; // avoid rolling over MAXINT/MININT // If iv trunc type is smaller than int, check for possible wrap. if (!TypeInt::INT->higher_equal(iv_trunc_t)) { @@ -267,12 +297,12 @@ if (stride_con > 0) { if (iv_trunc_t->_hi - phi_ft->_hi < stride_con || iv_trunc_t->_lo > phi_ft->_lo) { - return NULL; // truncation may occur + return false; // truncation may occur } } else if (stride_con < 0) { if (iv_trunc_t->_lo - phi_ft->_lo > stride_con || iv_trunc_t->_hi < phi_ft->_hi) { - return NULL; // truncation may occur + return false; // truncation may occur } } // No possibility of wrap so truncation can be discarded @@ -281,35 +311,50 @@ assert(trunc1 == NULL && trunc2 == NULL, "no truncation for int"); } + // If the condition is inverted and we will be rolling + // through MININT to MAXINT, then bail out. + if (bt == BoolTest::eq || // Bail out, but this loop trips at most twice! + // Odd stride + bt == BoolTest::ne && stride_con != 1 && stride_con != -1 || + // Count down loop rolls through MAXINT + (bt == BoolTest::le || bt == BoolTest::lt) && stride_con < 0 || + // Count up loop rolls through MININT + (bt == BoolTest::ge || bt == BoolTest::gt) && stride_con > 0 ) { + return false; // Bail out + } + + const TypeInt* init_t = gvn->type(init_trip)->is_int(); + const TypeInt* limit_t = gvn->type(limit)->is_int(); + + if (stride_con > 0) { + long init_p = (long)init_t->_lo + stride_con; + if (init_p > (long)max_jint || init_p > (long)limit_t->_hi) + return false; // cyclic loop or this loop trips only once + } else { + long init_p = (long)init_t->_hi + stride_con; + if (init_p < (long)min_jint || init_p < (long)limit_t->_lo) + return false; // cyclic loop or this loop trips only once + } + // ================================================= // ---- SUCCESS! Found A Trip-Counted Loop! ----- // - // Canonicalize the condition on the test. If we can exactly determine - // the trip-counter exit value, then set limit to that value and use - // a '!=' test. Otherwise use condition '<' for count-up loops and - // '>' for count-down loops. If the condition is inverted and we will - // be rolling through MININT to MAXINT, then bail out. 
- + assert(x->Opcode() == Op_Loop, "regular loops only"); C->print_method("Before CountedLoop", 3); - - // Check for SafePoint on backedge and remove - Node *sfpt = x->in(LoopNode::LoopBackControl); - if( sfpt->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt)) { - lazy_replace( sfpt, iftrue ); - loop->_tail = iftrue; +#ifndef PRODUCT + if (TraceLoopOpts) { + tty->print("Counted "); + loop->dump_head(); } - - +#endif // If compare points to incr, we are ok. Otherwise the compare // can directly point to the phi; in this case adjust the compare so that // it points to the incr by adjusting the limit. - if( cmp->in(1) == phi || cmp->in(2) == phi ) + if (cmp->in(1) == phi || cmp->in(2) == phi) limit = gvn->transform(new (C, 3) AddINode(limit,stride)); // trip-count for +-tive stride should be: (limit - init_trip + stride - 1)/stride. // Final value for iterator should be: trip_count * stride + init_trip. - const Type *limit_t = limit->bottom_type(); - const Type *init_t = init_trip->bottom_type(); Node *one_p = gvn->intcon( 1); Node *one_m = gvn->intcon(-1); @@ -317,15 +362,15 @@ Node *hook = new (C, 6) Node(6); switch( bt ) { case BoolTest::eq: - return NULL; // Bail out, but this loop trips at most twice! + ShouldNotReachHere(); case BoolTest::ne: // Ahh, the case we desire - if( stride_con == 1 ) + if (stride_con == 1) trip_count = gvn->transform(new (C, 3) SubINode(limit,init_trip)); - else if( stride_con == -1 ) + else if (stride_con == -1) trip_count = gvn->transform(new (C, 3) SubINode(init_trip,limit)); else - return NULL; // Odd stride; must prove we hit limit exactly - set_subtree_ctrl( trip_count ); + ShouldNotReachHere(); + set_subtree_ctrl(trip_count); //_loop.map(trip_count->_idx,loop(limit)); break; case BoolTest::le: // Maybe convert to '<' case @@ -338,7 +383,8 @@ //_loop.map(limit->_idx,limit_loop); // Fall into next case case BoolTest::lt: { // Maybe convert to '!=' case - if( stride_con < 0 ) return NULL; // Count down loop rolls through MAXINT + if (stride_con < 0) // Count down loop rolls through MAXINT + ShouldNotReachHere(); Node *range = gvn->transform(new (C, 3) SubINode(limit,init_trip)); set_subtree_ctrl( range ); hook->init_req(0, range); @@ -367,7 +413,8 @@ //_loop.map(limit->_idx,limit_loop); // Fall into next case case BoolTest::gt: { // Maybe convert to '!=' case - if( stride_con > 0 ) return NULL; // count up loop rolls through MININT + if (stride_con > 0) // count up loop rolls through MININT + ShouldNotReachHere(); Node *range = gvn->transform(new (C, 3) SubINode(limit,init_trip)); set_subtree_ctrl( range ); hook->init_req(0, range); @@ -385,7 +432,7 @@ hook->init_req(3, trip_count); break; } - } + } // switch( bt ) Node *span = gvn->transform(new (C, 3) MulINode(trip_count,stride)); set_subtree_ctrl( span ); @@ -394,83 +441,82 @@ limit = gvn->transform(new (C, 3) AddINode(span,init_trip)); set_subtree_ctrl( limit ); + // Check for SafePoint on backedge and remove + Node *sfpt = x->in(LoopNode::LoopBackControl); + if (sfpt->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt)) { + lazy_replace( sfpt, iftrue ); + loop->_tail = iftrue; + } + // Build a canonical trip test. // Clone code, as old values may be in use. 
+ Node* nphi = PhiNode::make(x, init_trip, TypeInt::INT); + nphi = _igvn.register_new_node_with_optimizer(nphi); + set_ctrl(nphi, get_ctrl(phi)); + incr = incr->clone(); - incr->set_req(1,phi); + incr->set_req(1,nphi); incr->set_req(2,stride); incr = _igvn.register_new_node_with_optimizer(incr); set_early_ctrl( incr ); - _igvn.hash_delete(phi); - phi->set_req_X( LoopNode::LoopBackControl, incr, &_igvn ); - // If phi type is more restrictive than Int, raise to - // Int to prevent (almost) infinite recursion in igvn - // which can only handle integer types for constants or minint..maxint. - if (!TypeInt::INT->higher_equal(phi->bottom_type())) { - Node* nphi = PhiNode::make(phi->in(0), phi->in(LoopNode::EntryControl), TypeInt::INT); - nphi->set_req(LoopNode::LoopBackControl, phi->in(LoopNode::LoopBackControl)); - nphi = _igvn.register_new_node_with_optimizer(nphi); - set_ctrl(nphi, get_ctrl(phi)); - _igvn.replace_node(phi, nphi); - phi = nphi->as_Phi(); - } + nphi->set_req(LoopNode::LoopBackControl, incr); + _igvn.replace_node(phi, nphi); + phi = nphi->as_Phi(); + cmp = cmp->clone(); cmp->set_req(1,incr); cmp->set_req(2,limit); cmp = _igvn.register_new_node_with_optimizer(cmp); set_ctrl(cmp, iff->in(0)); - Node *tmp = test->clone(); - assert( tmp->is_Bool(), "" ); - test = (BoolNode*)tmp; - (*(BoolTest*)&test->_test)._test = bt; //BoolTest::ne; + test = test->clone()->as_Bool(); + (*(BoolTest*)&test->_test)._test = bt; test->set_req(1,cmp); _igvn.register_new_node_with_optimizer(test); set_ctrl(test, iff->in(0)); - // If the exit test is dead, STOP! - if( test == NULL ) return NULL; - _igvn.hash_delete(iff); - iff->set_req_X( 1, test, &_igvn ); // Replace the old IfNode with a new LoopEndNode - Node *lex = _igvn.register_new_node_with_optimizer(new (C, 2) CountedLoopEndNode( iff->in(0), iff->in(1), cl_prob, iff->as_If()->_fcnt )); + Node *lex = _igvn.register_new_node_with_optimizer(new (C, 2) CountedLoopEndNode( iff->in(0), test, cl_prob, iff->as_If()->_fcnt )); IfNode *le = lex->as_If(); uint dd = dom_depth(iff); set_idom(le, le->in(0), dd); // Update dominance for loop exit set_loop(le, loop); // Get the loop-exit control - Node *if_f = iff->as_If()->proj_out(!(iftrue_op == Op_IfTrue)); + Node *iffalse = iff->as_If()->proj_out(!(iftrue_op == Op_IfTrue)); // Need to swap loop-exit and loop-back control? - if( iftrue_op == Op_IfFalse ) { + if (iftrue_op == Op_IfFalse) { Node *ift2=_igvn.register_new_node_with_optimizer(new (C, 1) IfTrueNode (le)); Node *iff2=_igvn.register_new_node_with_optimizer(new (C, 1) IfFalseNode(le)); loop->_tail = back_control = ift2; set_loop(ift2, loop); - set_loop(iff2, get_loop(if_f)); + set_loop(iff2, get_loop(iffalse)); // Lazy update of 'get_ctrl' mechanism. 
- lazy_replace_proj( if_f , iff2 ); - lazy_replace_proj( iftrue, ift2 ); + lazy_replace_proj( iffalse, iff2 ); + lazy_replace_proj( iftrue, ift2 ); // Swap names - if_f = iff2; - iftrue = ift2; + iffalse = iff2; + iftrue = ift2; } else { - _igvn.hash_delete(if_f ); + _igvn.hash_delete(iffalse); _igvn.hash_delete(iftrue); - if_f ->set_req_X( 0, le, &_igvn ); - iftrue->set_req_X( 0, le, &_igvn ); + iffalse->set_req_X( 0, le, &_igvn ); + iftrue ->set_req_X( 0, le, &_igvn ); } - set_idom(iftrue, le, dd+1); - set_idom(if_f, le, dd+1); + set_idom(iftrue, le, dd+1); + set_idom(iffalse, le, dd+1); + assert(iff->outcnt() == 0, "should be dead now"); + lazy_replace( iff, le ); // fix 'get_ctrl' // Now setup a new CountedLoopNode to replace the existing LoopNode CountedLoopNode *l = new (C, 3) CountedLoopNode(init_control, back_control); + l->set_unswitch_count(x->as_Loop()->unswitch_count()); // Preserve // The following assert is approximately true, and defines the intention // of can_be_counted_loop. It fails, however, because phase->type // is not yet initialized for this loop and its parts. @@ -491,10 +537,14 @@ // Free up intermediate goo _igvn.remove_dead_node(hook); +#ifdef ASSERT + assert(l->is_valid_counted_loop(), "counted loop shape is messed up"); + assert(l == loop->_head && l->phi() == phi && l->loopexit() == lex, "" ); +#endif + C->print_method("After CountedLoop", 3); - // Return trip counter - return trip_count; + return true; } @@ -819,8 +869,10 @@ Node *outer = new (phase->C, 3) LoopNode( ctl, _head->in(outer_idx) ); outer = igvn.register_new_node_with_optimizer(outer, _head); phase->set_created_loop_node(); + + Node* pred = phase->clone_loop_predicates(ctl, outer); // Outermost loop falls into '_head' loop - _head->set_req(LoopNode::EntryControl, outer); + _head->set_req(LoopNode::EntryControl, pred); _head->del_req(outer_idx); // Split all the Phis up between '_head' loop and 'outer' loop. for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) { @@ -1019,8 +1071,6 @@ // Cache parts in locals for easy PhaseIterGVN &igvn = phase->_igvn; - phase->C->print_method("Before beautify loops", 3); - igvn.hash_delete(_head); // Yank from hash before hacking edges // Check for multiple fall-in paths. Peel off a landing pad if need be. @@ -1060,12 +1110,13 @@ // backedges into a private merge point and use the merge point as // the one true backedge. if( _head->req() > 3 ) { - // Merge the many backedges into a single backedge. + // Merge the many backedges into a single backedge but leave + // the hottest backedge as separate edge for the following peel. merge_many_backedges( phase ); result = true; } - // If I am a shared header (multiple backedges), peel off myself loop. + // If I have one hot backedge, peel off myself loop. // I better be the outermost loop. if( _head->req() > 3 ) { split_outer_loop( phase ); @@ -1256,17 +1307,98 @@ return true; } +//---------------------------replace_parallel_iv------------------------------- +// Replace parallel induction variable (parallel to trip counter) +void PhaseIdealLoop::replace_parallel_iv(IdealLoopTree *loop) { + assert(loop->_head->is_CountedLoop(), ""); + CountedLoopNode *cl = loop->_head->as_CountedLoop(); + Node *incr = cl->incr(); + if (incr == NULL) + return; // Dead loop? 
+ Node *init = cl->init_trip(); + Node *phi = cl->phi(); + // protect against stride not being a constant + if (!cl->stride_is_con()) + return; + int stride_con = cl->stride_con(); + + PhaseGVN *gvn = &_igvn; + + // Visit all children, looking for Phis + for (DUIterator i = cl->outs(); cl->has_out(i); i++) { + Node *out = cl->out(i); + // Look for other phis (secondary IVs). Skip dead ones + if (!out->is_Phi() || out == phi || !has_node(out)) + continue; + PhiNode* phi2 = out->as_Phi(); + Node *incr2 = phi2->in( LoopNode::LoopBackControl ); + // Look for induction variables of the form: X += constant + if (phi2->region() != loop->_head || + incr2->req() != 3 || + incr2->in(1) != phi2 || + incr2 == incr || + incr2->Opcode() != Op_AddI || + !incr2->in(2)->is_Con()) + continue; + + // Check for parallel induction variable (parallel to trip counter) + // via an affine function. In particular, count-down loops with + // count-up array indices are common. We only RCE references off + // the trip-counter, so we need to convert all these to trip-counter + // expressions. + Node *init2 = phi2->in( LoopNode::EntryControl ); + int stride_con2 = incr2->in(2)->get_int(); + + // The general case here gets a little tricky. We want to find the + // GCD of all possible parallel IV's and make a new IV using this + // GCD for the loop. Then all possible IVs are simple multiples of + // the GCD. In practice, this will cover very few extra loops. + // Instead we require 'stride_con2' to be a multiple of 'stride_con', + // where +/-1 is the common case, but other integer multiples are + // also easy to handle. + int ratio_con = stride_con2/stride_con; + + if ((ratio_con * stride_con) == stride_con2) { // Check for exact + // Convert to using the trip counter. The parallel induction + // variable differs from the trip counter by a loop-invariant + // amount, the difference between their respective initial values. + // It is scaled by the 'ratio_con'. + // Perform local Ideal transformation since in most cases ratio == 1. 
+ Node* ratio = _igvn.intcon(ratio_con); + set_ctrl(ratio, C->root()); + Node* hook = new (C, 3) Node(3); + Node* ratio_init = gvn->transform(new (C, 3) MulINode(init, ratio)); + hook->init_req(0, ratio_init); + Node* diff = gvn->transform(new (C, 3) SubINode(init2, ratio_init)); + hook->init_req(1, diff); + Node* ratio_idx = gvn->transform(new (C, 3) MulINode(phi, ratio)); + hook->init_req(2, ratio_idx); + Node* add = gvn->transform(new (C, 3) AddINode(ratio_idx, diff)); + set_subtree_ctrl(add); + _igvn.replace_node( phi2, add ); + // Free up intermediate goo + _igvn.remove_dead_node(hook); + // Sometimes an induction variable is unused + if (add->outcnt() == 0) { + _igvn.remove_dead_node(add); + } + --i; // deleted this phi; rescan starting with next position + continue; + } + } +} + //------------------------------counted_loop----------------------------------- // Convert to counted loops where possible void IdealLoopTree::counted_loop( PhaseIdealLoop *phase ) { // For grins, set the inner-loop flag here - if( !_child ) { - if( _head->is_Loop() ) _head->as_Loop()->set_inner_loop(); + if (!_child) { + if (_head->is_Loop()) _head->as_Loop()->set_inner_loop(); } - if( _head->is_CountedLoop() || - phase->is_counted_loop( _head, this ) ) { + if (_head->is_CountedLoop() || + phase->is_counted_loop(_head, this)) { _has_sfpt = 1; // Indicate we do not need a safepoint here // Look for a safepoint to remove @@ -1275,79 +1407,9 @@ phase->is_deleteable_safept(n)) phase->lazy_replace(n,n->in(TypeFunc::Control)); - CountedLoopNode *cl = _head->as_CountedLoop(); - Node *incr = cl->incr(); - if( !incr ) return; // Dead loop? - Node *init = cl->init_trip(); - Node *phi = cl->phi(); - // protect against stride not being a constant - if( !cl->stride_is_con() ) return; - int stride_con = cl->stride_con(); - // Look for induction variables - - // Visit all children, looking for Phis - for (DUIterator i = cl->outs(); cl->has_out(i); i++) { - Node *out = cl->out(i); - // Look for other phis (secondary IVs). Skip dead ones - if (!out->is_Phi() || out == phi || !phase->has_node(out)) continue; - PhiNode* phi2 = out->as_Phi(); - Node *incr2 = phi2->in( LoopNode::LoopBackControl ); - // Look for induction variables of the form: X += constant - if( phi2->region() != _head || - incr2->req() != 3 || - incr2->in(1) != phi2 || - incr2 == incr || - incr2->Opcode() != Op_AddI || - !incr2->in(2)->is_Con() ) - continue; - - // Check for parallel induction variable (parallel to trip counter) - // via an affine function. In particular, count-down loops with - // count-up array indices are common. We only RCE references off - // the trip-counter, so we need to convert all these to trip-counter - // expressions. - Node *init2 = phi2->in( LoopNode::EntryControl ); - int stride_con2 = incr2->in(2)->get_int(); + phase->replace_parallel_iv(this); - // The general case here gets a little tricky. We want to find the - // GCD of all possible parallel IV's and make a new IV using this - // GCD for the loop. Then all possible IVs are simple multiples of - // the GCD. In practice, this will cover very few extra loops. - // Instead we require 'stride_con2' to be a multiple of 'stride_con', - // where +/-1 is the common case, but other integer multiples are - // also easy to handle. - int ratio_con = stride_con2/stride_con; - - if( ratio_con * stride_con == stride_con2 ) { // Check for exact - // Convert to using the trip counter. 
The parallel induction - // variable differs from the trip counter by a loop-invariant - // amount, the difference between their respective initial values. - // It is scaled by the 'ratio_con'. - Compile* C = phase->C; - Node* ratio = phase->_igvn.intcon(ratio_con); - phase->set_ctrl(ratio, C->root()); - Node* ratio_init = new (C, 3) MulINode(init, ratio); - phase->_igvn.register_new_node_with_optimizer(ratio_init, init); - phase->set_early_ctrl(ratio_init); - Node* diff = new (C, 3) SubINode(init2, ratio_init); - phase->_igvn.register_new_node_with_optimizer(diff, init2); - phase->set_early_ctrl(diff); - Node* ratio_idx = new (C, 3) MulINode(phi, ratio); - phase->_igvn.register_new_node_with_optimizer(ratio_idx, phi); - phase->set_ctrl(ratio_idx, cl); - Node* add = new (C, 3) AddINode(ratio_idx, diff); - phase->_igvn.register_new_node_with_optimizer(add); - phase->set_ctrl(add, cl); - phase->_igvn.replace_node( phi2, add ); - // Sometimes an induction variable is unused - if (add->outcnt() == 0) { - phase->_igvn.remove_dead_node(add); - } - --i; // deleted this phi; rescan starting with next position - continue; - } - } } else if (_parent != NULL && !_irreducible) { // Not a counted loop. // Look for a safepoint on the idom-path to remove, preserving the first one @@ -1366,24 +1428,46 @@ } // Recursively - if( _child ) _child->counted_loop( phase ); - if( _next ) _next ->counted_loop( phase ); + if (_child) _child->counted_loop( phase ); + if (_next) _next ->counted_loop( phase ); } #ifndef PRODUCT //------------------------------dump_head-------------------------------------- // Dump 1 liner for loop header info void IdealLoopTree::dump_head( ) const { - for( uint i=0; i<_nest; i++ ) + for (uint i=0; i<_nest; i++) tty->print(" "); tty->print("Loop: N%d/N%d ",_head->_idx,_tail->_idx); - if( _irreducible ) tty->print(" IRREDUCIBLE"); - if( _head->is_CountedLoop() ) { + if (_irreducible) tty->print(" IRREDUCIBLE"); + if (UseLoopPredicate) { + Node* entry = PhaseIdealLoop::find_predicate_insertion_point(_head->in(LoopNode::EntryControl), + Deoptimization::Reason_predicate); + if (entry != NULL) { + tty->print(" predicated"); + } + } + if (_head->is_CountedLoop()) { CountedLoopNode *cl = _head->as_CountedLoop(); tty->print(" counted"); - if( cl->is_pre_loop () ) tty->print(" pre" ); - if( cl->is_main_loop() ) tty->print(" main"); - if( cl->is_post_loop() ) tty->print(" post"); + + Node* init_n = cl->init_trip(); + if (init_n != NULL && init_n->is_Con()) + tty->print(" [%d,", cl->init_trip()->get_int()); + else + tty->print(" [int,"); + Node* limit_n = cl->limit(); + if (limit_n != NULL && limit_n->is_Con()) + tty->print("%d),", cl->limit()->get_int()); + else + tty->print("int),"); + int stride_con = cl->stride_con(); + if (stride_con > 0) tty->print("+"); + tty->print("%d", stride_con); + + if (cl->is_pre_loop ()) tty->print(" pre" ); + if (cl->is_main_loop()) tty->print(" main"); + if (cl->is_post_loop()) tty->print(" post"); } tty->cr(); } @@ -1392,8 +1476,8 @@ // Dump loops by loop tree void IdealLoopTree::dump( ) const { dump_head(); - if( _child ) _child->dump(); - if( _next ) _next ->dump(); + if (_child) _child->dump(); + if (_next) _next ->dump(); } #endif @@ -1439,19 +1523,19 @@ } // self (only loops that we can apply loop predication may use their predicates) - if (loop->_head->is_Loop() && - !loop->_irreducible && + if (loop->_head->is_Loop() && + !loop->_irreducible && !loop->tail()->is_top()) { - LoopNode *lpn = loop->_head->as_Loop(); + LoopNode* lpn = loop->_head->as_Loop(); 
Node* entry = lpn->in(LoopNode::EntryControl); - ProjNode *predicate_proj = find_predicate_insertion_point(entry); + Node* predicate_proj = find_predicate(entry); if (predicate_proj != NULL ) { // right pattern that can be used by loop predication - assert(entry->in(0)->in(1)->in(1)->Opcode()==Op_Opaque1, "must be"); + assert(entry->in(0)->in(1)->in(1)->Opcode() == Op_Opaque1, "must be"); useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one } } - if ( loop->_next ) { // sibling + if (loop->_next) { // sibling collect_potentially_useful_predicates(loop->_next, useful_predicates); } } @@ -1459,7 +1543,8 @@ //------------------------eliminate_useless_predicates----------------------------- // Eliminate all inserted predicates if they could not be used by loop predication. void PhaseIdealLoop::eliminate_useless_predicates() { - if (C->predicate_count() == 0) return; // no predicate left + if (C->predicate_count() == 0) + return; // no predicate left Unique_Node_List useful_predicates; // to store useful predicates if (C->has_loops()) { @@ -1479,10 +1564,11 @@ //----------------------------build_and_optimize------------------------------- // Create a PhaseLoop. Build the ideal Loop tree. Map each Ideal Node to // its corresponding LoopNode. If 'optimize' is true, do some loop cleanups. -void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool do_loop_pred) { +void PhaseIdealLoop::build_and_optimize(bool do_split_ifs) { ResourceMark rm; int old_progress = C->major_progress(); + uint orig_worklist_size = _igvn._worklist.size(); // Reset major-progress flag for the driver's heuristics C->clear_major_progress(); @@ -1510,6 +1596,13 @@ // Do not need a safepoint at the top level _ltree_root->_has_sfpt = 1; + // Initialize Dominators. + // Checked in clone_loop_predicate() during beautify_loops(). + _idom_size = 0; + _idom = NULL; + _dom_depth = NULL; + _dom_stk = NULL; + // Empty pre-order array allocate_preorders(); @@ -1546,6 +1639,7 @@ // Split shared headers and insert loop landing pads. // Do not bother doing this on the Root loop of course. if( !_verify_me && !_verify_only && _ltree_root->_child ) { + C->print_method("Before beautify loops", 3); if( _ltree_root->_child->beautify_loops( this ) ) { // Re-build loop tree! _ltree_root->_child = NULL; @@ -1630,12 +1724,13 @@ for (int i = 0; i < old_progress; i++) C->set_major_progress(); assert(C->unique() == unique, "verification mode made Nodes? ? ?"); - assert(_igvn._worklist.size() == 0, "shouldn't push anything"); + assert(_igvn._worklist.size() == orig_worklist_size, "shouldn't push anything"); return; } - // some parser-inserted loop predicates could never be used by loop - // predication. Eliminate them before loop optimization + // Some parser-inserted loop predicates could never be used by loop + // predication or they were moved away from loop during some optimizations. + // For example, peeling. Eliminate them before next loop optimizations. if (UseLoopPredicate) { eliminate_useless_predicates(); } @@ -1647,12 +1742,15 @@ #ifndef PRODUCT C->verify_graph_edges(); - if( _verify_me ) { // Nested verify pass? + if (_verify_me) { // Nested verify pass? // Check to see if the verify mode is broken assert(C->unique() == unique, "non-optimize mode made Nodes? ? 
?"); return; } - if( VerifyLoopOptimizations ) verify(); + if(VerifyLoopOptimizations) verify(); + if(TraceLoopOpts && C->has_loops()) { + _ltree_root->dump(); + } #endif if (ReassociateInvariants) { @@ -1683,7 +1781,7 @@ } // Perform loop predication before iteration splitting - if (do_loop_pred && C->has_loops() && !C->major_progress()) { + if (C->has_loops() && !C->major_progress() && (C->predicate_count() > 0)) { _ltree_root->_child->loop_predication(this); } @@ -1726,8 +1824,20 @@ C->set_major_progress(); } - // Convert scalar to superword operations + // Keep loop predicates and perform optimizations with them + // until no more loop optimizations could be done. + // After that switch predicates off and do more loop optimizations. + if (!C->major_progress() && (C->predicate_count() > 0)) { + C->cleanup_loop_predicates(_igvn); +#ifndef PRODUCT + if (TraceLoopOpts) { + tty->print_cr("PredicatesOff"); + } +#endif + C->set_major_progress(); + } + // Convert scalar to superword operations at the end of all loop opts. if (UseSuperWord && C->has_loops() && !C->major_progress()) { // SuperWord transform SuperWord sw(this);
--- a/src/share/vm/opto/loopnode.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/loopnode.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -57,7 +57,12 @@ protected: short _loop_flags; // Names for flag bitfields - enum { pre_post_main=0, inner_loop=8, partial_peel_loop=16, partial_peel_failed=32 }; + enum { Normal=0, Pre=1, Main=2, Post=3, PreMainPostFlagsMask=3, + MainHasNoPreLoop=4, + HasExactTripCount=8, + InnerLoop=16, + PartialPeelLoop=32, + PartialPeelFailed=64 }; char _unswitch_count; enum { _unswitch_max=3 }; @@ -65,13 +70,13 @@ // Names for edge indices enum { Self=0, EntryControl, LoopBackControl }; - int is_inner_loop() const { return _loop_flags & inner_loop; } - void set_inner_loop() { _loop_flags |= inner_loop; } + int is_inner_loop() const { return _loop_flags & InnerLoop; } + void set_inner_loop() { _loop_flags |= InnerLoop; } - int is_partial_peel_loop() const { return _loop_flags & partial_peel_loop; } - void set_partial_peel_loop() { _loop_flags |= partial_peel_loop; } - int partial_peel_has_failed() const { return _loop_flags & partial_peel_failed; } - void mark_partial_peel_failed() { _loop_flags |= partial_peel_failed; } + int is_partial_peel_loop() const { return _loop_flags & PartialPeelLoop; } + void set_partial_peel_loop() { _loop_flags |= PartialPeelLoop; } + int partial_peel_has_failed() const { return _loop_flags & PartialPeelFailed; } + void mark_partial_peel_failed() { _loop_flags |= PartialPeelFailed; } int unswitch_max() { return _unswitch_max; } int unswitch_count() { return _unswitch_count; } @@ -93,6 +98,7 @@ in(1) != NULL && phase->type(in(1)) != Type::TOP && in(2) != NULL && phase->type(in(2)) != Type::TOP; } + bool is_valid_counted_loop() const; #ifndef PRODUCT virtual void dump_spec(outputStream *st) const; #endif @@ -101,9 +107,8 @@ //------------------------------Counted Loops---------------------------------- // Counted loops are all trip-counted loops, with exactly 1 trip-counter exit // path (and maybe some other exit paths). The trip-counter exit is always -// last in the loop. The trip-counter does not have to stride by a constant, -// but it does have to stride by a loop-invariant amount; the exit value is -// also loop invariant. +// last in the loop. The trip-counter has to stride by a constant; +// the exit value is also loop invariant. // CountedLoopNodes and CountedLoopEndNodes come in matched pairs. The // CountedLoopNode has the incoming loop control and the loop-back-control @@ -112,7 +117,7 @@ // CountedLoopNode if there is control flow in the loop), the post-increment // trip-counter value, and the limit. The trip-counter value is always of // the form (Op old-trip-counter stride). The old-trip-counter is produced -// by a Phi connected to the CountedLoopNode. The stride is loop invariant. +// by a Phi connected to the CountedLoopNode. The stride is constant. // The Op is any commutable opcode, including Add, Mul, Xor. The // CountedLoopEndNode also takes in the loop-invariant limit value. @@ -137,8 +142,8 @@ // the Main CountedLoop. Used to assert that we understand the graph shape.
node_idx_t _main_idx; - // Known trip count calculated by policy_maximally_unroll - int _trip_count; + // Known trip count calculated by compute_exact_trip_count() + uint _trip_count; // Expected trip count from profile data float _profile_trip_cnt; @@ -152,7 +157,7 @@ public: CountedLoopNode( Node *entry, Node *backedge ) - : LoopNode(entry, backedge), _trip_count(max_jint), + : LoopNode(entry, backedge), _main_idx(0), _trip_count(max_juint), _profile_trip_cnt(COUNT_UNKNOWN), _unrolled_count_log2(0), _node_count_before_unroll(0) { init_class_id(Class_CountedLoop); @@ -194,13 +199,12 @@ // A 'main' loop that is ONLY unrolled or peeled, never RCE'd or // Aligned, may be missing it's pre-loop. - enum { Normal=0, Pre=1, Main=2, Post=3, PrePostFlagsMask=3, Main_Has_No_Pre_Loop=4 }; - int is_normal_loop() const { return (_loop_flags&PrePostFlagsMask) == Normal; } - int is_pre_loop () const { return (_loop_flags&PrePostFlagsMask) == Pre; } - int is_main_loop () const { return (_loop_flags&PrePostFlagsMask) == Main; } - int is_post_loop () const { return (_loop_flags&PrePostFlagsMask) == Post; } - int is_main_no_pre_loop() const { return _loop_flags & Main_Has_No_Pre_Loop; } - void set_main_no_pre_loop() { _loop_flags |= Main_Has_No_Pre_Loop; } + int is_normal_loop() const { return (_loop_flags&PreMainPostFlagsMask) == Normal; } + int is_pre_loop () const { return (_loop_flags&PreMainPostFlagsMask) == Pre; } + int is_main_loop () const { return (_loop_flags&PreMainPostFlagsMask) == Main; } + int is_post_loop () const { return (_loop_flags&PreMainPostFlagsMask) == Post; } + int is_main_no_pre_loop() const { return _loop_flags & MainHasNoPreLoop; } + void set_main_no_pre_loop() { _loop_flags |= MainHasNoPreLoop; } int main_idx() const { return _main_idx; } @@ -208,10 +212,19 @@ void set_pre_loop (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Pre ; _main_idx = main->_idx; } void set_main_loop ( ) { assert(is_normal_loop(),""); _loop_flags |= Main; } void set_post_loop (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Post; _main_idx = main->_idx; } - void set_normal_loop( ) { _loop_flags &= ~PrePostFlagsMask; } + void set_normal_loop( ) { _loop_flags &= ~PreMainPostFlagsMask; } + + void set_trip_count(uint tc) { _trip_count = tc; } + uint trip_count() { return _trip_count; } - void set_trip_count(int tc) { _trip_count = tc; } - int trip_count() { return _trip_count; } + bool has_exact_trip_count() const { return (_loop_flags & HasExactTripCount) != 0; } + void set_exact_trip_count(uint tc) { + _trip_count = tc; + _loop_flags |= HasExactTripCount; + } + void set_nonexact_trip_count() { + _loop_flags &= ~HasExactTripCount; + } void set_profile_trip_cnt(float ptc) { _profile_trip_cnt = ptc; } float profile_trip_cnt() { return _profile_trip_cnt; } @@ -384,6 +397,9 @@ // Micro-benchmark spamming. Remove empty loops. bool policy_do_remove_empty_loop( PhaseIdealLoop *phase ); + // Convert one iteration loop into normal code. + bool policy_do_one_iteration_loop( PhaseIdealLoop *phase ); + // Return TRUE or FALSE if the loop should be peeled or not. Peel if we can // make some loop-invariant test (usually a null-check) happen before the // loop. @@ -412,6 +428,9 @@ // Return TRUE if "iff" is a range check. 
bool is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const; + // Compute loop exact trip count if possible + void compute_exact_trip_count( PhaseIdealLoop *phase ); + // Compute loop trip count from profile data void compute_profile_trip_cnt( PhaseIdealLoop *phase ); @@ -696,6 +715,9 @@ // Is safept not required by an outer loop? bool is_deleteable_safept(Node* sfpt); + // Replace parallel induction variable (parallel to trip counter) + void replace_parallel_iv(IdealLoopTree *loop); + // Perform verification that the graph is valid. PhaseIdealLoop( PhaseIterGVN &igvn) : PhaseTransform(Ideal_Loop), @@ -703,11 +725,11 @@ _dom_lca_tags(arena()), // Thread::resource_area _verify_me(NULL), _verify_only(true) { - build_and_optimize(false, false); + build_and_optimize(false); } // build the loop tree and perform any requested optimizations - void build_and_optimize(bool do_split_if, bool do_loop_pred); + void build_and_optimize(bool do_split_if); public: // Dominators for the sea of nodes @@ -718,13 +740,13 @@ Node *dom_lca_internal( Node *n1, Node *n2 ) const; // Compute the Ideal Node to Loop mapping - PhaseIdealLoop( PhaseIterGVN &igvn, bool do_split_ifs, bool do_loop_pred) : + PhaseIdealLoop( PhaseIterGVN &igvn, bool do_split_ifs) : PhaseTransform(Ideal_Loop), _igvn(igvn), _dom_lca_tags(arena()), // Thread::resource_area _verify_me(NULL), _verify_only(false) { - build_and_optimize(do_split_ifs, do_loop_pred); + build_and_optimize(do_split_ifs); } // Verify that verify_me made the same decisions as a fresh run. @@ -734,7 +756,7 @@ _dom_lca_tags(arena()), // Thread::resource_area _verify_me(verify_me), _verify_only(false) { - build_and_optimize(false, false); + build_and_optimize(false); } // Build and verify the loop tree without modifying the graph. This @@ -751,7 +773,7 @@ // Per-Node transform virtual Node *transform( Node *a_node ) { return 0; } - Node *is_counted_loop( Node *x, IdealLoopTree *loop ); + bool is_counted_loop( Node *x, IdealLoopTree *loop ); // Return a post-walked LoopNode IdealLoopTree *get_loop( Node *n ) const { @@ -815,16 +837,41 @@ bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0); // Return true if proj is for "proj->[region->..]call_uct" - bool is_uncommon_trap_proj(ProjNode* proj, bool must_reason_predicate = false); + // Return true if proj is for "proj->[region->..]call_uct" + static bool is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason); // Return true for "if(test)-> proj -> ... 
// | // V // other_proj->[region->..]call_uct" - bool is_uncommon_trap_if_pattern(ProjNode* proj, bool must_reason_predicate = false); + static bool is_uncommon_trap_if_pattern(ProjNode* proj, Deoptimization::DeoptReason reason); // Create a new if above the uncommon_trap_if_pattern for the predicate to be promoted - ProjNode* create_new_if_for_predicate(ProjNode* cont_proj); + ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry, + Deoptimization::DeoptReason reason); + void register_control(Node* n, IdealLoopTree *loop, Node* pred); + + // Clone loop predicates to cloned loops (peeled, unswitched) + static ProjNode* clone_predicate(ProjNode* predicate_proj, Node* new_entry, + Deoptimization::DeoptReason reason, + PhaseIdealLoop* loop_phase, + PhaseIterGVN* igvn); + static ProjNode* move_predicate(ProjNode* predicate_proj, Node* new_entry, + Deoptimization::DeoptReason reason, + PhaseIdealLoop* loop_phase, + PhaseIterGVN* igvn); + static Node* clone_loop_predicates(Node* old_entry, Node* new_entry, + bool move_predicates, + PhaseIdealLoop* loop_phase, + PhaseIterGVN* igvn); + Node* clone_loop_predicates(Node* old_entry, Node* new_entry); + Node* move_loop_predicates(Node* old_entry, Node* new_entry); + + void eliminate_loop_predicates(Node* entry); + static Node* skip_loop_predicates(Node* entry); + // Find a good location to insert a predicate - ProjNode* find_predicate_insertion_point(Node* start_c); + static ProjNode* find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason); + // Find a predicate + static Node* find_predicate(Node* entry); // Construct a range check for a predicate if BoolNode* rc_predicate(Node* ctrl, int scale, Node* offset, @@ -936,7 +983,7 @@ Node *has_local_phi_input( Node *n ); // Mark an IfNode as being dominated by a prior test, // without actually altering the CFG (and hence IDOM info). - void dominated_by( Node *prevdom, Node *iff ); + void dominated_by( Node *prevdom, Node *iff, bool flip = false ); // Split Node 'n' through merge point Node *split_thru_region( Node *n, Node *region );
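Editor's aside: the new LoopNode flag layout above packs the pre/main/post kind into the low two bits and keeps the remaining flags as independent bits. A small standalone sketch of how the mask-based accessors decode it (enumerator names copied from the hunk; the checks are illustrative only):

  #include <cassert>

  enum LoopFlags { Normal = 0, Pre = 1, Main = 2, Post = 3, PreMainPostFlagsMask = 3,
                   MainHasNoPreLoop = 4, HasExactTripCount = 8,
                   InnerLoop = 16, PartialPeelLoop = 32, PartialPeelFailed = 64 };

  int main() {
    short flags = Main | HasExactTripCount | InnerLoop;   // a marked inner main loop
    assert((flags & PreMainPostFlagsMask) == Main);       // is_main_loop()
    assert((flags & HasExactTripCount) != 0);             // has_exact_trip_count()
    assert((flags & PartialPeelLoop) == 0);               // unrelated bit untouched
    return 0;
  }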
--- a/src/share/vm/opto/loopopts.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/loopopts.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,13 +42,13 @@ return NULL; } int wins = 0; - assert( !n->is_CFG(), "" ); - assert( region->is_Region(), "" ); + assert(!n->is_CFG(), ""); + assert(region->is_Region(), ""); const Type* type = n->bottom_type(); const TypeOopPtr *t_oop = _igvn.type(n)->isa_oopptr(); Node *phi; - if( t_oop != NULL && t_oop->is_known_instance_field() ) { + if (t_oop != NULL && t_oop->is_known_instance_field()) { int iid = t_oop->instance_id(); int index = C->get_alias_index(t_oop); int offset = t_oop->offset(); @@ -57,20 +57,20 @@ phi = PhiNode::make_blank(region, n); } uint old_unique = C->unique(); - for( uint i = 1; i < region->req(); i++ ) { + for (uint i = 1; i < region->req(); i++) { Node *x; Node* the_clone = NULL; - if( region->in(i) == C->top() ) { + if (region->in(i) == C->top()) { x = C->top(); // Dead path? Use a dead data op } else { x = n->clone(); // Else clone up the data op the_clone = x; // Remember for possible deletion. // Alter data node to use pre-phi inputs - if( n->in(0) == region ) + if (n->in(0) == region) x->set_req( 0, region->in(i) ); - for( uint j = 1; j < n->req(); j++ ) { + for (uint j = 1; j < n->req(); j++) { Node *in = n->in(j); - if( in->is_Phi() && in->in(0) == region ) + if (in->is_Phi() && in->in(0) == region) x->set_req( j, in->in(i) ); // Use pre-Phi input for the clone } } @@ -85,7 +85,7 @@ // happen if the singleton occurs on loop entry, as the elimination of // the PhiNode may cause the resulting node to migrate back to a previous // loop iteration. - if( singleton && t == Type::TOP ) { + if (singleton && t == Type::TOP) { // Is_Loop() == false does not confirm the absence of a loop (e.g., an // irreducible loop may not be indicated by an affirmative is_Loop()); // therefore, the only top we can split thru a phi is on a backedge of @@ -93,7 +93,7 @@ singleton &= region->is_Loop() && (i != LoopNode::EntryControl); } - if( singleton ) { + if (singleton) { wins++; x = ((PhaseGVN&)_igvn).makecon(t); } else { @@ -108,12 +108,12 @@ // igvn->type(x) is set to x->Value() already. x->raise_bottom_type(t); Node *y = x->Identity(&_igvn); - if( y != x ) { + if (y != x) { wins++; x = y; } else { y = _igvn.hash_find(x); - if( y ) { + if (y) { wins++; x = y; } else { @@ -129,7 +129,7 @@ phi->set_req( i, x ); } // Too few wins? - if( wins <= policy ) { + if (wins <= policy) { _igvn.remove_dead_node(phi); return NULL; } @@ -137,7 +137,7 @@ // Record Phi register_new_node( phi, region ); - for( uint i2 = 1; i2 < phi->req(); i2++ ) { + for (uint i2 = 1; i2 < phi->req(); i2++) { Node *x = phi->in(i2); // If we commoned up the cloned 'x' with another existing Node, // the existing Node picks up a new use. We need to make the @@ -145,24 +145,44 @@ Node *old_ctrl; IdealLoopTree *old_loop; + if (x->is_Con()) { + // Constant's control is always root. + set_ctrl(x, C->root()); + continue; + } // The occasional new node - if( x->_idx >= old_unique ) { // Found a new, unplaced node? - old_ctrl = x->is_Con() ? C->root() : NULL; - old_loop = NULL; // Not in any prior loop + if (x->_idx >= old_unique) { // Found a new, unplaced node? 
+ old_ctrl = NULL; + old_loop = NULL; // Not in any prior loop } else { - old_ctrl = x->is_Con() ? C->root() : get_ctrl(x); + old_ctrl = get_ctrl(x); old_loop = get_loop(old_ctrl); // Get prior loop } // New late point must dominate new use - Node *new_ctrl = dom_lca( old_ctrl, region->in(i2) ); + Node *new_ctrl = dom_lca(old_ctrl, region->in(i2)); + if (new_ctrl == old_ctrl) // Nothing is changed + continue; + + IdealLoopTree *new_loop = get_loop(new_ctrl); + + // Don't move x into a loop if its uses are + // outside of loop. Otherwise x will be cloned + // for each use outside of this loop. + IdealLoopTree *use_loop = get_loop(region); + if (!new_loop->is_member(use_loop) && + (old_loop == NULL || !new_loop->is_member(old_loop))) { + // Take early control, later control will be recalculated + // during next iteration of loop optimizations. + new_ctrl = get_early_ctrl(x); + new_loop = get_loop(new_ctrl); + } // Set new location set_ctrl(x, new_ctrl); - IdealLoopTree *new_loop = get_loop( new_ctrl ); // If changing loop bodies, see if we need to collect into new body - if( old_loop != new_loop ) { - if( old_loop && !old_loop->_child ) + if (old_loop != new_loop) { + if (old_loop && !old_loop->_child) old_loop->_body.yank(x); - if( !new_loop->_child ) + if (!new_loop->_child) new_loop->_body.push(x); // Collect body info } } @@ -174,9 +194,9 @@ // Replace the dominated test with an obvious true or false. Place it on the // IGVN worklist for later cleanup. Move control-dependent data Nodes on the // live path up to the dominating control. -void PhaseIdealLoop::dominated_by( Node *prevdom, Node *iff ) { +void PhaseIdealLoop::dominated_by( Node *prevdom, Node *iff, bool flip ) { #ifndef PRODUCT - if( VerifyLoopOptimizations && PrintOpto ) tty->print_cr("dominating test"); + if (VerifyLoopOptimizations && PrintOpto) tty->print_cr("dominating test"); #endif @@ -185,6 +205,12 @@ assert( iff->Opcode() == Op_If || iff->Opcode() == Op_CountedLoopEnd, "Check this code when new subtype is added"); int pop = prevdom->Opcode(); assert( pop == Op_IfFalse || pop == Op_IfTrue, "" ); + if (flip) { + if (pop == Op_IfTrue) + pop = Op_IfFalse; + else + pop = Op_IfTrue; + } // 'con' is set to true or false to kill the dominated test. Node *con = _igvn.makecon(pop == Op_IfTrue ? TypeInt::ONE : TypeInt::ZERO); set_ctrl(con, C->root()); // Constant gets a new use @@ -197,7 +223,7 @@ // I can assume this path reaches an infinite loop. In this case it's not // important to optimize the data Nodes - either the whole compilation will // be tossed or this path (and all data Nodes) will go dead. 
- if( iff->outcnt() != 2 ) return; + if (iff->outcnt() != 2) return; // Make control-dependent data Nodes on the live path (path that will remain // once the dominated IF is removed) become control-dependent on the @@ -207,16 +233,16 @@ for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) { Node* cd = dp->fast_out(i); // Control-dependent node - if( cd->depends_only_on_test() ) { - assert( cd->in(0) == dp, "" ); - _igvn.hash_delete( cd ); + if (cd->depends_only_on_test()) { + assert(cd->in(0) == dp, ""); + _igvn.hash_delete(cd); cd->set_req(0, prevdom); - set_early_ctrl( cd ); + set_early_ctrl(cd); _igvn._worklist.push(cd); IdealLoopTree *new_loop = get_loop(get_ctrl(cd)); - if( old_loop != new_loop ) { - if( !old_loop->_child ) old_loop->_body.yank(cd); - if( !new_loop->_child ) new_loop->_body.push(cd); + if (old_loop != new_loop) { + if (!old_loop->_child) old_loop->_body.yank(cd); + if (!new_loop->_child) new_loop->_body.push(cd); } --i; --imax; @@ -2113,9 +2139,12 @@ // // orig // -// stmt1 -// | -// v +// stmt1 +// | +// v +// loop predicate +// | +// v // loop<----+ // | | // stmt2 | @@ -2146,6 +2175,9 @@ // after clone loop // // stmt1 +// | +// v +// loop predicate // / \ // clone / \ orig // / \ @@ -2184,12 +2216,15 @@ // after partial peel // // stmt1 +// | +// v +// loop predicate // / // clone / orig // / TOP // / \ // v v -// TOP->region region----+ +// TOP->loop loop----+ // | | | // stmt2 stmt2 | // | | | @@ -2227,13 +2262,17 @@ // stmt1 // | // v +// stmt2 clone +// | +// v // ........> ifA clone // : / | // dom / | // : v v // : false true // : | | -// : | stmt2 clone +// : | v +// : | loop predicate // : | | // : | v // : | newloop<-----+ @@ -2263,6 +2302,7 @@ // bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) { + assert(!loop->_head->is_CountedLoop(), "Non-counted loop only"); if (!loop->_head->is_Loop()) { return false; } @@ -2290,6 +2330,7 @@ } } + Node* entry = head->in(LoopNode::EntryControl); int dd = dom_depth(head); // Step 1: find cut point @@ -2338,6 +2379,11 @@ } #if !defined(PRODUCT) + if (TraceLoopOpts) { + tty->print("PartialPeel "); + loop->dump_head(); + } + if (TracePartialPeeling) { tty->print_cr("before partial peel one iteration"); Node_List wl; @@ -2481,6 +2527,7 @@ // Create new loop head for new phis and to hang // the nodes being moved (sinked) from the peel region. LoopNode* new_head = new (C, 3) LoopNode(last_peel, last_peel); + new_head->set_unswitch_count(head->unswitch_count()); // Preserve _igvn.register_new_node_with_optimizer(new_head); assert(first_not_peeled->in(0) == last_peel, "last_peel <- first_not_peeled"); first_not_peeled->set_req(0, new_head); @@ -2580,6 +2627,8 @@ // Backedge of the surviving new_head (the clone) is original last_peel _igvn.hash_delete(new_head_clone); + Node* new_entry = move_loop_predicates(entry, new_head_clone->in(LoopNode::EntryControl)); + new_head_clone->set_req(LoopNode::EntryControl, new_entry); new_head_clone->set_req(LoopNode::LoopBackControl, last_peel); _igvn._worklist.push(new_head_clone); @@ -2651,24 +2700,23 @@ // prevent loop-fallout uses of the pre-incremented trip counter (which are // then alive with the post-incremented trip counter forcing an extra // register move) -void PhaseIdealLoop::reorg_offsets( IdealLoopTree *loop ) { +void PhaseIdealLoop::reorg_offsets(IdealLoopTree *loop) { + // Perform it only for canonical counted loops. + // Loop's shape could be messed up by iteration_split_impl. 
+ if (!loop->_head->is_CountedLoop()) + return; + if (!loop->_head->as_Loop()->is_valid_counted_loop()) + return; CountedLoopNode *cl = loop->_head->as_CountedLoop(); CountedLoopEndNode *cle = cl->loopexit(); - if( !cle ) return; // The occasional dead loop - // Find loop exit control Node *exit = cle->proj_out(false); - assert( exit->Opcode() == Op_IfFalse, "" ); + Node *phi = cl->phi(); // Check for the special case of folks using the pre-incremented // trip-counter on the fall-out path (forces the pre-incremented // and post-incremented trip counter to be live at the same time). // Fix this by adjusting to use the post-increment trip counter. - Node *phi = cl->phi(); - if( !phi ) return; // Dead infinite loop - - // Shape messed up, probably by iteration_split_impl - if (phi->in(LoopNode::LoopBackControl) != cl->incr()) return; bool progress = true; while (progress) { @@ -2677,21 +2725,19 @@ Node* use = phi->fast_out(i); // User of trip-counter if (!has_ctrl(use)) continue; Node *u_ctrl = get_ctrl(use); - if( use->is_Phi() ) { + if (use->is_Phi()) { u_ctrl = NULL; - for( uint j = 1; j < use->req(); j++ ) - if( use->in(j) == phi ) - u_ctrl = dom_lca( u_ctrl, use->in(0)->in(j) ); + for (uint j = 1; j < use->req(); j++) + if (use->in(j) == phi) + u_ctrl = dom_lca(u_ctrl, use->in(0)->in(j)); } IdealLoopTree *u_loop = get_loop(u_ctrl); // Look for loop-invariant use - if( u_loop == loop ) continue; - if( loop->is_member( u_loop ) ) continue; + if (u_loop == loop) continue; + if (loop->is_member(u_loop)) continue; // Check that use is live out the bottom. Assuming the trip-counter // update is right at the bottom, uses of of the loop middle are ok. - if( dom_lca( exit, u_ctrl ) != exit ) continue; - // protect against stride not being a constant - if( !cle->stride_is_con() ) continue; + if (dom_lca(exit, u_ctrl) != exit) continue; // Hit! Refactor use to use the post-incremented tripcounter. // Compute a post-increment tripcounter. Node *opaq = new (C, 2) Opaque2Node( C, cle->incr() ); @@ -2702,9 +2748,10 @@ register_new_node( post, u_ctrl ); _igvn.hash_delete(use); _igvn._worklist.push(use); - for( uint j = 1; j < use->req(); j++ ) - if( use->in(j) == phi ) + for (uint j = 1; j < use->req(); j++) { + if (use->in(j) == phi) use->set_req(j, post); + } // Since DU info changed, rerun loop progress = true; break;
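Editor's aside: the new flip argument to dominated_by() above only inverts the sense of the dominating projection before picking the constant that kills the dominated test. A standalone sketch of that decision (the opcode constants are stand-in values for illustration, not HotSpot's real opcode numbers):

  enum { Op_IfFalse = 0, Op_IfTrue = 1 };   // illustrative stand-ins

  // Returns 1 to send the dominated If down its true path, 0 for the false path,
  // following the dominating projection (or its opposite when flip is set).
  int kill_constant(int prevdom_opcode, bool flip) {
    int pop = prevdom_opcode;
    if (flip) pop = (pop == Op_IfTrue) ? Op_IfFalse : Op_IfTrue;
    return (pop == Op_IfTrue) ? 1 : 0;
  }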
--- a/src/share/vm/opto/matcher.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/matcher.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -49,6 +49,9 @@ #ifdef TARGET_ARCH_MODEL_zero # include "adfiles/ad_zero.hpp" #endif +#ifdef TARGET_ARCH_MODEL_arm +# include "adfiles/ad_arm.hpp" +#endif OptoReg::Name OptoReg::c_frame_pointer;
--- a/src/share/vm/opto/matcher.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/matcher.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -427,6 +427,11 @@ // Do ints take an entire long register or just half? static const bool int_in_long; + // Do the processor's shift instructions only use the low 5/6 bits + // of the count for 32/64 bit ints? If not we need to do the masking + // ourselves. + static const bool need_masked_shift_count; + // This routine is run whenever a graph fails to match. // If it returns, the compiler should bailout to interpreter without error. // In non-product mode, SoftMatchFailure is false to detect non-canonical
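Editor's aside: the new Matcher::need_masked_shift_count flag declared above covers targets whose shift instructions do not already truncate the shift count to the low 5 (or 6) bits. Java semantics require a 32-bit shift count to be taken mod 32 (mod 64 for longs), so on such targets the generated code must apply the mask itself. A minimal sketch of the required semantics (illustrative helper, not HotSpot code):

  #include <cstdint>
  #include <cstdio>

  // Explicit masking, as a back end must emit when the hardware does not
  // already ignore the high bits of the shift count.
  static int32_t java_ishl(int32_t x, int32_t count) {
    return (int32_t)((uint32_t)x << (count & 31));
  }

  int main() {
    std::printf("%d\n", java_ishl(1, 33));   // prints 2, since 33 & 31 == 1
    return 0;
  }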
--- a/src/share/vm/opto/memnode.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/memnode.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1559,21 +1559,24 @@ phase->C->has_unsafe_access(), "Field accesses must be precise" ); // For oop loads, we expect the _type to be precise - if (OptimizeStringConcat && klass == phase->C->env()->String_klass() && + if (klass == phase->C->env()->String_klass() && adr->is_AddP() && off != Type::OffsetBot) { - // For constant Strings treat the fields as compile time constants. + // For constant Strings treat the final fields as compile time constants. Node* base = adr->in(AddPNode::Base); const TypeOopPtr* t = phase->type(base)->isa_oopptr(); if (t != NULL && t->singleton()) { - ciObject* string = t->const_oop(); - ciConstant constant = string->as_instance()->field_value_by_offset(off); - if (constant.basic_type() == T_INT) { - return TypeInt::make(constant.as_int()); - } else if (constant.basic_type() == T_ARRAY) { - if (adr->bottom_type()->is_ptr_to_narrowoop()) { - return TypeNarrowOop::make_from_constant(constant.as_object()); - } else { - return TypeOopPtr::make_from_constant(constant.as_object()); + ciField* field = phase->C->env()->String_klass()->get_field_by_offset(off, false); + if (field != NULL && field->is_final()) { + ciObject* string = t->const_oop(); + ciConstant constant = string->as_instance()->field_value(field); + if (constant.basic_type() == T_INT) { + return TypeInt::make(constant.as_int()); + } else if (constant.basic_type() == T_ARRAY) { + if (adr->bottom_type()->is_ptr_to_narrowoop()) { + return TypeNarrowOop::make_from_constant(constant.as_object(), true); + } else { + return TypeOopPtr::make_from_constant(constant.as_object(), true); + } } } } @@ -2614,54 +2617,28 @@ } //============================================================================= -// Do we match on this edge? No memory edges -uint StrCompNode::match_edge(uint idx) const { - return idx == 2 || idx == 3; // StrComp (Binary str1 cnt1) (Binary str2 cnt2) -} - -//------------------------------Ideal------------------------------------------ -// Return a node which is more "ideal" than the current node. Strip out -// control copies -Node *StrCompNode::Ideal(PhaseGVN *phase, bool can_reshape){ - return remove_dead_region(phase, can_reshape) ? this : NULL; -} - -//============================================================================= -// Do we match on this edge? No memory edges -uint StrEqualsNode::match_edge(uint idx) const { - return idx == 2 || idx == 3; // StrEquals (Binary str1 str2) cnt +// Do not match memory edge. +uint StrIntrinsicNode::match_edge(uint idx) const { + return idx == 2 || idx == 3; } //------------------------------Ideal------------------------------------------ // Return a node which is more "ideal" than the current node. Strip out // control copies -Node *StrEqualsNode::Ideal(PhaseGVN *phase, bool can_reshape){ - return remove_dead_region(phase, can_reshape) ? this : NULL; -} - -//============================================================================= -// Do we match on this edge? 
No memory edges -uint StrIndexOfNode::match_edge(uint idx) const { - return idx == 2 || idx == 3; // StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2) -} - -//------------------------------Ideal------------------------------------------ -// Return a node which is more "ideal" than the current node. Strip out -// control copies -Node *StrIndexOfNode::Ideal(PhaseGVN *phase, bool can_reshape){ - return remove_dead_region(phase, can_reshape) ? this : NULL; -} - -//============================================================================= -// Do we match on this edge? No memory edges -uint AryEqNode::match_edge(uint idx) const { - return idx == 2 || idx == 3; // StrEquals ary1 ary2 -} -//------------------------------Ideal------------------------------------------ -// Return a node which is more "ideal" than the current node. Strip out -// control copies -Node *AryEqNode::Ideal(PhaseGVN *phase, bool can_reshape){ - return remove_dead_region(phase, can_reshape) ? this : NULL; +Node *StrIntrinsicNode::Ideal(PhaseGVN *phase, bool can_reshape) { + if (remove_dead_region(phase, can_reshape)) return this; + + if (can_reshape) { + Node* mem = phase->transform(in(MemNode::Memory)); + // If transformed to a MergeMem, get the desired slice + uint alias_idx = phase->C->get_alias_index(adr_type()); + mem = mem->is_MergeMem() ? mem->as_MergeMem()->memory_at(alias_idx) : mem; + if (mem != in(MemNode::Memory)) { + set_req(MemNode::Memory, mem); + return this; + } + } + return NULL; } //============================================================================= @@ -4077,6 +4054,7 @@ n = base_memory(); assert(Node::in_dump() || n == NULL || n->bottom_type() == Type::TOP + || n->adr_type() == NULL // address is TOP || n->adr_type() == TypePtr::BOTTOM || n->adr_type() == TypeRawPtr::BOTTOM || Compile::current()->AliasLevel() == 0,
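Editor's aside: the LoadNode::Value() hunk above now folds a load from a constant String only when the addressed field is final (and no longer only under OptimizeStringConcat), because only final fields of a constant object are guaranteed not to change after construction; String.hash, for example, is filled in lazily. A loose standalone analogy (illustrative, not HotSpot code):

  struct Str {
    const int count;   // 'final' in Java terms: safe to fold when the object is a constant
    int hash;          // lazily computed cache field: must not be folded
  };

  constexpr Str kAbc = {3, 0};
  static_assert(kAbc.count == 3, "final field of a constant object folds to a constant");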
--- a/src/share/vm/opto/memnode.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/memnode.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -776,67 +776,69 @@ static bool step_through(Node** np, uint instance_id, PhaseTransform* phase); }; -//------------------------------StrComp------------------------------------- -class StrCompNode: public Node { +//------------------------------StrIntrinsic------------------------------- +// Base class for Ideal nodes used in String intrinsic code. +class StrIntrinsicNode: public Node { public: - StrCompNode(Node* control, Node* char_array_mem, - Node* s1, Node* c1, - Node* s2, Node* c2): Node(control, char_array_mem, - s1, c1, - s2, c2) {}; - virtual int Opcode() const; + StrIntrinsicNode(Node* control, Node* char_array_mem, + Node* s1, Node* c1, Node* s2, Node* c2): + Node(control, char_array_mem, s1, c1, s2, c2) { + } + + StrIntrinsicNode(Node* control, Node* char_array_mem, + Node* s1, Node* s2, Node* c): + Node(control, char_array_mem, s1, s2, c) { + } + + StrIntrinsicNode(Node* control, Node* char_array_mem, + Node* s1, Node* s2): + Node(control, char_array_mem, s1, s2) { + } + + virtual bool depends_only_on_test() const { return false; } - virtual const Type* bottom_type() const { return TypeInt::INT; } virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; } virtual uint match_edge(uint idx) const; virtual uint ideal_reg() const { return Op_RegI; } virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); }; +//------------------------------StrComp------------------------------------- +class StrCompNode: public StrIntrinsicNode { +public: + StrCompNode(Node* control, Node* char_array_mem, + Node* s1, Node* c1, Node* s2, Node* c2): + StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {}; + virtual int Opcode() const; + virtual const Type* bottom_type() const { return TypeInt::INT; } +}; + //------------------------------StrEquals------------------------------------- -class StrEqualsNode: public Node { +class StrEqualsNode: public StrIntrinsicNode { public: StrEqualsNode(Node* control, Node* char_array_mem, - Node* s1, Node* s2, Node* c): Node(control, char_array_mem, - s1, s2, c) {}; + Node* s1, Node* s2, Node* c): + StrIntrinsicNode(control, char_array_mem, s1, s2, c) {}; virtual int Opcode() const; - virtual bool depends_only_on_test() const { return false; } virtual const Type* bottom_type() const { return TypeInt::BOOL; } - virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; } - virtual uint match_edge(uint idx) const; - virtual uint ideal_reg() const { return Op_RegI; } - virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); }; //------------------------------StrIndexOf------------------------------------- -class StrIndexOfNode: public Node { +class StrIndexOfNode: public StrIntrinsicNode { public: StrIndexOfNode(Node* control, Node* char_array_mem, - Node* s1, Node* c1, - Node* s2, Node* c2): Node(control, char_array_mem, - s1, c1, - s2, c2) {}; + Node* s1, Node* c1, Node* s2, Node* c2): + StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {}; virtual int Opcode() const; - virtual bool depends_only_on_test() const { return false; } virtual const Type* bottom_type() const { return
TypeInt::INT; } - virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; } - virtual uint match_edge(uint idx) const; - virtual uint ideal_reg() const { return Op_RegI; } - virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); }; //------------------------------AryEq--------------------------------------- -class AryEqNode: public Node { +class AryEqNode: public StrIntrinsicNode { public: - AryEqNode(Node* control, Node* char_array_mem, - Node* s1, Node* s2): Node(control, char_array_mem, s1, s2) {}; + AryEqNode(Node* control, Node* char_array_mem, Node* s1, Node* s2): + StrIntrinsicNode(control, char_array_mem, s1, s2) {}; virtual int Opcode() const; - virtual bool depends_only_on_test() const { return false; } virtual const Type* bottom_type() const { return TypeInt::BOOL; } - virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; } - virtual uint match_edge(uint idx) const; - virtual uint ideal_reg() const { return Op_RegI; } - virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); }; //------------------------------MemBar-----------------------------------------
--- a/src/share/vm/opto/node.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/node.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1373,12 +1373,12 @@ //------------------------------find------------------------------------------ // Find a neighbor of this Node with the given _idx // If idx is negative, find its absolute value, following both _in and _out. -static void find_recur( Node* &result, Node *n, int idx, bool only_ctrl, - VectorSet &old_space, VectorSet &new_space ) { +static void find_recur(Compile* C, Node* &result, Node *n, int idx, bool only_ctrl, + VectorSet* old_space, VectorSet* new_space ) { int node_idx = (idx >= 0) ? idx : -idx; if (NotANode(n)) return; // Gracefully handle NULL, -1, 0xabababab, etc. - // Contained in new_space or old_space? - VectorSet *v = Compile::current()->node_arena()->contains(n) ? &new_space : &old_space; + // Contained in new_space or old_space? Check old_arena first since it's mostly empty. + VectorSet *v = C->old_arena()->contains(n) ? old_space : new_space; if( v->test(n->_idx) ) return; if( (int)n->_idx == node_idx debug_only(|| n->debug_idx() == node_idx) ) { @@ -1390,19 +1390,23 @@ v->set(n->_idx); for( uint i=0; i<n->len(); i++ ) { if( only_ctrl && !(n->is_Region()) && (n->Opcode() != Op_Root) && (i != TypeFunc::Control) ) continue; - find_recur( result, n->in(i), idx, only_ctrl, old_space, new_space ); + find_recur(C, result, n->in(i), idx, only_ctrl, old_space, new_space ); } // Search along forward edges also: if (idx < 0 && !only_ctrl) { for( uint j=0; j<n->outcnt(); j++ ) { - find_recur( result, n->raw_out(j), idx, only_ctrl, old_space, new_space ); + find_recur(C, result, n->raw_out(j), idx, only_ctrl, old_space, new_space ); } } #ifdef ASSERT - // Search along debug_orig edges last: - for (Node* orig = n->debug_orig(); orig != NULL && n != orig; orig = orig->debug_orig()) { - if (NotANode(orig)) break; - find_recur( result, orig, idx, only_ctrl, old_space, new_space ); + // Search along debug_orig edges last, checking for cycles + Node* orig = n->debug_orig(); + if (orig != NULL) { + do { + if (NotANode(orig)) break; + find_recur(C, result, orig, idx, only_ctrl, old_space, new_space ); + orig = orig->debug_orig(); + } while (orig != NULL && orig != n->debug_orig()); } #endif //ASSERT } @@ -1417,7 +1421,7 @@ ResourceArea *area = Thread::current()->resource_area(); VectorSet old_space(area), new_space(area); Node* result = NULL; - find_recur( result, (Node*) this, idx, false, old_space, new_space ); + find_recur(Compile::current(), result, (Node*) this, idx, false, &old_space, &new_space ); return result; } @@ -1427,7 +1431,7 @@ ResourceArea *area = Thread::current()->resource_area(); VectorSet old_space(area), new_space(area); Node* result = NULL; - find_recur( result, (Node*) this, idx, true, old_space, new_space ); + find_recur(Compile::current(), result, (Node*) this, idx, true, &old_space, &new_space ); return result; } #endif
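Editor's aside: the reworked debug_orig walk in find_recur() above terminates when the orig chain loops back to its first element, instead of relying on the old n != orig test. The same termination idea on a plain singly linked structure (a minimal sketch with assumed types):

  struct OrigNode { OrigNode* debug_orig; };

  // Walk the orig chain, stopping if it comes back to its first element,
  // mirroring the new do/while guard above.
  int count_orig_chain(const OrigNode* n) {
    int visited = 0;
    const OrigNode* first = n->debug_orig;
    for (const OrigNode* p = first; p != nullptr; p = p->debug_orig) {
      visited++;
      if (p->debug_orig == first) break;   // looped back to the start; stop
    }
    return visited;
  }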
--- a/src/share/vm/opto/output.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/output.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1028,7 +1028,7 @@ // helper for Fill_buffer bailout logic static void turn_off_compiler(Compile* C) { - if (CodeCache::unallocated_capacity() >= CodeCacheMinimumFreeSpace*10) { + if (CodeCache::largest_free_block() >= CodeCacheMinimumFreeSpace*10) { // Do not turn off compilation if a single giant method has // blown the code cache size. C->record_failure("excessive request to CodeCache");
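Editor's aside: the turn_off_compiler() change above keys the "excessive request" bailout on the largest contiguous free block rather than total unallocated capacity, since a fragmented code cache can report plenty of free space while still being unable to hold one large method. A tiny illustration with made-up numbers (not HotSpot code):

  #include <cstdio>

  int main() {
    long free_blocks[] = { 300*1024, 250*1024, 200*1024 };   // hypothetical free chunks, bytes
    long total = 0, largest = 0;
    for (long b : free_blocks) { total += b; if (b > largest) largest = b; }
    long request = 512 * 1024;                               // one giant method
    std::printf("total free %ld, largest block %ld, request %ld -> %s\n",
                total, largest, request,
                (largest >= request) ? "fits" : "does not fit");
    return 0;
  }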
--- a/src/share/vm/opto/output.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/output.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/parse.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/parse.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -136,6 +136,7 @@ uint _count; // how many times executed? Currently only set by _goto's bool _is_parsed; // has this block been parsed yet? bool _is_handler; // is this block an exception handler? + bool _has_merged_backedge; // does this block have merged backedge? SafePointNode* _start_map; // all values flowing into this block MethodLivenessResult _live_locals; // lazily initialized liveness bitmap @@ -168,6 +169,18 @@ // True after any predecessor flows control into this block bool is_merged() const { return _start_map != NULL; } +#ifdef ASSERT + // True after backedge predecessor flows control into this block + bool has_merged_backedge() const { return _has_merged_backedge; } + void mark_merged_backedge(Block* pred) { + assert(is_SEL_head(), "should be loop head"); + if (pred != NULL && is_SEL_backedge(pred)) { + assert(is_parsed(), "block should be parsed before merging backedges"); + _has_merged_backedge = true; + } + } +#endif + // True when all non-exception predecessors have been parsed. bool is_ready() const { return preds_parsed() == pred_count(); } @@ -441,11 +454,6 @@ } } - // Return true if the parser should add a loop predicate - bool should_add_predicate(int target_bci); - // Insert a loop predicate into the graph - void add_predicate(); - // Note: Intrinsic generation routines may be found in library_call.cpp. // Helper function to setup Ideal Call nodes @@ -483,8 +491,8 @@ bool static_field_ok_in_clinit(ciField *field, ciMethod *method); // common code for actually performing the load or store - void do_get_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field); - void do_put_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field); + void do_get_xxx(Node* obj, ciField* field, bool is_field); + void do_put_xxx(Node* obj, ciField* field, bool is_field); // loading from a constant field or the constant pool // returns false if push failed (non-perm field constants only, not ldcs)
--- a/src/share/vm/opto/parse1.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/parse1.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -637,6 +637,25 @@ // (Note that dead locals do not get phis built, ever.) ensure_phis_everywhere(); + if (block->is_SEL_head() && + UseLoopPredicate) { + // Add predicate to single entry (not irreducible) loop head. + assert(!block->has_merged_backedge(), "only entry paths should be merged for now"); + // Need correct bci for predicate. + // It is fine to set it here since do_one_block() will set it anyway. + set_parse_bci(block->start()); + add_predicate(); + // Add new region for back branches. + int edges = block->pred_count() - block->preds_parsed() + 1; // +1 for original region + RegionNode *r = new (C, edges+1) RegionNode(edges+1); + _gvn.set_type(r, Type::CONTROL); + record_for_igvn(r); + r->init_req(edges, control()); + set_control(r); + // Add new phis. + ensure_phis_everywhere(); + } + // Leave behind an undisturbed copy of the map, for future merges. set_map(clone_map()); } @@ -1113,7 +1132,7 @@ _preds_parsed = 0; _count = 0; assert(pred_count() == 0 && preds_parsed() == 0, "sanity"); - assert(!(is_merged() || is_parsed() || is_handler()), "sanity"); + assert(!(is_merged() || is_parsed() || is_handler() || has_merged_backedge()), "sanity"); assert(_live_locals.size() == 0, "sanity"); // entry point has additional predecessor @@ -1350,10 +1369,6 @@ set_parse_bci(iter().cur_bci()); if (bci() == block()->limit()) { - // insert a predicate if it falls through to a loop head block - if (should_add_predicate(bci())){ - add_predicate(); - } // Do not walk into the next block until directed by do_all_blocks. merge(bci()); break; @@ -1498,17 +1513,29 @@ || target->is_handler() // These have unpredictable inputs. || target->is_loop_head() // Known multiple inputs || control()->is_Region()) { // We must hide this guy. + + int current_bci = bci(); + set_parse_bci(target->start()); // Set target bci + if (target->is_SEL_head()) { + DEBUG_ONLY( target->mark_merged_backedge(block()); ) + if (target->start() == 0) { + // Add loop predicate for the special case when + // there are backbranches to the method entry. + add_predicate(); + } + } // Add a Region to start the new basic block. Phis will be added // later lazily. int edges = target->pred_count(); if (edges < pnum) edges = pnum; // might be a new path! - Node *r = new (C, edges+1) RegionNode(edges+1); + RegionNode *r = new (C, edges+1) RegionNode(edges+1); gvn().set_type(r, Type::CONTROL); record_for_igvn(r); // zap all inputs to NULL for debugging (done in Node(uint) constructor) // for (int j = 1; j < edges+1; j++) { r->init_req(j, NULL); } r->init_req(pnum, control()); set_control(r); + set_parse_bci(current_bci); // Restore bci } // Convert the existing Parser mapping into a mapping at this bci. @@ -1517,7 +1544,11 @@ } else { // Prior mapping at this bci if (TraceOptoParse) { tty->print(" with previous state"); } - +#ifdef ASSERT + if (target->is_SEL_head()) { + target->mark_merged_backedge(block()); + } +#endif // We must not manufacture more phis if the target is already parsed. 
bool nophi = target->is_parsed(); @@ -2054,37 +2085,6 @@ } } -//------------------------------should_add_predicate-------------------------- -bool Parse::should_add_predicate(int target_bci) { - if (!UseLoopPredicate) return false; - Block* target = successor_for_bci(target_bci); - if (target != NULL && - target->is_loop_head() && - block()->rpo() < target->rpo()) { - return true; - } - return false; -} - -//------------------------------add_predicate--------------------------------- -void Parse::add_predicate() { - assert(UseLoopPredicate,"use only for loop predicate"); - Node *cont = _gvn.intcon(1); - Node* opq = _gvn.transform(new (C, 2) Opaque1Node(C, cont)); - Node *bol = _gvn.transform(new (C, 2) Conv2BNode(opq)); - IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN); - Node* iffalse = _gvn.transform(new (C, 1) IfFalseNode(iff)); - C->add_predicate_opaq(opq); - { - PreserveJVMState pjvms(this); - set_control(iffalse); - uncommon_trap(Deoptimization::Reason_predicate, - Deoptimization::Action_maybe_recompile); - } - Node* iftrue = _gvn.transform(new (C, 1) IfTrueNode(iff)); - set_control(iftrue); -} - #ifndef PRODUCT //------------------------show_parse_info-------------------------------------- void Parse::show_parse_info() {
--- a/src/share/vm/opto/parse2.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/parse2.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -293,11 +293,6 @@ if (len < 1) { // If this is a backward branch, add safepoint maybe_add_safepoint(default_dest); - if (should_add_predicate(default_dest)){ - _sp += 1; // set original stack for use by uncommon_trap - add_predicate(); - _sp -= 1; - } merge(default_dest); return; } @@ -344,11 +339,6 @@ if (len < 1) { // If this is a backward branch, add safepoint maybe_add_safepoint(default_dest); - if (should_add_predicate(default_dest)){ - _sp += 1; // set original stack for use by uncommon_trap - add_predicate(); - _sp -= 1; - } merge(default_dest); return; } @@ -756,9 +746,6 @@ push(_gvn.makecon(ret_addr)); // Flow to the jsr. - if (should_add_predicate(jsr_bci)){ - add_predicate(); - } merge(jsr_bci); } @@ -808,8 +795,9 @@ taken = method()->scale_count(taken); not_taken = method()->scale_count(not_taken); - // Give up if too few counts to be meaningful - if (taken + not_taken < 40) { + // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful. + // We also check that individual counters are positive first, otherwise the sum can become positive. + if (taken < 0 || not_taken < 0 || taken + not_taken < 40) { if (C->log() != NULL) { C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken); } @@ -817,13 +805,13 @@ } // Compute frequency that we arrive here - int sum = taken + not_taken; + float sum = taken + not_taken; // Adjust, if this block is a cloned private block but the // Jump counts are shared. Taken the private counts for // just this path instead of the shared counts. if( block()->count() > 0 ) sum = block()->count(); - cnt = (float)sum / (float)FreqCountInvocations; + cnt = sum / FreqCountInvocations; // Pin probability to sane limits float prob; @@ -1040,11 +1028,6 @@ profile_taken_branch(target_bci); adjust_map_after_if(btest, c, prob, branch_block, next_block); if (!stopped()) { - if (should_add_predicate(target_bci)){ // add a predicate if it branches to a loop - int nargs = repush_if_args(); // set original stack for uncommon_trap - add_predicate(); - _sp -= nargs; - } merge(target_bci); } } @@ -1168,11 +1151,6 @@ profile_taken_branch(target_bci); adjust_map_after_if(taken_btest, c, prob, branch_block, next_block); if (!stopped()) { - if (should_add_predicate(target_bci)){ // add a predicate if it branches to a loop - int nargs = repush_if_args(); // set original stack for the uncommon_trap - add_predicate(); - _sp -= nargs; - } merge(target_bci); } } @@ -2166,10 +2144,6 @@ // Update method data profile_taken_branch(target_bci); - // Add loop predicate if it goes to a loop - if (should_add_predicate(target_bci)){ - add_predicate(); - } // Merge the current control into the target basic block merge(target_bci);
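Editor's aside: the branch_prediction() hunk above rejects negative (overflowed) counters and accumulates the sum in float because two large scaled counts can exceed the int range. A small standalone illustration (assumed counter values):

  #include <climits>
  #include <cstdio>

  int main() {
    long long taken = 2000000000LL, not_taken = 2000000000LL;  // large scaled counts
    long long true_sum = taken + not_taken;                    // 4e9, does not fit in a 32-bit int
    float     fsum     = (float)taken + (float)not_taken;      // float keeps an adequate estimate
    std::printf("true sum = %lld (INT_MAX = %d), float sum = %.0f\n",
                true_sum, INT_MAX, fsum);
    return 0;
  }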
--- a/src/share/vm/opto/parse3.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/parse3.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -112,29 +112,31 @@ // Compile-time detect of null-exception? if (stopped()) return; +#ifdef ASSERT const TypeInstPtr *tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder()); assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed"); +#endif if (is_get) { --_sp; // pop receiver before getting - do_get_xxx(tjp, obj, field, is_field); + do_get_xxx(obj, field, is_field); } else { - do_put_xxx(tjp, obj, field, is_field); + do_put_xxx(obj, field, is_field); --_sp; // pop receiver after putting } } else { - const TypeKlassPtr* tkp = TypeKlassPtr::make(field_holder); - obj = _gvn.makecon(tkp); + const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror()); + obj = _gvn.makecon(tip); if (is_get) { - do_get_xxx(tkp, obj, field, is_field); + do_get_xxx(obj, field, is_field); } else { - do_put_xxx(tkp, obj, field, is_field); + do_put_xxx(obj, field, is_field); } } } -void Parse::do_get_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field) { +void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) { // Does this field have a constant value? If so, just push the value. if (field->is_constant()) { if (field->is_static()) { @@ -143,8 +145,8 @@ return; } else { - // final non-static field of a trusted class ({java,sun}.dyn - // classes). + // final non-static field of a trusted class (classes in + // java.lang.invoke and sun.invoke packages and subpackages). if (obj->is_Con()) { const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr(); ciObject* constant_oop = oop_ptr->const_oop(); @@ -231,7 +233,7 @@ } } -void Parse::do_put_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field) { +void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) { bool is_vol = field->is_volatile(); // If reference is volatile, prevent following memory ops from // floating down past the volatile write. Also prevents commoning
--- a/src/share/vm/opto/phaseX.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/phaseX.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -471,6 +471,13 @@ _delay_transform = delay; } + // Clone loop predicates. Defined in loopTransform.cpp. + Node* clone_loop_predicates(Node* old_entry, Node* new_entry); + Node* move_loop_predicates(Node* old_entry, Node* new_entry); + // Create a new if below new_entry for the predicate to be cloned + ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry, + Deoptimization::DeoptReason reason); + #ifndef PRODUCT protected: // Sub-quadratic implementation of VerifyIterativeGVN.
--- a/src/share/vm/opto/regmask.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/regmask.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/regmask.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/regmask.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/runtime.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/runtime.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/split_if.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/split_if.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -399,6 +399,9 @@ #ifndef PRODUCT if( PrintOpto && VerifyLoopOptimizations ) tty->print_cr("Split-if"); + if (TraceLoopOpts) { + tty->print_cr("SplitIf"); + } #endif C->set_major_progress(); Node *region = iff->in(0);
--- a/src/share/vm/opto/stringopts.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/stringopts.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -910,7 +910,7 @@ ciObject* con = field->constant_value().as_object(); // Do not "join" in the previous type; it doesn't add value, // and may yield a vacuous result if the field is of interface type. - type = TypeOopPtr::make_from_constant(con)->isa_oopptr(); + type = TypeOopPtr::make_from_constant(con, true)->isa_oopptr(); assert(type != NULL, "field singleton type must be consistent"); } else { type = TypeOopPtr::make_from_klass(field_klass->as_klass()); @@ -969,6 +969,10 @@ // for (int i=0; ; i++) // if (x <= sizeTable[i]) // return i+1; + + // Add loop predicate first. + kit.add_predicate(); + RegionNode *loop = new (C, 3) RegionNode(3); loop->init_req(1, kit.control()); kit.gvn().set_type(loop, Type::CONTROL); @@ -1086,6 +1090,9 @@ // } { + // Add loop predicate first. + kit.add_predicate(); + RegionNode *head = new (C, 3) RegionNode(3); head->init_req(1, kit.control()); kit.gvn().set_type(head, Type::CONTROL);
--- a/src/share/vm/opto/superword.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/superword.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1132,6 +1132,13 @@ void SuperWord::output() { if (_packset.length() == 0) return; +#ifndef PRODUCT + if (TraceLoopOpts) { + tty->print("SuperWord "); + lpt()->dump_head(); + } +#endif + // MUST ENSURE main loop's initial value is properly aligned: // (iv_initial_value + min_iv_offset) % vector_width_in_bytes() == 0
--- a/src/share/vm/opto/type.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/type.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -32,6 +32,7 @@ #include "memory/oopFactory.hpp" #include "memory/resourceArea.hpp" #include "oops/instanceKlass.hpp" +#include "oops/instanceMirrorKlass.hpp" #include "oops/klassKlass.hpp" #include "oops/objArrayKlass.hpp" #include "oops/typeArrayKlass.hpp" @@ -2241,43 +2242,49 @@ } else if (this->isa_aryptr()) { _is_ptr_to_narrowoop = (klass()->is_obj_array_klass() && _offset != arrayOopDesc::length_offset_in_bytes()); - } else if (klass() == ciEnv::current()->Class_klass() && - (_offset == java_lang_Class::klass_offset_in_bytes() || - _offset == java_lang_Class::array_klass_offset_in_bytes())) { - // Special hidden fields from the Class. - assert(this->isa_instptr(), "must be an instance ptr."); - _is_ptr_to_narrowoop = true; } else if (klass()->is_instance_klass()) { ciInstanceKlass* ik = klass()->as_instance_klass(); ciField* field = NULL; if (this->isa_klassptr()) { - // Perm objects don't use compressed references, except for - // static fields which are currently compressed. - field = ik->get_field_by_offset(_offset, true); - if (field != NULL) { - BasicType basic_elem_type = field->layout_type(); - _is_ptr_to_narrowoop = (basic_elem_type == T_OBJECT || - basic_elem_type == T_ARRAY); - } + // Perm objects don't use compressed references } else if (_offset == OffsetBot || _offset == OffsetTop) { // unsafe access _is_ptr_to_narrowoop = true; } else { // exclude unsafe ops assert(this->isa_instptr(), "must be an instance ptr."); - // Field which contains a compressed oop references. - field = ik->get_field_by_offset(_offset, false); - if (field != NULL) { + + if (klass() == ciEnv::current()->Class_klass() && + (_offset == java_lang_Class::klass_offset_in_bytes() || + _offset == java_lang_Class::array_klass_offset_in_bytes())) { + // Special hidden fields from the Class. + assert(this->isa_instptr(), "must be an instance ptr."); + _is_ptr_to_narrowoop = true; + } else if (klass() == ciEnv::current()->Class_klass() && + _offset >= instanceMirrorKlass::offset_of_static_fields()) { + // Static fields + assert(o != NULL, "must be constant"); + ciInstanceKlass* k = o->as_instance()->java_lang_Class_klass()->as_instance_klass(); + ciField* field = k->get_field_by_offset(_offset, true); + assert(field != NULL, "missing field"); BasicType basic_elem_type = field->layout_type(); _is_ptr_to_narrowoop = (basic_elem_type == T_OBJECT || basic_elem_type == T_ARRAY); - } else if (klass()->equals(ciEnv::current()->Object_klass())) { - // Compile::find_alias_type() cast exactness on all types to verify - // that it does not affect alias type. - _is_ptr_to_narrowoop = true; } else { - // Type for the copy start in LibraryCallKit::inline_native_clone(). - assert(!klass_is_exact(), "only non-exact klass"); - _is_ptr_to_narrowoop = true; + // Instance fields which contains a compressed oop references. + field = ik->get_field_by_offset(_offset, false); + if (field != NULL) { + BasicType basic_elem_type = field->layout_type(); + _is_ptr_to_narrowoop = (basic_elem_type == T_OBJECT || + basic_elem_type == T_ARRAY); + } else if (klass()->equals(ciEnv::current()->Object_klass())) { + // Compile::find_alias_type() cast exactness on all types to verify + // that it does not affect alias type. + _is_ptr_to_narrowoop = true; + } else { + // Type for the copy start in LibraryCallKit::inline_native_clone(). 
+ assert(!klass_is_exact(), "only non-exact klass"); + _is_ptr_to_narrowoop = true; + } } } } @@ -3386,7 +3393,22 @@ instance_id = InstanceBot; tary = TypeAry::make(Type::BOTTOM, tary->_size); } + } else // Non integral arrays. + // Must fall to bottom if exact klasses in upper lattice + // are not equal or super klass is exact. + if ( above_centerline(ptr) && klass() != tap->klass() && + // meet with top[] and bottom[] are processed further down: + tap ->_klass != NULL && this->_klass != NULL && + // both are exact and not equal: + ((tap ->_klass_is_exact && this->_klass_is_exact) || + // 'tap' is exact and super or unrelated: + (tap ->_klass_is_exact && !tap->klass()->is_subtype_of(klass())) || + // 'this' is exact and super or unrelated: + (this->_klass_is_exact && !klass()->is_subtype_of(tap->klass())))) { + tary = TypeAry::make(Type::BOTTOM, tary->_size); + return make( NotNull, NULL, tary, lazy_klass, false, off, InstanceBot ); } + bool xk = false; switch (tap->ptr()) { case AnyNull: @@ -3766,7 +3788,7 @@ // Oops, need to compute _klass and cache it ciKlass* k_ary = compute_klass(); - if( this != TypeAryPtr::OOPS ) { + if( this != TypeAryPtr::OOPS && this->dual() != TypeAryPtr::OOPS ) { // The _klass field acts as a cache of the underlying // ciKlass for this array type. In order to set the field, // we need to cast away const-ness.
--- a/src/share/vm/opto/type.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/type.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -988,8 +988,8 @@ static const TypeNarrowOop *make( const TypePtr* type); - static const TypeNarrowOop* make_from_constant(ciObject* con) { - return make(TypeOopPtr::make_from_constant(con)); + static const TypeNarrowOop* make_from_constant(ciObject* con, bool require_constant = false) { + return make(TypeOopPtr::make_from_constant(con, require_constant)); } // returns the equivalent ptr type for this compressed pointer
--- a/src/share/vm/opto/vectornode.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/opto/vectornode.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -32,6 +32,7 @@ //------------------------------VectorNode-------------------------------------- // Vector Operation class VectorNode : public Node { + virtual uint size_of() const { return sizeof(*this); } protected: uint _length; // vector length virtual BasicType elt_basic_type() const = 0; // Vector element basic type
--- a/src/share/vm/precompiled.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/precompiled.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/forte.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/forte.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/jni.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/jni.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1858,7 +1858,7 @@ // Static field. The fieldID a JNIid specifying the field holder and the offset within the klassOop. JNIid* id = jfieldIDWorkaround::from_static_jfieldID(fieldID); assert(id->is_static_field_id(), "invalid static field id"); - found = instanceKlass::cast(id->holder())->find_local_field_from_offset(id->offset(), true, &fd); + found = id->find_local_field(&fd); } else { // Non-static field. The fieldID is really the offset of the field within the instanceOop. int offset = jfieldIDWorkaround::from_instance_jfieldID(k, fieldID); @@ -1906,9 +1906,7 @@ JNIid* id = instanceKlass::cast(fd.field_holder())->jni_id_for(fd.offset()); debug_only(id->set_is_static_field_id();) - debug_only(int first_offset = instanceKlass::cast(fd.field_holder())->offset_of_static_fields();) - debug_only(int end_offset = first_offset + (instanceKlass::cast(fd.field_holder())->static_field_size() * wordSize);) - assert(id->offset() >= first_offset && id->offset() < end_offset, "invalid static field offset"); + debug_only(id->verify(fd.field_holder())); ret = jfieldIDWorkaround::to_static_jfieldID(id); return ret; @@ -1928,7 +1926,7 @@ if (JvmtiExport::should_post_field_access()) { JvmtiExport::jni_GetField_probe(thread, NULL, NULL, id->holder(), fieldID, true); } - jobject ret = JNIHandles::make_local(id->holder()->obj_field(id->offset())); + jobject ret = JNIHandles::make_local(id->holder()->java_mirror()->obj_field(id->offset())); DTRACE_PROBE1(hotspot_jni, GetStaticObjectField__return, ret); return ret; JNI_END @@ -1950,7 +1948,7 @@ if (JvmtiExport::should_post_field_access()) { \ JvmtiExport::jni_GetField_probe(thread, NULL, NULL, id->holder(), fieldID, true); \ } \ - ret = id->holder()-> Fieldname##_field (id->offset()); \ + ret = id->holder()->java_mirror()-> Fieldname##_field (id->offset()); \ return ret;\ JNI_END @@ -1976,7 +1974,7 @@ field_value.l = value; JvmtiExport::jni_SetField_probe(thread, NULL, NULL, id->holder(), fieldID, true, 'L', (jvalue *)&field_value); } - id->holder()->obj_field_put(id->offset(), JNIHandles::resolve(value)); + id->holder()->java_mirror()->obj_field_put(id->offset(), JNIHandles::resolve(value)); DTRACE_PROBE(hotspot_jni, SetStaticObjectField__return); JNI_END @@ -1999,7 +1997,7 @@ field_value.unionType = value; \ JvmtiExport::jni_SetField_probe(thread, NULL, NULL, id->holder(), fieldID, true, SigType, (jvalue *)&field_value); \ } \ - id->holder()-> Fieldname##_field_put (id->offset(), value); \ + id->holder()->java_mirror()-> Fieldname##_field_put (id->offset(), value); \ DTRACE_PROBE(hotspot_jni, SetStatic##Result##Field__return);\ JNI_END
--- a/src/share/vm/prims/jniCheck.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/jniCheck.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -224,8 +224,7 @@ ReportJNIFatalError(thr, fatal_wrong_static_field); /* check for proper field type */ - if (!instanceKlass::cast(f_oop)->find_local_field_from_offset( - id->offset(), true, &fd)) + if (!id->find_local_field(&fd)) ReportJNIFatalError(thr, fatal_static_field_not_found); if ((fd.field_type() != ftype) && !(fd.field_type() == T_ARRAY && ftype == T_OBJECT)) {
--- a/src/share/vm/prims/jni_md.h Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/jni_md.h Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/jvm.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/jvm.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1808,7 +1808,7 @@ THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "Wrong type at constant pool index"); } klassOop k = cp->klass_at(index, CHECK_NULL); - return (jclass) JNIHandles::make_local(k->klass_part()->java_mirror()); + return (jclass) JNIHandles::make_local(k->java_mirror()); } JVM_END @@ -1824,7 +1824,7 @@ } klassOop k = constantPoolOopDesc::klass_at_if_loaded(cp, index); if (k == NULL) return NULL; - return (jclass) JNIHandles::make_local(k->klass_part()->java_mirror()); + return (jclass) JNIHandles::make_local(k->java_mirror()); } JVM_END
--- a/src/share/vm/prims/jvm.h Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/jvm.h Fri Apr 22 15:30:53 2011 +0200 @@ -1062,7 +1062,7 @@ JVM_CONSTANT_NameAndType, JVM_CONSTANT_MethodHandle = 15, // JSR 292 JVM_CONSTANT_MethodType = 16, // JSR 292 - JVM_CONSTANT_InvokeDynamicTrans = 17, // JSR 292, only occurs in old class files + //JVM_CONSTANT_(unused) = 17, // JSR 292 early drafts only JVM_CONSTANT_InvokeDynamic = 18, // JSR 292 JVM_CONSTANT_ExternalMax = 18 // Last tag found in classfiles };
--- a/src/share/vm/prims/jvm_misc.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/jvm_misc.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/jvmtiClassFileReconstituter.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/jvmtiClassFileReconstituter.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/jvmtiClassFileReconstituter.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/jvmtiClassFileReconstituter.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/jvmtiEnvBase.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/jvmtiEnvBase.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -616,9 +616,7 @@ bool found = false; if (jfieldIDWorkaround::is_static_jfieldID(field)) { JNIid* id = jfieldIDWorkaround::from_static_jfieldID(field); - int offset = id->offset(); - klassOop holder = id->holder(); - found = instanceKlass::cast(holder)->find_local_field_from_offset(offset, true, fd); + found = id->find_local_field(fd); } else { // Non-static field. The fieldID is really the offset of the field within the object. int offset = jfieldIDWorkaround::from_instance_jfieldID(k, field);
--- a/src/share/vm/prims/jvmtiEventController.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/jvmtiEventController.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/jvmtiExport.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/jvmtiExport.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1805,6 +1805,10 @@ void JvmtiExport::post_dynamic_code_generated_internal(const char *name, const void *code_begin, const void *code_end) { JavaThread* thread = JavaThread::current(); + // In theory everyone coming thru here is in_vm but we need to be certain + // because a callee will do a vm->native transition + ThreadInVMfromUnknown __tiv; + EVT_TRIG_TRACE(JVMTI_EVENT_DYNAMIC_CODE_GENERATED, ("JVMTI [%s] method dynamic code generated event triggered", JvmtiTrace::safe_get_thread_name(thread))); @@ -1826,19 +1830,18 @@ } void JvmtiExport::post_dynamic_code_generated(const char *name, const void *code_begin, const void *code_end) { - // In theory everyone coming thru here is in_vm but we need to be certain - // because a callee will do a vm->native transition - ThreadInVMfromUnknown __tiv; jvmtiPhase phase = JvmtiEnv::get_phase(); if (phase == JVMTI_PHASE_PRIMORDIAL || phase == JVMTI_PHASE_START) { post_dynamic_code_generated_internal(name, code_begin, code_end); - return; + } else { + // It may not be safe to post the event from this thread. Defer all + // postings to the service thread so that it can perform them in a safe + // context and in-order. + MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); + JvmtiDeferredEvent event = JvmtiDeferredEvent::dynamic_code_generated_event( + name, code_begin, code_end); + JvmtiDeferredEventQueue::enqueue(event); } - - // Blocks until everything now in the queue has been posted - JvmtiDeferredEventQueue::flush_queue(Thread::current()); - - post_dynamic_code_generated_internal(name, code_begin, code_end); }
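The rewritten post_dynamic_code_generated above no longer forces a thread-state transition and posts from whatever thread generated the code; outside the primordial/start phases it enqueues a deferred event for the service thread, so the posting happens from a safe context and in order. A generic, self-contained sketch of that hand-off in standard C++ (std::thread and a mutex-protected queue stand in for HotSpot's ServiceThread and Service_lock; none of these names are the VM's):

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <queue>
#include <string>
#include <thread>

// Producers never post events themselves; they enqueue work for a single
// consumer thread, which posts from its own context and preserves FIFO order.
struct DeferredEvent {
  std::string name;
};

static std::mutex mu;
static std::condition_variable cv;
static std::queue<DeferredEvent> pending;
static bool shutting_down = false;

static void enqueue(DeferredEvent e) {            // callable from any thread
  std::lock_guard<std::mutex> lock(mu);
  pending.push(std::move(e));
  cv.notify_one();
}

static void service_thread_loop() {               // dedicated consumer
  std::unique_lock<std::mutex> lock(mu);
  for (;;) {
    cv.wait(lock, [] { return shutting_down || !pending.empty(); });
    while (!pending.empty()) {
      DeferredEvent e = std::move(pending.front());
      pending.pop();
      lock.unlock();                               // post outside the lock
      std::printf("posted dynamic code event: %s\n", e.name.c_str());
      lock.lock();
    }
    if (shutting_down) return;
  }
}

int main() {
  std::thread service(service_thread_loop);
  enqueue({"stub one"});
  enqueue({"stub two"});
  {
    std::lock_guard<std::mutex> lock(mu);
    shutting_down = true;
  }
  cv.notify_one();
  service.join();
  return 0;
}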
--- a/src/share/vm/prims/jvmtiExport.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/jvmtiExport.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -140,12 +140,12 @@ char sig_type, jvalue *value); - private: // posts a DynamicCodeGenerated event (internal/private implementation). // The public post_dynamic_code_generated* functions make use of the - // internal implementation. + // internal implementation. Also called from JvmtiDeferredEvent::post() static void post_dynamic_code_generated_internal(const char *name, const void *code_begin, const void *code_end) KERNEL_RETURN; + private: // GenerateEvents support to allow posting of CompiledMethodLoad and // DynamicCodeGenerated events for a given environment.
--- a/src/share/vm/prims/jvmtiImpl.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/jvmtiImpl.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -918,31 +918,61 @@ JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_load_event( nmethod* nm) { JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_LOAD); - event.set_compiled_method_load(nm); - nmethodLocker::lock_nmethod(nm); // will be unlocked when posted + event._event_data.compiled_method_load = nm; + // Keep the nmethod alive until the ServiceThread can process + // this deferred event. + nmethodLocker::lock_nmethod(nm); return event; } JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_unload_event( - jmethodID id, const void* code) { + nmethod* nm, jmethodID id, const void* code) { JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_UNLOAD); - event.set_compiled_method_unload(id, code); + event._event_data.compiled_method_unload.nm = nm; + event._event_data.compiled_method_unload.method_id = id; + event._event_data.compiled_method_unload.code_begin = code; + // Keep the nmethod alive until the ServiceThread can process + // this deferred event. This will keep the memory for the + // generated code from being reused too early. We pass + // zombie_ok == true here so that our nmethod that was just + // made into a zombie can be locked. + nmethodLocker::lock_nmethod(nm, true /* zombie_ok */); + return event; +} +JvmtiDeferredEvent JvmtiDeferredEvent::dynamic_code_generated_event( + const char* name, const void* code_begin, const void* code_end) { + JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_DYNAMIC_CODE_GENERATED); + event._event_data.dynamic_code_generated.name = name; + event._event_data.dynamic_code_generated.code_begin = code_begin; + event._event_data.dynamic_code_generated.code_end = code_end; return event; } void JvmtiDeferredEvent::post() { + assert(ServiceThread::is_service_thread(Thread::current()), + "Service thread must post enqueued events"); switch(_type) { - case TYPE_COMPILED_METHOD_LOAD: - JvmtiExport::post_compiled_method_load(compiled_method_load()); - nmethodLocker::unlock_nmethod(compiled_method_load()); + case TYPE_COMPILED_METHOD_LOAD: { + nmethod* nm = _event_data.compiled_method_load; + JvmtiExport::post_compiled_method_load(nm); + // done with the deferred event so unlock the nmethod + nmethodLocker::unlock_nmethod(nm); break; - case TYPE_COMPILED_METHOD_UNLOAD: + } + case TYPE_COMPILED_METHOD_UNLOAD: { + nmethod* nm = _event_data.compiled_method_unload.nm; JvmtiExport::post_compiled_method_unload( - compiled_method_unload_method_id(), - compiled_method_unload_code_begin()); + _event_data.compiled_method_unload.method_id, + _event_data.compiled_method_unload.code_begin); + // done with the deferred event so unlock the nmethod + nmethodLocker::unlock_nmethod(nm); break; - case TYPE_FLUSH: - JvmtiDeferredEventQueue::flush_complete(flush_state_addr()); + } + case TYPE_DYNAMIC_CODE_GENERATED: + JvmtiExport::post_dynamic_code_generated_internal( + _event_data.dynamic_code_generated.name, + _event_data.dynamic_code_generated.code_begin, + _event_data.dynamic_code_generated.code_end); break; default: ShouldNotReachHere(); @@ -1065,54 +1095,4 @@ } } -enum { - // Random - used for debugging - FLUSHING = 0x50403020, - FLUSHED = 0x09080706 -}; - -void JvmtiDeferredEventQueue::flush_queue(Thread* thread) { - - volatile int flush_state = FLUSHING; - - JvmtiDeferredEvent flush(JvmtiDeferredEvent::TYPE_FLUSH); - flush.set_flush_state_addr((int*)&flush_state); - - if 
(ServiceThread::is_service_thread(thread)) { - // If we are the service thread we have to post all preceding events - // Use the flush event as a token to indicate when we can stop - JvmtiDeferredEvent event; - { - MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); - enqueue(flush); - event = dequeue(); - } - while (!event.is_flush_event() || - event.flush_state_addr() != &flush_state) { - event.post(); - { - MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); - event = dequeue(); - } - } - } else { - // Wake up the service thread so it will process events. When it gets - // to the flush event it will set 'flush_complete' and notify us. - MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); - enqueue(flush); - while (flush_state != FLUSHED) { - assert(flush_state == FLUSHING || flush_state == FLUSHED, - "only valid values for this"); - Service_lock->wait(Mutex::_no_safepoint_check_flag); - } - } -} - -void JvmtiDeferredEventQueue::flush_complete(int* state_addr) { - assert(state_addr != NULL && *state_addr == FLUSHING, "must be"); - MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); - *state_addr = FLUSHED; - Service_lock->notify_all(); -} - #endif // ndef KERNEL
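JvmtiDeferredEvent above now carries its payload directly in a tagged union filled in by factory methods, and post() switches on the tag on the service thread. A compact sketch of that shape with stand-in types (not the VM's nmethod/jmethodID machinery):

#include <cstdio>

// One queue element that can describe several kinds of deferred events,
// distinguished by a type tag; only trivial payloads go in the union.
class DeferredEvent {
  enum Type { TYPE_NONE, TYPE_METHOD_LOAD, TYPE_DYNAMIC_CODE_GENERATED } _type;
  union {
    const void* method_load_code;
    struct {
      const char* name;
      const void* begin;
      const void* end;
    } dynamic_code_generated;
  } _data;

  explicit DeferredEvent(Type t) : _type(t) {}

 public:
  DeferredEvent() : _type(TYPE_NONE) {}

  // Factory methods fill in the union member that matches the tag.
  static DeferredEvent method_load_event(const void* code) {
    DeferredEvent e(TYPE_METHOD_LOAD);
    e._data.method_load_code = code;
    return e;
  }
  static DeferredEvent dynamic_code_generated_event(const char* name,
                                                    const void* begin,
                                                    const void* end) {
    DeferredEvent e(TYPE_DYNAMIC_CODE_GENERATED);
    e._data.dynamic_code_generated.name = name;
    e._data.dynamic_code_generated.begin = begin;
    e._data.dynamic_code_generated.end = end;
    return e;
  }

  // The consumer switches on the tag exactly once per dequeued event.
  void post() const {
    switch (_type) {
      case TYPE_METHOD_LOAD:
        std::printf("compiled method load at %p\n", _data.method_load_code);
        break;
      case TYPE_DYNAMIC_CODE_GENERATED:
        std::printf("dynamic code '%s' in [%p, %p)\n",
                    _data.dynamic_code_generated.name,
                    _data.dynamic_code_generated.begin,
                    _data.dynamic_code_generated.end);
        break;
      default:
        break;
    }
  }
};

int main() {
  DeferredEvent::dynamic_code_generated_event("adapter blob", nullptr, nullptr).post();
  DeferredEvent::method_load_event(nullptr).post();
  return 0;
}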
--- a/src/share/vm/prims/jvmtiImpl.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/jvmtiImpl.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -451,59 +451,26 @@ TYPE_NONE, TYPE_COMPILED_METHOD_LOAD, TYPE_COMPILED_METHOD_UNLOAD, - TYPE_FLUSH // pseudo-event used to implement flush_queue() + TYPE_DYNAMIC_CODE_GENERATED } Type; Type _type; union { nmethod* compiled_method_load; struct { + nmethod* nm; jmethodID method_id; const void* code_begin; } compiled_method_unload; - int* flush_state_addr; + struct { + const char* name; + const void* code_begin; + const void* code_end; + } dynamic_code_generated; } _event_data; JvmtiDeferredEvent(Type t) : _type(t) {} - void set_compiled_method_load(nmethod* nm) { - assert(_type == TYPE_COMPILED_METHOD_LOAD, "must be"); - _event_data.compiled_method_load = nm; - } - - nmethod* compiled_method_load() const { - assert(_type == TYPE_COMPILED_METHOD_LOAD, "must be"); - return _event_data.compiled_method_load; - } - - void set_compiled_method_unload(jmethodID id, const void* code) { - assert(_type == TYPE_COMPILED_METHOD_UNLOAD, "must be"); - _event_data.compiled_method_unload.method_id = id; - _event_data.compiled_method_unload.code_begin = code; - } - - jmethodID compiled_method_unload_method_id() const { - assert(_type == TYPE_COMPILED_METHOD_UNLOAD, "must be"); - return _event_data.compiled_method_unload.method_id; - } - - const void* compiled_method_unload_code_begin() const { - assert(_type == TYPE_COMPILED_METHOD_UNLOAD, "must be"); - return _event_data.compiled_method_unload.code_begin; - } - - bool is_flush_event() const { return _type == TYPE_FLUSH; } - - int* flush_state_addr() const { - assert(is_flush_event(), "must be"); - return _event_data.flush_state_addr; - } - - void set_flush_state_addr(int* flag) { - assert(is_flush_event(), "must be"); - _event_data.flush_state_addr = flag; - } - public: JvmtiDeferredEvent() : _type(TYPE_NONE) {} @@ -511,8 +478,11 @@ // Factory methods static JvmtiDeferredEvent compiled_method_load_event(nmethod* nm) KERNEL_RETURN_(JvmtiDeferredEvent()); - static JvmtiDeferredEvent compiled_method_unload_event( + static JvmtiDeferredEvent compiled_method_unload_event(nmethod* nm, jmethodID id, const void* code) KERNEL_RETURN_(JvmtiDeferredEvent()); + static JvmtiDeferredEvent dynamic_code_generated_event( + const char* name, const void* begin, const void* end) + KERNEL_RETURN_(JvmtiDeferredEvent()); // Actually posts the event. void post() KERNEL_RETURN; @@ -548,25 +518,12 @@ // Transfers events from the _pending_list to the _queue. static void process_pending_events() KERNEL_RETURN; - static void flush_complete(int* flush_state) KERNEL_RETURN; - public: // Must be holding Service_lock when calling these static bool has_events() KERNEL_RETURN_(false); static void enqueue(const JvmtiDeferredEvent& event) KERNEL_RETURN; static JvmtiDeferredEvent dequeue() KERNEL_RETURN_(JvmtiDeferredEvent()); - // This call blocks until all events enqueued prior to this call - // have been posted. The Service_lock is acquired and waited upon. - // - // Implemented by creating a "flush" event and placing it in the queue. - // When the flush event is "posted" it will call flush_complete(), which - // will release the caller. - // - // Can be called by any thread (maybe even the service thread itself). - // Not necessary for the caller to be a JavaThread. 
- static void flush_queue(Thread* current) KERNEL_RETURN; - // Used to enqueue events without using a lock, for times (such as during // safepoint) when we can't or don't want to lock the Service_lock. //
--- a/src/share/vm/prims/jvmtiManageCapabilities.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/jvmtiManageCapabilities.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -319,8 +319,11 @@ bool enter_all_methods = interp_events || avail.can_generate_breakpoint_events; - UseFastEmptyMethods = !enter_all_methods; - UseFastAccessorMethods = !enter_all_methods; + if (enter_all_methods) { + // Disable these when tracking the bytecodes + UseFastEmptyMethods = false; + UseFastAccessorMethods = false; + } if (avail.can_generate_breakpoint_events) { RewriteFrequentPairs = false;
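The capability recomputation above now only turns UseFastEmptyMethods and UseFastAccessorMethods off when bytecode-level events require it; the old unconditional assignment could also flip them back on, overriding a user who had disabled them on the command line. A tiny sketch of the pattern with a stand-in flag (not the real VM globals):

#include <cstdio>

// Stand-in for a command-line flag the user may have disabled explicitly.
static bool UseFastAccessors = false;   // imagine -XX:-UseFastAccessorMethods

// Recomputation should only disable the optimization when a capability
// needs it; it must not re-enable a flag behind the user's back.
static void recompute_enabled_capabilities(bool interp_events) {
  if (interp_events) {
    UseFastAccessors = false;           // forced off while tracking bytecodes
  }
  // No else-branch restoring 'true' -- the old unconditional assignment did that.
}

int main() {
  recompute_enabled_capabilities(false);
  std::printf("UseFastAccessors = %d\n", UseFastAccessors);  // still 0
  return 0;
}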
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1084,7 +1084,10 @@ jbyte old_tag = old_cp->tag_at(old_i).value(); switch (old_tag) { case JVM_CONSTANT_Class: + case JVM_CONSTANT_UnresolvedClass: // revert the copy to JVM_CONSTANT_UnresolvedClass + // May be resolving while calling this so do the same for + // JVM_CONSTANT_UnresolvedClass (klass_name_at() deals with transition) (*merge_cp_p)->unresolved_klass_at_put(old_i, old_cp->klass_name_at(old_i)); break; @@ -3347,11 +3350,12 @@ for (Klass *subk = ik->subklass(); subk != NULL; subk = subk->next_sibling()) { - klassOop sub = subk->as_klassOop(); - instanceKlass *subik = (instanceKlass *)sub->klass_part(); - - // recursively do subclasses of the current subclass - increment_class_counter(subik, THREAD); + if (subk->oop_is_instance()) { + // Only update instanceKlasses + instanceKlass *subik = (instanceKlass*)subk; + // recursively do subclasses of the current subclass + increment_class_counter(subik, THREAD); + } } }
--- a/src/share/vm/prims/jvmtiRedefineClasses.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/jvmtiRedefineClasses.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/jvmtiTagMap.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/jvmtiTagMap.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "classfile/systemDictionary.hpp" #include "classfile/vmSymbols.hpp" #include "jvmtifiles/jvmtiEnv.hpp" +#include "oops/instanceMirrorKlass.hpp" #include "oops/objArrayKlass.hpp" #include "oops/oop.inline2.hpp" #include "prims/jvmtiEventController.hpp" @@ -2594,6 +2595,11 @@ if (o->is_klass()) { klassOop k = (klassOop)o; o = Klass::cast(k)->java_mirror(); + if (o == NULL) { + // Classes without mirrors don't correspond to real Java + // classes so just ignore them. + return; + } } else { // SystemDictionary::always_strong_oops_do reports the application @@ -2834,10 +2840,10 @@ // verify that a static oop field is in range static inline bool verify_static_oop(instanceKlass* ik, - klassOop k, int offset) { - address obj_p = (address)k + offset; - address start = (address)ik->start_of_static_fields(); - address end = start + (ik->static_oop_field_size() * heapOopSize); + oop mirror, int offset) { + address obj_p = (address)mirror + offset; + address start = (address)instanceMirrorKlass::start_of_static_fields(mirror); + address end = start + (java_lang_Class::static_oop_field_count(mirror) * heapOopSize); assert(end >= start, "sanity check"); if (obj_p >= start && obj_p < end) { @@ -2938,8 +2944,8 @@ ClassFieldDescriptor* field = field_map->field_at(i); char type = field->field_type(); if (!is_primitive_field_type(type)) { - oop fld_o = k->obj_field(field->field_offset()); - assert(verify_static_oop(ik, k, field->field_offset()), "sanity check"); + oop fld_o = mirror->obj_field(field->field_offset()); + assert(verify_static_oop(ik, mirror, field->field_offset()), "sanity check"); if (fld_o != NULL) { int slot = field->field_index(); if (!CallbackInvoker::report_static_field_reference(mirror, fld_o, slot)) { @@ -2949,7 +2955,7 @@ } } else { if (is_reporting_primitive_fields()) { - address addr = (address)k + field->field_offset(); + address addr = (address)mirror + field->field_offset(); int slot = field->field_index(); if (!CallbackInvoker::report_primitive_static_field(mirror, slot, addr, type)) { delete field_map;
--- a/src/share/vm/prims/jvmtiTagMap.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/jvmtiTagMap.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/methodHandleWalk.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/methodHandleWalk.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -36,7 +36,7 @@ // MethodHandleChain void MethodHandleChain::set_method_handle(Handle mh, TRAPS) { - if (!java_dyn_MethodHandle::is_instance(mh())) lose("bad method handle", CHECK); + if (!java_lang_invoke_MethodHandle::is_instance(mh())) lose("bad method handle", CHECK); // set current method handle and unpack partially _method_handle = mh; @@ -47,21 +47,21 @@ _conversion = -1; _last_invoke = Bytecodes::_nop; //arbitrary non-garbage - if (sun_dyn_DirectMethodHandle::is_instance(mh())) { + if (java_lang_invoke_DirectMethodHandle::is_instance(mh())) { set_last_method(mh(), THREAD); return; } - if (sun_dyn_AdapterMethodHandle::is_instance(mh())) { + if (java_lang_invoke_AdapterMethodHandle::is_instance(mh())) { _conversion = AdapterMethodHandle_conversion(); assert(_conversion != -1, "bad conv value"); - assert(sun_dyn_BoundMethodHandle::is_instance(mh()), "also BMH"); + assert(java_lang_invoke_BoundMethodHandle::is_instance(mh()), "also BMH"); } - if (sun_dyn_BoundMethodHandle::is_instance(mh())) { + if (java_lang_invoke_BoundMethodHandle::is_instance(mh())) { if (!is_adapter()) // keep AMH and BMH separate in this model _is_bound = true; _arg_slot = BoundMethodHandle_vmargslot(); oop target = MethodHandle_vmtarget_oop(); - if (!is_bound() || java_dyn_MethodHandle::is_instance(target)) { + if (!is_bound() || java_lang_invoke_MethodHandle::is_instance(target)) { _arg_type = compute_bound_arg_type(target, NULL, _arg_slot, CHECK); } else if (target != NULL && target->is_method()) { methodOop m = (methodOop) target; @@ -100,13 +100,12 @@ BasicType MethodHandleChain::compute_bound_arg_type(oop target, methodOop m, int arg_slot, TRAPS) { // There is no direct indication of whether the argument is primitive or not. // It is implied by the _vmentry code, and by the MethodType of the target. - // FIXME: Make it explicit MethodHandleImpl refactors out from MethodHandle BasicType arg_type = T_VOID; if (target != NULL) { - oop mtype = java_dyn_MethodHandle::type(target); + oop mtype = java_lang_invoke_MethodHandle::type(target); int arg_num = MethodHandles::argument_slot_to_argnum(mtype, arg_slot); if (arg_num >= 0) { - oop ptype = java_dyn_MethodType::ptype(mtype, arg_num); + oop ptype = java_lang_invoke_MethodType::ptype(mtype, arg_num); arg_type = java_lang_Class::as_BasicType(ptype); } } else if (m != NULL) { @@ -205,28 +204,28 @@ int arg_slot = chain().adapter_arg_slot(); SlotState* arg_state = slot_state(arg_slot); if (arg_state == NULL - && conv_op > sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW) { + && conv_op > java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW) { lose("bad argument index", CHECK_(empty)); } // perform the adapter action switch (chain().adapter_conversion_op()) { - case sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY: + case java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY: // No changes to arguments; pass the bits through. break; - case sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW: { + case java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW: { // To keep the verifier happy, emit bitwise ("raw") conversions as needed. // See MethodHandles::same_basic_type_for_arguments for allowed conversions. 
Handle incoming_mtype(THREAD, chain().method_type_oop()); oop outgoing_mh_oop = chain().vmtarget_oop(); - if (!java_dyn_MethodHandle::is_instance(outgoing_mh_oop)) + if (!java_lang_invoke_MethodHandle::is_instance(outgoing_mh_oop)) lose("outgoing target not a MethodHandle", CHECK_(empty)); - Handle outgoing_mtype(THREAD, java_dyn_MethodHandle::type(outgoing_mh_oop)); + Handle outgoing_mtype(THREAD, java_lang_invoke_MethodHandle::type(outgoing_mh_oop)); outgoing_mh_oop = NULL; // GC safety - int nptypes = java_dyn_MethodType::ptype_count(outgoing_mtype()); - if (nptypes != java_dyn_MethodType::ptype_count(incoming_mtype())) + int nptypes = java_lang_invoke_MethodType::ptype_count(outgoing_mtype()); + if (nptypes != java_lang_invoke_MethodType::ptype_count(incoming_mtype())) lose("incoming and outgoing parameter count do not agree", CHECK_(empty)); for (int i = 0, slot = _outgoing.length() - 1; slot >= 0; slot--) { @@ -236,8 +235,8 @@ klassOop in_klass = NULL; klassOop out_klass = NULL; - BasicType inpbt = java_lang_Class::as_BasicType(java_dyn_MethodType::ptype(incoming_mtype(), i), &in_klass); - BasicType outpbt = java_lang_Class::as_BasicType(java_dyn_MethodType::ptype(outgoing_mtype(), i), &out_klass); + BasicType inpbt = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::ptype(incoming_mtype(), i), &in_klass); + BasicType outpbt = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::ptype(outgoing_mtype(), i), &out_klass); assert(inpbt == arg.basic_type(), "sanity"); if (inpbt != outpbt) { @@ -255,8 +254,8 @@ i++; // We need to skip void slots at the top of the loop. } - BasicType inrbt = java_lang_Class::as_BasicType(java_dyn_MethodType::rtype(incoming_mtype())); - BasicType outrbt = java_lang_Class::as_BasicType(java_dyn_MethodType::rtype(outgoing_mtype())); + BasicType inrbt = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(incoming_mtype())); + BasicType outrbt = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(outgoing_mtype())); if (inrbt != outrbt) { if (inrbt == T_INT && outrbt == T_VOID) { // See comments in MethodHandles::same_basic_type_for_arguments. @@ -268,7 +267,7 @@ break; } - case sun_dyn_AdapterMethodHandle::OP_CHECK_CAST: { + case java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST: { // checkcast the Nth outgoing argument in place klassOop dest_klass = NULL; BasicType dest = java_lang_Class::as_BasicType(chain().adapter_arg_oop(), &dest_klass); @@ -281,7 +280,7 @@ break; } - case sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM: { + case java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM: { // i2l, etc., on the Nth outgoing argument in place BasicType src = chain().adapter_conversion_src_type(), dest = chain().adapter_conversion_dest_type(); @@ -306,7 +305,7 @@ break; } - case sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM: { + case java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM: { // checkcast to wrapper type & call intValue, etc. 
BasicType dest = chain().adapter_conversion_dest_type(); ArgToken arg = arg_state->_arg; @@ -324,7 +323,7 @@ break; } - case sun_dyn_AdapterMethodHandle::OP_PRIM_TO_REF: { + case java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF: { // call wrapper type.valueOf BasicType src = chain().adapter_conversion_src_type(); ArgToken arg = arg_state->_arg; @@ -340,7 +339,7 @@ break; } - case sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS: { + case java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS: { int dest_arg_slot = chain().adapter_conversion_vminfo(); if (!slot_has_argument(dest_arg_slot)) { lose("bad swap index", CHECK_(empty)); @@ -353,7 +352,7 @@ break; } - case sun_dyn_AdapterMethodHandle::OP_ROT_ARGS: { + case java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS: { int dest_arg_slot = chain().adapter_conversion_vminfo(); if (!slot_has_argument(dest_arg_slot) || arg_slot == dest_arg_slot) { lose("bad rotate index", CHECK_(empty)); @@ -379,7 +378,7 @@ break; } - case sun_dyn_AdapterMethodHandle::OP_DUP_ARGS: { + case java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS: { int dup_slots = chain().adapter_conversion_stack_pushes(); if (dup_slots <= 0) { lose("bad dup count", CHECK_(empty)); @@ -393,7 +392,7 @@ break; } - case sun_dyn_AdapterMethodHandle::OP_DROP_ARGS: { + case java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS: { int drop_slots = -chain().adapter_conversion_stack_pushes(); if (drop_slots <= 0) { lose("bad drop count", CHECK_(empty)); @@ -407,12 +406,12 @@ break; } - case sun_dyn_AdapterMethodHandle::OP_COLLECT_ARGS: { //NYI, may GC + case java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS: { //NYI, may GC lose("unimplemented", CHECK_(empty)); break; } - case sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS: { + case java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS: { klassOop array_klass_oop = NULL; BasicType array_type = java_lang_Class::as_BasicType(chain().adapter_arg_oop(), &array_klass_oop); @@ -470,8 +469,8 @@ break; } - case sun_dyn_AdapterMethodHandle::OP_FLYBY: //NYI, runs Java code - case sun_dyn_AdapterMethodHandle::OP_RICOCHET: //NYI, runs Java code + case java_lang_invoke_AdapterMethodHandle::OP_FLYBY: //NYI, runs Java code + case java_lang_invoke_AdapterMethodHandle::OP_RICOCHET: //NYI, runs Java code lose("unimplemented", CHECK_(empty)); break; @@ -533,7 +532,7 @@ // void MethodHandleWalker::walk_incoming_state(TRAPS) { Handle mtype(THREAD, chain().method_type_oop()); - int nptypes = java_dyn_MethodType::ptype_count(mtype()); + int nptypes = java_lang_invoke_MethodType::ptype_count(mtype()); _outgoing_argc = nptypes; int argp = nptypes - 1; if (argp >= 0) { @@ -542,7 +541,7 @@ for (int i = 0; i < nptypes; i++) { klassOop arg_type_klass = NULL; BasicType arg_type = java_lang_Class::as_BasicType( - java_dyn_MethodType::ptype(mtype(), i), &arg_type_klass); + java_lang_invoke_MethodType::ptype(mtype(), i), &arg_type_klass); int index = new_local_index(arg_type); ArgToken arg = make_parameter(arg_type, arg_type_klass, index, CHECK); debug_only(arg_type_klass = (klassOop) NULL); @@ -556,7 +555,7 @@ // call make_parameter at the end of the list for the return type klassOop ret_type_klass = NULL; BasicType ret_type = java_lang_Class::as_BasicType( - java_dyn_MethodType::rtype(mtype()), &ret_type_klass); + java_lang_invoke_MethodType::rtype(mtype()), &ret_type_klass); ArgToken ret = make_parameter(ret_type, ret_type_klass, -1, CHECK); // ignore ret; client can catch it if needed } @@ -631,7 +630,7 @@ // Get return type klass. 
Handle first_mtype(THREAD, chain().method_type_oop()); // _rklass is NULL for primitives. - _rtype = java_lang_Class::as_BasicType(java_dyn_MethodType::rtype(first_mtype()), &_rklass); + _rtype = java_lang_Class::as_BasicType(java_lang_invoke_MethodType::rtype(first_mtype()), &_rklass); if (_rtype == T_ARRAY) _rtype = T_OBJECT; int params = _callee->size_of_parameters(); // Incoming arguments plus receiver. @@ -960,6 +959,10 @@ if (m == NULL) { // Get the intrinsic methodOop. m = vmIntrinsics::method_for(iid); + if (m == NULL) { + ArgToken zero; + lose(vmIntrinsics::name_at(iid), CHECK_(zero)); + } } klassOop klass = m->method_holder(); @@ -1396,7 +1399,7 @@ void print_method_handle(oop mh) { if (!mh->is_oop()) { tty->print_cr("*** not a method handle: "INTPTR_FORMAT, (intptr_t)mh); - } else if (java_dyn_MethodHandle::is_instance(mh)) { + } else if (java_lang_invoke_MethodHandle::is_instance(mh)) { //MethodHandlePrinter::print(mh); } else { tty->print("*** not a method handle: ");
--- a/src/share/vm/prims/methodHandleWalk.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/methodHandleWalk.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -48,13 +48,13 @@ void set_last_method(oop target, TRAPS); static BasicType compute_bound_arg_type(oop target, methodOop m, int arg_slot, TRAPS); - oop MethodHandle_type_oop() { return java_dyn_MethodHandle::type(method_handle_oop()); } - oop MethodHandle_vmtarget_oop() { return java_dyn_MethodHandle::vmtarget(method_handle_oop()); } - int MethodHandle_vmslots() { return java_dyn_MethodHandle::vmslots(method_handle_oop()); } - int DirectMethodHandle_vmindex() { return sun_dyn_DirectMethodHandle::vmindex(method_handle_oop()); } - oop BoundMethodHandle_argument_oop() { return sun_dyn_BoundMethodHandle::argument(method_handle_oop()); } - int BoundMethodHandle_vmargslot() { return sun_dyn_BoundMethodHandle::vmargslot(method_handle_oop()); } - int AdapterMethodHandle_conversion() { return sun_dyn_AdapterMethodHandle::conversion(method_handle_oop()); } + oop MethodHandle_type_oop() { return java_lang_invoke_MethodHandle::type(method_handle_oop()); } + oop MethodHandle_vmtarget_oop() { return java_lang_invoke_MethodHandle::vmtarget(method_handle_oop()); } + int MethodHandle_vmslots() { return java_lang_invoke_MethodHandle::vmslots(method_handle_oop()); } + int DirectMethodHandle_vmindex() { return java_lang_invoke_DirectMethodHandle::vmindex(method_handle_oop()); } + oop BoundMethodHandle_argument_oop() { return java_lang_invoke_BoundMethodHandle::argument(method_handle_oop()); } + int BoundMethodHandle_vmargslot() { return java_lang_invoke_BoundMethodHandle::vmargslot(method_handle_oop()); } + int AdapterMethodHandle_conversion() { return java_lang_invoke_AdapterMethodHandle::conversion(method_handle_oop()); } public: MethodHandleChain(Handle root, TRAPS) @@ -343,6 +343,7 @@ int cpool_symbol_put(int tag, Symbol* con) { if (con == NULL) return 0; ConstantValue* cv = new ConstantValue(tag, con); + con->increment_refcount(); return _constants.append(cv); }
--- a/src/share/vm/prims/methodHandles.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/methodHandles.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -112,7 +112,7 @@ // MethodHandles::generate_adapters // void MethodHandles::generate_adapters() { - if (!EnableMethodHandles || SystemDictionary::MethodHandle_klass() == NULL) return; + if (!EnableInvokeDynamic || SystemDictionary::MethodHandle_klass() == NULL) return; assert(_adapter_code == NULL, "generate only once"); @@ -143,7 +143,7 @@ void MethodHandles::set_enabled(bool z) { if (_enabled != z) { - guarantee(z && EnableMethodHandles, "can only enable once, and only if -XX:+EnableMethodHandles"); + guarantee(z && EnableInvokeDynamic, "can only enable once, and only if -XX:+EnableInvokeDynamic"); _enabled = z; } } @@ -163,9 +163,9 @@ // or it may use the klass/index form; both forms mean the same thing. methodOop m = decode_methodOop(methodOop(vmtarget), decode_flags_result); if ((decode_flags_result & _dmf_has_receiver) != 0 - && java_dyn_MethodType::is_instance(mtype)) { + && java_lang_invoke_MethodType::is_instance(mtype)) { // Extract receiver type restriction from mtype.ptypes[0]. - objArrayOop ptypes = java_dyn_MethodType::ptypes(mtype); + objArrayOop ptypes = java_lang_invoke_MethodType::ptypes(mtype); oop ptype0 = (ptypes == NULL || ptypes->length() < 1) ? oop(NULL) : ptypes->obj_at(0); if (java_lang_Class::is_instance(ptype0)) receiver_limit_result = java_lang_Class::as_klassOop(ptype0); @@ -199,18 +199,18 @@ // (MemberName is the non-operational name used for queries and setup.) methodOop MethodHandles::decode_DirectMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) { - oop vmtarget = sun_dyn_DirectMethodHandle::vmtarget(mh); - int vmindex = sun_dyn_DirectMethodHandle::vmindex(mh); - oop mtype = sun_dyn_DirectMethodHandle::type(mh); + oop vmtarget = java_lang_invoke_DirectMethodHandle::vmtarget(mh); + int vmindex = java_lang_invoke_DirectMethodHandle::vmindex(mh); + oop mtype = java_lang_invoke_DirectMethodHandle::type(mh); return decode_vmtarget(vmtarget, vmindex, mtype, receiver_limit_result, decode_flags_result); } methodOop MethodHandles::decode_BoundMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) { - assert(sun_dyn_BoundMethodHandle::is_instance(mh), ""); + assert(java_lang_invoke_BoundMethodHandle::is_instance(mh), ""); assert(mh->klass() != SystemDictionary::AdapterMethodHandle_klass(), ""); for (oop bmh = mh;;) { // Bound MHs can be stacked to bind several arguments. - oop target = java_dyn_MethodHandle::vmtarget(bmh); + oop target = java_lang_invoke_MethodHandle::vmtarget(bmh); if (target == NULL) return NULL; decode_flags_result |= MethodHandles::_dmf_binds_argument; klassOop tk = target->klass(); @@ -218,7 +218,7 @@ bmh = target; continue; } else { - if (java_dyn_MethodHandle::is_subclass(tk)) { + if (java_lang_invoke_MethodHandle::is_subclass(tk)) { //assert(tk == SystemDictionary::DirectMethodHandle_klass(), "end of BMH chain must be DMH"); return decode_MethodHandle(target, receiver_limit_result, decode_flags_result); } else { @@ -240,9 +240,9 @@ assert(mh->klass() == SystemDictionary::AdapterMethodHandle_klass(), ""); for (oop amh = mh;;) { // Adapter MHs can be stacked to convert several arguments. 
- int conv_op = adapter_conversion_op(sun_dyn_AdapterMethodHandle::conversion(amh)); + int conv_op = adapter_conversion_op(java_lang_invoke_AdapterMethodHandle::conversion(amh)); decode_flags_result |= (_dmf_adapter_lsb << conv_op) & _DMF_ADAPTER_MASK; - oop target = java_dyn_MethodHandle::vmtarget(amh); + oop target = java_lang_invoke_MethodHandle::vmtarget(amh); if (target == NULL) return NULL; klassOop tk = target->klass(); if (tk == SystemDictionary::AdapterMethodHandle_klass()) { @@ -258,14 +258,14 @@ methodOop MethodHandles::decode_MethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) { if (mh == NULL) return NULL; klassOop mhk = mh->klass(); - assert(java_dyn_MethodHandle::is_subclass(mhk), "must be a MethodHandle"); + assert(java_lang_invoke_MethodHandle::is_subclass(mhk), "must be a MethodHandle"); if (mhk == SystemDictionary::DirectMethodHandle_klass()) { return decode_DirectMethodHandle(mh, receiver_limit_result, decode_flags_result); } else if (mhk == SystemDictionary::BoundMethodHandle_klass()) { return decode_BoundMethodHandle(mh, receiver_limit_result, decode_flags_result); } else if (mhk == SystemDictionary::AdapterMethodHandle_klass()) { return decode_AdapterMethodHandle(mh, receiver_limit_result, decode_flags_result); - } else if (sun_dyn_BoundMethodHandle::is_subclass(mhk)) { + } else if (java_lang_invoke_BoundMethodHandle::is_subclass(mhk)) { // could be a JavaMethodHandle (but not an adapter MH) return decode_BoundMethodHandle(mh, receiver_limit_result, decode_flags_result); } else { @@ -308,7 +308,7 @@ } else if (xk == SystemDictionary::MemberName_klass()) { // Note: This only works if the MemberName has already been resolved. return decode_MemberName(x, receiver_limit_result, decode_flags_result); - } else if (java_dyn_MethodHandle::is_subclass(xk)) { + } else if (java_lang_invoke_MethodHandle::is_subclass(xk)) { return decode_MethodHandle(x, receiver_limit_result, decode_flags_result); } else if (xk == SystemDictionary::reflect_Method_klass()) { oop clazz = java_lang_reflect_Method::clazz(x); @@ -327,7 +327,7 @@ } else { // unrecognized object assert(!x->is_method(), "already checked"); - assert(!sun_dyn_MemberName::is_instance(x), "already checked"); + assert(!java_lang_invoke_MemberName::is_instance(x), "already checked"); } return NULL; } @@ -336,15 +336,15 @@ int MethodHandles::decode_MethodHandle_stack_pushes(oop mh) { if (mh->klass() == SystemDictionary::DirectMethodHandle_klass()) return 0; // no push/pop - int this_vmslots = java_dyn_MethodHandle::vmslots(mh); + int this_vmslots = java_lang_invoke_MethodHandle::vmslots(mh); int last_vmslots = 0; oop last_mh = mh; for (;;) { - oop target = java_dyn_MethodHandle::vmtarget(last_mh); + oop target = java_lang_invoke_MethodHandle::vmtarget(last_mh); if (target->klass() == SystemDictionary::DirectMethodHandle_klass()) { - last_vmslots = java_dyn_MethodHandle::vmslots(target); + last_vmslots = java_lang_invoke_MethodHandle::vmslots(target); break; - } else if (!java_dyn_MethodHandle::is_instance(target)) { + } else if (!java_lang_invoke_MethodHandle::is_instance(target)) { // might be klass or method assert(target->is_method(), "must get here with a direct ref to method"); last_vmslots = methodOop(target)->size_of_parameters(); @@ -361,16 +361,16 @@ // MemberName support -// import sun_dyn_MemberName.* +// import java_lang_invoke_MemberName.* enum { - IS_METHOD = sun_dyn_MemberName::MN_IS_METHOD, - IS_CONSTRUCTOR = sun_dyn_MemberName::MN_IS_CONSTRUCTOR, - IS_FIELD = 
sun_dyn_MemberName::MN_IS_FIELD, - IS_TYPE = sun_dyn_MemberName::MN_IS_TYPE, - SEARCH_SUPERCLASSES = sun_dyn_MemberName::MN_SEARCH_SUPERCLASSES, - SEARCH_INTERFACES = sun_dyn_MemberName::MN_SEARCH_INTERFACES, + IS_METHOD = java_lang_invoke_MemberName::MN_IS_METHOD, + IS_CONSTRUCTOR = java_lang_invoke_MemberName::MN_IS_CONSTRUCTOR, + IS_FIELD = java_lang_invoke_MemberName::MN_IS_FIELD, + IS_TYPE = java_lang_invoke_MemberName::MN_IS_TYPE, + SEARCH_SUPERCLASSES = java_lang_invoke_MemberName::MN_SEARCH_SUPERCLASSES, + SEARCH_INTERFACES = java_lang_invoke_MemberName::MN_SEARCH_INTERFACES, ALL_KINDS = IS_METHOD | IS_CONSTRUCTOR | IS_FIELD | IS_TYPE, - VM_INDEX_UNINITIALIZED = sun_dyn_MemberName::VM_INDEX_UNINITIALIZED + VM_INDEX_UNINITIALIZED = java_lang_invoke_MemberName::VM_INDEX_UNINITIALIZED }; Handle MethodHandles::new_MemberName(TRAPS) { @@ -405,10 +405,10 @@ if (!do_dispatch || (flags & IS_CONSTRUCTOR) || m->can_be_statically_bound()) vmindex = methodOopDesc::nonvirtual_vtable_index; // implies never any dispatch assert(vmindex != VM_INDEX_UNINITIALIZED, "Java sentinel value"); - sun_dyn_MemberName::set_vmtarget(mname_oop, vmtarget); - sun_dyn_MemberName::set_vmindex(mname_oop, vmindex); - sun_dyn_MemberName::set_flags(mname_oop, flags); - sun_dyn_MemberName::set_clazz(mname_oop, Klass::cast(m->method_holder())->java_mirror()); + java_lang_invoke_MemberName::set_vmtarget(mname_oop, vmtarget); + java_lang_invoke_MemberName::set_vmindex(mname_oop, vmindex); + java_lang_invoke_MemberName::set_flags(mname_oop, flags); + java_lang_invoke_MemberName::set_clazz(mname_oop, Klass::cast(m->method_holder())->java_mirror()); } void MethodHandles::init_MemberName(oop mname_oop, klassOop field_holder, AccessFlags mods, int offset) { @@ -416,21 +416,21 @@ oop vmtarget = field_holder; int vmindex = offset; // determines the field uniquely when combined with static bit assert(vmindex != VM_INDEX_UNINITIALIZED, "bad alias on vmindex"); - sun_dyn_MemberName::set_vmtarget(mname_oop, vmtarget); - sun_dyn_MemberName::set_vmindex(mname_oop, vmindex); - sun_dyn_MemberName::set_flags(mname_oop, flags); - sun_dyn_MemberName::set_clazz(mname_oop, Klass::cast(field_holder)->java_mirror()); + java_lang_invoke_MemberName::set_vmtarget(mname_oop, vmtarget); + java_lang_invoke_MemberName::set_vmindex(mname_oop, vmindex); + java_lang_invoke_MemberName::set_flags(mname_oop, flags); + java_lang_invoke_MemberName::set_clazz(mname_oop, Klass::cast(field_holder)->java_mirror()); } methodOop MethodHandles::decode_MemberName(oop mname, klassOop& receiver_limit_result, int& decode_flags_result) { - int flags = sun_dyn_MemberName::flags(mname); + int flags = java_lang_invoke_MemberName::flags(mname); if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) == 0) return NULL; // not invocable - oop vmtarget = sun_dyn_MemberName::vmtarget(mname); - int vmindex = sun_dyn_MemberName::vmindex(mname); + oop vmtarget = java_lang_invoke_MemberName::vmtarget(mname); + int vmindex = java_lang_invoke_MemberName::vmindex(mname); if (vmindex == VM_INDEX_UNINITIALIZED) return NULL; // not resolved methodOop m = decode_vmtarget(vmtarget, vmindex, NULL, receiver_limit_result, decode_flags_result); - oop clazz = sun_dyn_MemberName::clazz(mname); + oop clazz = java_lang_invoke_MemberName::clazz(mname); if (clazz != NULL && java_lang_Class::is_instance(clazz)) { klassOop klass = java_lang_Class::as_klassOop(clazz); if (klass != NULL) receiver_limit_result = klass; @@ -442,8 +442,8 @@ Symbol* MethodHandles::convert_to_signature(oop type_str, bool polymorphic, 
TRAPS) { - if (java_dyn_MethodType::is_instance(type_str)) { - return java_dyn_MethodType::as_signature(type_str, polymorphic, CHECK_NULL); + if (java_lang_invoke_MethodType::is_instance(type_str)) { + return java_lang_invoke_MethodType::as_signature(type_str, polymorphic, CHECK_NULL); } else if (java_lang_Class::is_instance(type_str)) { return java_lang_Class::as_signature(type_str, false, CHECK_NULL); } else if (java_lang_String::is_instance(type_str)) { @@ -461,7 +461,7 @@ // Resolving it plants a vmtarget/vmindex in it, // which refers dirctly to JVM internals. void MethodHandles::resolve_MemberName(Handle mname, TRAPS) { - assert(sun_dyn_MemberName::is_instance(mname()), ""); + assert(java_lang_invoke_MemberName::is_instance(mname()), ""); #ifdef ASSERT // If this assert throws, renegotiate the sentinel value used by the Java code, // so that it is distinct from any valid vtable index value, and any special @@ -472,12 +472,12 @@ const int sentinel_limit = methodOopDesc::highest_unused_vtable_index_value - sentinel_slop; assert(VM_INDEX_UNINITIALIZED < sentinel_limit, "Java sentinel != JVM sentinels"); #endif - if (sun_dyn_MemberName::vmindex(mname()) != VM_INDEX_UNINITIALIZED) + if (java_lang_invoke_MemberName::vmindex(mname()) != VM_INDEX_UNINITIALIZED) return; // already resolved - oop defc_oop = sun_dyn_MemberName::clazz(mname()); - oop name_str = sun_dyn_MemberName::name(mname()); - oop type_str = sun_dyn_MemberName::type(mname()); - int flags = sun_dyn_MemberName::flags(mname()); + oop defc_oop = java_lang_invoke_MemberName::clazz(mname()); + oop name_str = java_lang_invoke_MemberName::name(mname()); + oop type_str = java_lang_invoke_MemberName::type(mname()); + int flags = java_lang_invoke_MemberName::flags(mname()); if (defc_oop == NULL || name_str == NULL || type_str == NULL) { THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "nothing to resolve"); @@ -510,7 +510,7 @@ // convert the external string or reflective type to an internal signature TempNewSymbol type = convert_to_signature(type_str, polymorphic_signature, CHECK); - if (java_dyn_MethodType::is_instance(type_str) && polymorphic_signature) { + if (java_lang_invoke_MethodType::is_instance(type_str) && polymorphic_signature) { polymorphic_method_type = Handle(THREAD, type_str); //preserve exactly } @@ -557,9 +557,9 @@ vmtarget = result.resolved_klass()->as_klassOop(); } int mods = (m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS); - sun_dyn_MemberName::set_vmtarget(mname(), vmtarget); - sun_dyn_MemberName::set_vmindex(mname(), vmindex); - sun_dyn_MemberName::set_modifiers(mname(), mods); + java_lang_invoke_MemberName::set_vmtarget(mname(), vmtarget); + java_lang_invoke_MemberName::set_vmindex(mname(), vmindex); + java_lang_invoke_MemberName::set_modifiers(mname(), mods); DEBUG_ONLY(int junk; klassOop junk2); assert(decode_MemberName(mname(), junk2, junk) == result.resolved_method()(), "properly stored for later decoding"); @@ -586,9 +586,9 @@ oop vmtarget = m(); int vmindex = methodOopDesc::nonvirtual_vtable_index; int mods = (m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS); - sun_dyn_MemberName::set_vmtarget(mname(), vmtarget); - sun_dyn_MemberName::set_vmindex(mname(), vmindex); - sun_dyn_MemberName::set_modifiers(mname(), mods); + java_lang_invoke_MemberName::set_vmtarget(mname(), vmtarget); + java_lang_invoke_MemberName::set_vmindex(mname(), vmindex); + java_lang_invoke_MemberName::set_modifiers(mname(), mods); DEBUG_ONLY(int junk; klassOop junk2); 
assert(decode_MemberName(mname(), junk2, junk) == result.resolved_method()(), "properly stored for later decoding"); @@ -605,9 +605,9 @@ int vmindex = fd.offset(); int mods = (fd.access_flags().as_short() & JVM_RECOGNIZED_FIELD_MODIFIERS); if (vmindex == VM_INDEX_UNINITIALIZED) break; // should not happen - sun_dyn_MemberName::set_vmtarget(mname(), vmtarget); - sun_dyn_MemberName::set_vmindex(mname(), vmindex); - sun_dyn_MemberName::set_modifiers(mname(), mods); + java_lang_invoke_MemberName::set_vmtarget(mname(), vmtarget); + java_lang_invoke_MemberName::set_vmindex(mname(), vmindex); + java_lang_invoke_MemberName::set_modifiers(mname(), mods); return; } default: @@ -618,11 +618,11 @@ if (polymorphic_method_type.not_null()) { // Look on a non-null class loader. Handle cur_class_loader; - const int nptypes = java_dyn_MethodType::ptype_count(polymorphic_method_type()); + const int nptypes = java_lang_invoke_MethodType::ptype_count(polymorphic_method_type()); for (int i = 0; i <= nptypes; i++) { oop type_mirror; - if (i < nptypes) type_mirror = java_dyn_MethodType::ptype(polymorphic_method_type(), i); - else type_mirror = java_dyn_MethodType::rtype(polymorphic_method_type()); + if (i < nptypes) type_mirror = java_lang_invoke_MethodType::ptype(polymorphic_method_type(), i); + else type_mirror = java_lang_invoke_MethodType::rtype(polymorphic_method_type()); klassOop example_type = java_lang_Class::as_klassOop(type_mirror); if (example_type == NULL) continue; oop class_loader = Klass::cast(example_type)->class_loader(); @@ -639,9 +639,9 @@ } if (m != NULL) { int mods = (m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS); - sun_dyn_MemberName::set_vmtarget(mname(), m); - sun_dyn_MemberName::set_vmindex(mname(), m->vtable_index()); - sun_dyn_MemberName::set_modifiers(mname(), mods); + java_lang_invoke_MemberName::set_vmtarget(mname(), m); + java_lang_invoke_MemberName::set_vmindex(mname(), m->vtable_index()); + java_lang_invoke_MemberName::set_modifiers(mname(), mods); return; } } @@ -653,17 +653,17 @@ // Resolving it plants a vmtarget/vmindex in it, // which refers directly to JVM internals. 
void MethodHandles::expand_MemberName(Handle mname, int suppress, TRAPS) { - assert(sun_dyn_MemberName::is_instance(mname()), ""); - oop vmtarget = sun_dyn_MemberName::vmtarget(mname()); - int vmindex = sun_dyn_MemberName::vmindex(mname()); + assert(java_lang_invoke_MemberName::is_instance(mname()), ""); + oop vmtarget = java_lang_invoke_MemberName::vmtarget(mname()); + int vmindex = java_lang_invoke_MemberName::vmindex(mname()); if (vmtarget == NULL || vmindex == VM_INDEX_UNINITIALIZED) { THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "nothing to expand"); } - bool have_defc = (sun_dyn_MemberName::clazz(mname()) != NULL); - bool have_name = (sun_dyn_MemberName::name(mname()) != NULL); - bool have_type = (sun_dyn_MemberName::type(mname()) != NULL); - int flags = sun_dyn_MemberName::flags(mname()); + bool have_defc = (java_lang_invoke_MemberName::clazz(mname()) != NULL); + bool have_name = (java_lang_invoke_MemberName::name(mname()) != NULL); + bool have_type = (java_lang_invoke_MemberName::type(mname()) != NULL); + int flags = java_lang_invoke_MemberName::flags(mname()); if (suppress != 0) { if (suppress & _suppress_defc) have_defc = true; @@ -687,16 +687,16 @@ if (receiver_limit != NULL && receiver_limit != defc && Klass::cast(receiver_limit)->is_subtype_of(defc)) defc = receiver_limit; - sun_dyn_MemberName::set_clazz(mname(), Klass::cast(defc)->java_mirror()); + java_lang_invoke_MemberName::set_clazz(mname(), Klass::cast(defc)->java_mirror()); } if (!have_name) { //not java_lang_String::create_from_symbol; let's intern member names Handle name = StringTable::intern(m->name(), CHECK); - sun_dyn_MemberName::set_name(mname(), name()); + java_lang_invoke_MemberName::set_name(mname(), name()); } if (!have_type) { Handle type = java_lang_String::create_from_symbol(m->signature(), CHECK); - sun_dyn_MemberName::set_type(mname(), type()); + java_lang_invoke_MemberName::set_type(mname(), type()); } return; } @@ -711,16 +711,16 @@ if (!defc->find_field_from_offset(vmindex, is_static, &fd)) break; // cannot expand if (!have_defc) { - sun_dyn_MemberName::set_clazz(mname(), defc->java_mirror()); + java_lang_invoke_MemberName::set_clazz(mname(), defc->java_mirror()); } if (!have_name) { //not java_lang_String::create_from_symbol; let's intern member names Handle name = StringTable::intern(fd.name(), CHECK); - sun_dyn_MemberName::set_name(mname(), name()); + java_lang_invoke_MemberName::set_name(mname(), name()); } if (!have_type) { Handle type = java_lang_String::create_from_symbol(fd.signature(), CHECK); - sun_dyn_MemberName::set_type(mname(), type()); + java_lang_invoke_MemberName::set_type(mname(), type()); } return; } @@ -775,7 +775,7 @@ --rskip; } else if (rfill < rlimit) { oop result = results->obj_at(rfill++); - if (!sun_dyn_MemberName::is_instance(result)) + if (!java_lang_invoke_MemberName::is_instance(result)) return -99; // caller bug! MethodHandles::init_MemberName(result, st.klass()->as_klassOop(), st.access_flags(), st.offset()); } else if (++overflow >= overflow_limit) { @@ -823,7 +823,7 @@ --rskip; } else if (rfill < rlimit) { oop result = results->obj_at(rfill++); - if (!sun_dyn_MemberName::is_instance(result)) + if (!java_lang_invoke_MemberName::is_instance(result)) return -99; // caller bug! MethodHandles::init_MemberName(result, m, true); } else if (++overflow >= overflow_limit) { @@ -857,9 +857,9 @@ // Sanitize out methodOops, klassOops, and any other non-Java data. // This is for debugging and reflection. 
oop MethodHandles::encode_target(Handle mh, int format, TRAPS) { - assert(java_dyn_MethodHandle::is_instance(mh()), "must be a MH"); + assert(java_lang_invoke_MethodHandle::is_instance(mh()), "must be a MH"); if (format == ETF_HANDLE_OR_METHOD_NAME) { - oop target = java_dyn_MethodHandle::vmtarget(mh()); + oop target = java_lang_invoke_MethodHandle::vmtarget(mh()); if (target == NULL) { return NULL; // unformed MH } @@ -874,10 +874,10 @@ if (target->klass() == SystemDictionary::DirectMethodHandle_klass()) { return target; } - if (!java_dyn_MethodHandle::is_instance(target)){ + if (!java_lang_invoke_MethodHandle::is_instance(target)){ return NULL; // unformed MH } - target = java_dyn_MethodHandle::vmtarget(target); + target = java_lang_invoke_MethodHandle::vmtarget(target); } } // cases of metadata in MH.vmtarget: @@ -904,7 +904,7 @@ instanceKlassHandle mname_klass(THREAD, SystemDictionary::MemberName_klass()); mname_klass->initialize(CHECK_NULL); Handle mname = mname_klass->allocate_instance_handle(CHECK_NULL); - sun_dyn_MemberName::set_vmindex(mname(), VM_INDEX_UNINITIALIZED); + java_lang_invoke_MemberName::set_vmindex(mname(), VM_INDEX_UNINITIALIZED); bool do_dispatch = ((decode_flags & MethodHandles::_dmf_does_dispatch) != 0); init_MemberName(mname(), m, do_dispatch); expand_MemberName(mname, 0, CHECK_NULL); @@ -923,10 +923,12 @@ "java/lang/Null", //"java/lang/Nothing", "sun/dyn/empty/Empty", + "sun/invoke/empty/Empty", NULL }; static bool is_always_null_type(klassOop klass) { + if (klass == NULL) return false; // safety if (!Klass::cast(klass)->oop_is_instance()) return false; instanceKlass* ik = instanceKlass::cast(klass); // Must be on the boot class path: @@ -943,6 +945,8 @@ } bool MethodHandles::class_cast_needed(klassOop src, klassOop dst) { + if (dst == NULL) return true; + if (src == NULL) return (dst != SystemDictionary::Object_klass()); if (src == dst || dst == SystemDictionary::Object_klass()) return false; // quickest checks Klass* srck = Klass::cast(src); @@ -1025,10 +1029,15 @@ int first_ptype_pos, KlassHandle insert_ptype, TRAPS) { - objArrayHandle ptypes(THREAD, java_dyn_MethodType::ptypes(mtype())); + Handle mhi_type; + if (m->is_method_handle_invoke()) { + // use this more exact typing instead of the symbolic signature: + mhi_type = Handle(THREAD, m->method_handle_type()); + } + objArrayHandle ptypes(THREAD, java_lang_invoke_MethodType::ptypes(mtype())); int pnum = first_ptype_pos; int pmax = ptypes->length(); - int mnum = 0; // method argument + int anum = 0; // method argument const char* err = NULL; ResourceMark rm(THREAD); for (SignatureStream ss(m->signature()); !ss.is_done(); ss.next()) { @@ -1036,7 +1045,7 @@ if (ss.at_return_type()) { if (pnum != pmax) { err = "too many arguments"; break; } - ptype_oop = java_dyn_MethodType::rtype(mtype()); + ptype_oop = java_lang_invoke_MethodType::rtype(mtype()); } else { if (pnum >= pmax) { err = "not enough arguments"; break; } @@ -1047,47 +1056,70 @@ else ptype_oop = insert_ptype->java_mirror(); pnum += 1; - mnum += 1; + anum += 1; } - klassOop pklass = NULL; - BasicType ptype = T_OBJECT; - if (ptype_oop != NULL) - ptype = java_lang_Class::as_BasicType(ptype_oop, &pklass); - else - // null does not match any non-reference; use Object to report the error - pklass = SystemDictionary::Object_klass(); - klassOop mklass = NULL; - BasicType mtype = ss.type(); - if (mtype == T_ARRAY) mtype = T_OBJECT; // fold all refs to T_OBJECT - if (mtype == T_OBJECT) { - if (ptype_oop == NULL) { + KlassHandle pklass; + BasicType ptype = 
T_OBJECT; + bool have_ptype = false; + // missing ptype_oop does not match any non-reference; use Object to report the error + pklass = SystemDictionaryHandles::Object_klass(); + if (ptype_oop != NULL) { + have_ptype = true; + klassOop pklass_oop = NULL; + ptype = java_lang_Class::as_BasicType(ptype_oop, &pklass_oop); + pklass = KlassHandle(THREAD, pklass_oop); + } + ptype_oop = NULL; //done with this + KlassHandle aklass; + BasicType atype = ss.type(); + if (atype == T_ARRAY) atype = T_OBJECT; // fold all refs to T_OBJECT + if (atype == T_OBJECT) { + if (!have_ptype) { // null matches any reference continue; } - KlassHandle pklass_handle(THREAD, pklass); pklass = NULL; - // If we fail to resolve types at this point, we will throw an error. - Symbol* name = ss.as_symbol(CHECK); - instanceKlass* mk = instanceKlass::cast(m->method_holder()); - Handle loader(THREAD, mk->class_loader()); - Handle domain(THREAD, mk->protection_domain()); - mklass = SystemDictionary::resolve_or_null(name, loader, domain, CHECK); - pklass = pklass_handle(); - if (mklass == NULL && pklass != NULL && - Klass::cast(pklass)->name() == name && - m->is_method_handle_invoke()) { - // Assume a match. We can't really decode the signature of MH.invoke*. - continue; + if (mhi_type.is_null()) { + // If we fail to resolve types at this point, we will usually throw an error. + TempNewSymbol name = ss.as_symbol_or_null(); + if (name != NULL) { + instanceKlass* mk = instanceKlass::cast(m->method_holder()); + Handle loader(THREAD, mk->class_loader()); + Handle domain(THREAD, mk->protection_domain()); + klassOop aklass_oop = SystemDictionary::resolve_or_null(name, loader, domain, CHECK); + if (aklass_oop != NULL) + aklass = KlassHandle(THREAD, aklass_oop); + } + } else { + // for method handle invokers we don't look at the name in the signature + oop atype_oop; + if (ss.at_return_type()) + atype_oop = java_lang_invoke_MethodType::rtype(mhi_type()); + else + atype_oop = java_lang_invoke_MethodType::ptype(mhi_type(), anum-1); + klassOop aklass_oop = NULL; + atype = java_lang_Class::as_BasicType(atype_oop, &aklass_oop); + aklass = KlassHandle(THREAD, aklass_oop); } } if (!ss.at_return_type()) { - err = check_argument_type_change(ptype, pklass, mtype, mklass, mnum); + err = check_argument_type_change(ptype, pklass(), atype, aklass(), anum); } else { - err = check_return_type_change(mtype, mklass, ptype, pklass); // note reversal! + err = check_return_type_change(atype, aklass(), ptype, pklass()); // note reversal! } if (err != NULL) break; } if (err != NULL) { +#ifndef PRODUCT + if (PrintMiscellaneous && (Verbose || WizardMode)) { + tty->print("*** verify_method_signature failed: "); + java_lang_invoke_MethodType::print_signature(mtype(), tty); + tty->cr(); + tty->print_cr(" first_ptype_pos = %d, insert_ptype = "UINTX_FORMAT, first_ptype_pos, insert_ptype()); + tty->print(" Failing method: "); + m->print(); + } +#endif //PRODUCT THROW_MSG(vmSymbols::java_lang_InternalError(), err); } } @@ -1111,7 +1143,7 @@ } if (m_needs_receiver && err == NULL) { - objArrayOop ptypes = java_dyn_MethodType::ptypes(mtype()); + objArrayOop ptypes = java_lang_invoke_MethodType::ptypes(mtype()); if (ptypes->length() < first_ptype_pos) { err = "receiver argument is missing"; goto die; } if (has_bound_recv) @@ -1131,15 +1163,15 @@ void MethodHandles::verify_vmslots(Handle mh, TRAPS) { // Verify vmslots. 
- int check_slots = argument_slot_count(java_dyn_MethodHandle::type(mh())); - if (java_dyn_MethodHandle::vmslots(mh()) != check_slots) { + int check_slots = argument_slot_count(java_lang_invoke_MethodHandle::type(mh())); + if (java_lang_invoke_MethodHandle::vmslots(mh()) != check_slots) { THROW_MSG(vmSymbols::java_lang_InternalError(), "bad vmslots in BMH"); } } void MethodHandles::verify_vmargslot(Handle mh, int argnum, int argslot, TRAPS) { // Verify that argslot points at the given argnum. - int check_slot = argument_slot(java_dyn_MethodHandle::type(mh()), argnum); + int check_slot = argument_slot(java_lang_invoke_MethodHandle::type(mh()), argnum); if (argslot != check_slot || argslot < 0) { const char* fmt = "for argnum of %d, vmargslot is %d, should be %d"; size_t msglen = strlen(fmt) + 3*11 + 1; @@ -1160,8 +1192,8 @@ int delete_argnum, oop dst_mtype, int dst_beg, int dst_end, bool raw) { - objArrayOop src_ptypes = java_dyn_MethodType::ptypes(src_mtype); - objArrayOop dst_ptypes = java_dyn_MethodType::ptypes(dst_mtype); + objArrayOop src_ptypes = java_lang_invoke_MethodType::ptypes(src_mtype); + objArrayOop dst_ptypes = java_lang_invoke_MethodType::ptypes(dst_mtype); int src_max = src_ptypes->length(); int dst_max = dst_ptypes->length(); @@ -1224,8 +1256,8 @@ } // Now compare return types also. - oop src_rtype = java_dyn_MethodType::rtype(src_mtype); - oop dst_rtype = java_dyn_MethodType::rtype(dst_mtype); + oop src_rtype = java_lang_invoke_MethodType::rtype(src_mtype); + oop dst_rtype = java_lang_invoke_MethodType::rtype(dst_mtype); if (src_rtype != dst_rtype) { err = check_return_type_change(dst_rtype, src_rtype, raw); // note reversal! if (err != NULL) return err; @@ -1287,10 +1319,12 @@ // format, format, format const char* src_name = type2name(src_type); const char* dst_name = type2name(dst_type); - if (src_type == T_OBJECT) src_name = Klass::cast(src_klass)->external_name(); - if (dst_type == T_OBJECT) dst_name = Klass::cast(dst_klass)->external_name(); if (src_name == NULL) src_name = "unknown type"; if (dst_name == NULL) dst_name = "unknown type"; + if (src_type == T_OBJECT) + src_name = (src_klass != NULL) ? Klass::cast(src_klass)->external_name() : "an unresolved class"; + if (dst_type == T_OBJECT) + dst_name = (dst_klass != NULL) ? Klass::cast(dst_klass)->external_name() : "an unresolved class"; size_t msglen = strlen(err) + strlen(src_name) + strlen(dst_name) + (argnum < 10 ? 1 : 11); char* msg = NEW_RESOURCE_ARRAY(char, msglen + 1); @@ -1314,7 +1348,7 @@ // then return a negative number. Otherwise, the result // is in the range [0..vmslots] inclusive. int MethodHandles::argument_slot(oop method_type, int arg) { - objArrayOop ptypes = java_dyn_MethodType::ptypes(method_type); + objArrayOop ptypes = java_lang_invoke_MethodType::ptypes(method_type); int argslot = 0; int len = ptypes->length(); if (arg < -1 || arg >= len) return -99; @@ -1328,7 +1362,7 @@ // Given a slot number, return the argument number. int MethodHandles::argument_slot_to_argnum(oop method_type, int query_argslot) { - objArrayOop ptypes = java_dyn_MethodType::ptypes(method_type); + objArrayOop ptypes = java_lang_invoke_MethodType::ptypes(method_type); int argslot = 0; int len = ptypes->length(); for (int i = len-1; i >= 0; i--) { @@ -1394,11 +1428,11 @@ void MethodHandles::verify_DirectMethodHandle(Handle mh, methodHandle m, TRAPS) { // Verify type. 
- Handle mtype(THREAD, java_dyn_MethodHandle::type(mh())); + Handle mtype(THREAD, java_lang_invoke_MethodHandle::type(mh())); verify_method_type(m, mtype, false, KlassHandle(), CHECK); // Verify vmslots. - if (java_dyn_MethodHandle::vmslots(mh()) != m->size_of_parameters()) { + if (java_lang_invoke_MethodHandle::vmslots(mh()) != m->size_of_parameters()) { THROW_MSG(vmSymbols::java_lang_InternalError(), "bad vmslots in DMH"); } } @@ -1410,7 +1444,7 @@ THROW(vmSymbols::java_lang_InternalError()); } - java_dyn_MethodHandle::init_vmslots(mh()); + java_lang_invoke_MethodHandle::init_vmslots(mh()); if (VerifyMethodHandles) { // The privileged code which invokes this routine should not make @@ -1470,8 +1504,8 @@ if (me == NULL) { THROW(vmSymbols::java_lang_InternalError()); } - sun_dyn_DirectMethodHandle::set_vmtarget(mh(), vmtarget); - sun_dyn_DirectMethodHandle::set_vmindex(mh(), vmindex); + java_lang_invoke_DirectMethodHandle::set_vmtarget(mh(), vmtarget); + java_lang_invoke_DirectMethodHandle::set_vmindex(mh(), vmindex); DEBUG_ONLY(int flags; klassOop rlimit); assert(MethodHandles::decode_method(mh(), rlimit, flags) == m(), "properly stored for later decoding"); @@ -1482,15 +1516,15 @@ assert(decode_MethodHandle_stack_pushes(mh()) == 0, "DMH does not move stack"); // Done! - java_dyn_MethodHandle::set_vmentry(mh(), me); + java_lang_invoke_MethodHandle::set_vmentry(mh(), me); } void MethodHandles::verify_BoundMethodHandle_with_receiver(Handle mh, methodHandle m, TRAPS) { // Verify type. - oop receiver = sun_dyn_BoundMethodHandle::argument(mh()); - Handle mtype(THREAD, java_dyn_MethodHandle::type(mh())); + oop receiver = java_lang_invoke_BoundMethodHandle::argument(mh()); + Handle mtype(THREAD, java_lang_invoke_MethodHandle::type(mh())); KlassHandle bound_recv_type; if (receiver != NULL) bound_recv_type = KlassHandle(THREAD, receiver->klass()); verify_method_type(m, mtype, true, bound_recv_type, CHECK); @@ -1498,11 +1532,11 @@ int receiver_pos = m->size_of_parameters() - 1; // Verify MH.vmargslot, which should point at the bound receiver. - verify_vmargslot(mh, -1, sun_dyn_BoundMethodHandle::vmargslot(mh()), CHECK); + verify_vmargslot(mh, -1, java_lang_invoke_BoundMethodHandle::vmargslot(mh()), CHECK); //verify_vmslots(mh, CHECK); // Verify vmslots. - if (java_dyn_MethodHandle::vmslots(mh()) != receiver_pos) { + if (java_lang_invoke_MethodHandle::vmslots(mh()) != receiver_pos) { THROW_MSG(vmSymbols::java_lang_InternalError(), "bad vmslots in BMH (receiver)"); } } @@ -1520,7 +1554,7 @@ KlassHandle receiver_klass; { - oop receiver_oop = sun_dyn_BoundMethodHandle::argument(mh()); + oop receiver_oop = java_lang_invoke_BoundMethodHandle::argument(mh()); if (receiver_oop != NULL) receiver_klass = KlassHandle(THREAD, receiver_oop->klass()); } @@ -1531,31 +1565,31 @@ if (m.is_null()) { THROW(vmSymbols::java_lang_InternalError()); } if (m->is_abstract()) { THROW(vmSymbols::java_lang_AbstractMethodError()); } - java_dyn_MethodHandle::init_vmslots(mh()); + java_lang_invoke_MethodHandle::init_vmslots(mh()); if (VerifyMethodHandles) { verify_BoundMethodHandle_with_receiver(mh, m, CHECK); } - sun_dyn_BoundMethodHandle::set_vmtarget(mh(), m()); + java_lang_invoke_BoundMethodHandle::set_vmtarget(mh(), m()); DEBUG_ONLY(int junk; klassOop junk2); assert(MethodHandles::decode_method(mh(), junk2, junk) == m(), "properly stored for later decoding"); assert(decode_MethodHandle_stack_pushes(mh()) == 1, "BMH pushes one stack slot"); // Done! 
- java_dyn_MethodHandle::set_vmentry(mh(), MethodHandles::entry(MethodHandles::_bound_ref_direct_mh)); + java_lang_invoke_MethodHandle::set_vmentry(mh(), MethodHandles::entry(MethodHandles::_bound_ref_direct_mh)); } void MethodHandles::verify_BoundMethodHandle(Handle mh, Handle target, int argnum, bool direct_to_method, TRAPS) { Handle ptype_handle(THREAD, - java_dyn_MethodType::ptype(java_dyn_MethodHandle::type(target()), argnum)); + java_lang_invoke_MethodType::ptype(java_lang_invoke_MethodHandle::type(target()), argnum)); KlassHandle ptype_klass; BasicType ptype = java_lang_Class::as_BasicType(ptype_handle(), &ptype_klass); int slots_pushed = type2size[ptype]; - oop argument = sun_dyn_BoundMethodHandle::argument(mh()); + oop argument = java_lang_invoke_BoundMethodHandle::argument(mh()); const char* err = NULL; @@ -1623,9 +1657,9 @@ if (err == NULL) { // Verify the rest of the method type. - err = check_method_type_insertion(java_dyn_MethodHandle::type(mh()), + err = check_method_type_insertion(java_lang_invoke_MethodHandle::type(mh()), argnum, ptype_handle(), - java_dyn_MethodHandle::type(target())); + java_lang_invoke_MethodHandle::type(target())); } if (err != NULL) { @@ -1635,20 +1669,20 @@ void MethodHandles::init_BoundMethodHandle(Handle mh, Handle target, int argnum, TRAPS) { // Check arguments. - if (mh.is_null() || target.is_null() || !java_dyn_MethodHandle::is_instance(target())) { + if (mh.is_null() || target.is_null() || !java_lang_invoke_MethodHandle::is_instance(target())) { THROW(vmSymbols::java_lang_InternalError()); } - java_dyn_MethodHandle::init_vmslots(mh()); + java_lang_invoke_MethodHandle::init_vmslots(mh()); if (VerifyMethodHandles) { int insert_after = argnum - 1; - verify_vmargslot(mh, insert_after, sun_dyn_BoundMethodHandle::vmargslot(mh()), CHECK); + verify_vmargslot(mh, insert_after, java_lang_invoke_BoundMethodHandle::vmargslot(mh()), CHECK); verify_vmslots(mh, CHECK); } // Get bound type and required slots. - oop ptype_oop = java_dyn_MethodType::ptype(java_dyn_MethodHandle::type(target()), argnum); + oop ptype_oop = java_lang_invoke_MethodType::ptype(java_lang_invoke_MethodHandle::type(target()), argnum); BasicType ptype = java_lang_Class::as_BasicType(ptype_oop); int slots_pushed = type2size[ptype]; @@ -1659,12 +1693,12 @@ bool direct_to_method = false; if (OptimizeMethodHandles && target->klass() == SystemDictionary::DirectMethodHandle_klass() && - (argnum == 0 || sun_dyn_DirectMethodHandle::vmindex(target()) < 0)) { + (argnum == 0 || java_lang_invoke_DirectMethodHandle::vmindex(target()) < 0)) { int decode_flags = 0; klassOop receiver_limit_oop = NULL; methodHandle m(THREAD, decode_method(target(), receiver_limit_oop, decode_flags)); if (m.is_null()) { THROW_MSG(vmSymbols::java_lang_InternalError(), "DMH failed to decode"); } DEBUG_ONLY(int m_vmslots = m->size_of_parameters() - slots_pushed); // pos. of 1st arg. - assert(sun_dyn_BoundMethodHandle::vmslots(mh()) == m_vmslots, "type w/ m sig"); + assert(java_lang_invoke_BoundMethodHandle::vmslots(mh()) == m_vmslots, "type w/ m sig"); if (argnum == 0 && (decode_flags & _dmf_has_receiver) != 0) { KlassHandle receiver_limit(THREAD, receiver_limit_oop); init_BoundMethodHandle_with_receiver(mh, m, @@ -1677,11 +1711,11 @@ // to bind another argument and still invoke the methodOop directly. 
if (!(decode_flags & _dmf_does_dispatch)) { direct_to_method = true; - sun_dyn_BoundMethodHandle::set_vmtarget(mh(), m()); + java_lang_invoke_BoundMethodHandle::set_vmtarget(mh(), m()); } } if (!direct_to_method) - sun_dyn_BoundMethodHandle::set_vmtarget(mh(), target()); + java_lang_invoke_BoundMethodHandle::set_vmtarget(mh(), target()); if (VerifyMethodHandles) { verify_BoundMethodHandle(mh, target, argnum, direct_to_method, CHECK); @@ -1703,7 +1737,7 @@ } // Done! - java_dyn_MethodHandle::set_vmentry(mh(), me); + java_lang_invoke_MethodHandle::set_vmentry(mh(), me); } static void throw_InternalError_for_bad_conversion(int conversion, const char* err, TRAPS) { @@ -1713,8 +1747,8 @@ } void MethodHandles::verify_AdapterMethodHandle(Handle mh, int argnum, TRAPS) { - jint conversion = sun_dyn_AdapterMethodHandle::conversion(mh()); - int argslot = sun_dyn_AdapterMethodHandle::vmargslot(mh()); + jint conversion = java_lang_invoke_AdapterMethodHandle::conversion(mh()); + int argslot = java_lang_invoke_AdapterMethodHandle::vmargslot(mh()); verify_vmargslot(mh, argnum, argslot, CHECK); verify_vmslots(mh, CHECK); @@ -1731,10 +1765,10 @@ BasicType dest = adapter_conversion_dest_type(conversion); int vminfo = adapter_conversion_vminfo(conversion); // should be zero - Handle argument(THREAD, sun_dyn_AdapterMethodHandle::argument(mh())); - Handle target(THREAD, sun_dyn_AdapterMethodHandle::vmtarget(mh())); - Handle src_mtype(THREAD, java_dyn_MethodHandle::type(mh())); - Handle dst_mtype(THREAD, java_dyn_MethodHandle::type(target())); + Handle argument(THREAD, java_lang_invoke_AdapterMethodHandle::argument(mh())); + Handle target(THREAD, java_lang_invoke_AdapterMethodHandle::vmtarget(mh())); + Handle src_mtype(THREAD, java_lang_invoke_MethodHandle::type(mh())); + Handle dst_mtype(THREAD, java_lang_invoke_MethodHandle::type(target())); const char* err = NULL; @@ -1760,7 +1794,7 @@ break; case _adapter_flyby: case _adapter_ricochet: - if (!java_dyn_MethodHandle::is_instance(argument())) + if (!java_lang_invoke_MethodHandle::is_instance(argument())) { err = "MethodHandle adapter argument required"; break; } break; default: @@ -1806,9 +1840,9 @@ err = "adapter requires src/dest conversion subfields for swap"; break; } int swap_size = type2size[src]; - oop src_mtype = sun_dyn_AdapterMethodHandle::type(mh()); - oop dest_mtype = sun_dyn_AdapterMethodHandle::type(target()); - int slot_limit = sun_dyn_AdapterMethodHandle::vmslots(target()); + oop src_mtype = java_lang_invoke_AdapterMethodHandle::type(mh()); + oop dest_mtype = java_lang_invoke_AdapterMethodHandle::type(target()); + int slot_limit = java_lang_invoke_AdapterMethodHandle::vmslots(target()); int src_slot = argslot; int dest_slot = vminfo; bool rotate_up = (src_slot > dest_slot); // upward rotation @@ -1821,8 +1855,8 @@ } else if (ek == _adapter_swap_args && !(src_slot > dest_slot)) { err = "source of swap must be deeper in stack"; } else if (ek == _adapter_swap_args) { - err = check_argument_type_change(java_dyn_MethodType::ptype(src_mtype, dest_arg), - java_dyn_MethodType::ptype(dest_mtype, src_arg), + err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype, dest_arg), + java_lang_invoke_MethodType::ptype(dest_mtype, src_arg), dest_arg); } else if (ek == _adapter_rot_args) { if (rotate_up) { @@ -1830,8 +1864,8 @@ // rotate up: [dest_slot..src_slot-ss] --> [dest_slot+ss..src_slot] // that is: [src_arg+1..dest_arg] --> [src_arg..dest_arg-1] for (int i = src_arg+1; i <= dest_arg && err == NULL; i++) { - err = 
check_argument_type_change(java_dyn_MethodType::ptype(src_mtype, i), - java_dyn_MethodType::ptype(dest_mtype, i-1), + err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype, i), + java_lang_invoke_MethodType::ptype(dest_mtype, i-1), i); } } else { // rotate down @@ -1839,15 +1873,15 @@ // rotate down: [src_slot+ss..dest_slot] --> [src_slot..dest_slot-ss] // that is: [dest_arg..src_arg-1] --> [dst_arg+1..src_arg] for (int i = dest_arg; i <= src_arg-1 && err == NULL; i++) { - err = check_argument_type_change(java_dyn_MethodType::ptype(src_mtype, i), - java_dyn_MethodType::ptype(dest_mtype, i+1), + err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype, i), + java_lang_invoke_MethodType::ptype(dest_mtype, i+1), i); } } } if (err == NULL) - err = check_argument_type_change(java_dyn_MethodType::ptype(src_mtype, src_arg), - java_dyn_MethodType::ptype(dest_mtype, dest_arg), + err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype, src_arg), + java_lang_invoke_MethodType::ptype(dest_mtype, dest_arg), src_arg); } break; @@ -1918,8 +1952,8 @@ if (err == NULL) { // Make sure this adapter does not push too deeply. int slots_pushed = stack_move / stack_move_unit(); - int this_vmslots = java_dyn_MethodHandle::vmslots(mh()); - int target_vmslots = java_dyn_MethodHandle::vmslots(target()); + int this_vmslots = java_lang_invoke_MethodHandle::vmslots(mh()); + int target_vmslots = java_lang_invoke_MethodHandle::vmslots(target()); if (slots_pushed != (target_vmslots - this_vmslots)) { err = "stack_move inconsistent with previous and current MethodType vmslots"; } else if (slots_pushed > 0) { @@ -1961,7 +1995,7 @@ case _adapter_check_cast: { // The actual value being checked must be a reference: - err = check_argument_type_change(java_dyn_MethodType::ptype(src_mtype(), argnum), + err = check_argument_type_change(java_lang_invoke_MethodType::ptype(src_mtype(), argnum), object_java_mirror(), argnum); if (err != NULL) break; @@ -1985,9 +2019,9 @@ } void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnum, TRAPS) { - oop argument = sun_dyn_AdapterMethodHandle::argument(mh()); - int argslot = sun_dyn_AdapterMethodHandle::vmargslot(mh()); - jint conversion = sun_dyn_AdapterMethodHandle::conversion(mh()); + oop argument = java_lang_invoke_AdapterMethodHandle::argument(mh()); + int argslot = java_lang_invoke_AdapterMethodHandle::vmargslot(mh()); + jint conversion = java_lang_invoke_AdapterMethodHandle::conversion(mh()); jint conv_op = adapter_conversion_op(conversion); // adjust the adapter code to the internal EntryKind enumeration: @@ -1995,11 +2029,11 @@ EntryKind ek_opt = ek_orig; // may be optimized // Finalize the vmtarget field (Java initialized it to null). - if (!java_dyn_MethodHandle::is_instance(target())) { + if (!java_lang_invoke_MethodHandle::is_instance(target())) { throw_InternalError_for_bad_conversion(conversion, "bad target", THREAD); return; } - sun_dyn_AdapterMethodHandle::set_vmtarget(mh(), target()); + java_lang_invoke_AdapterMethodHandle::set_vmtarget(mh(), target()); if (VerifyMethodHandles) { verify_AdapterMethodHandle(mh, argnum, CHECK); @@ -2083,7 +2117,7 @@ case _adapter_rot_args: { int swap_slots = type2size[src]; - int slot_limit = sun_dyn_AdapterMethodHandle::vmslots(mh()); + int slot_limit = java_lang_invoke_AdapterMethodHandle::vmslots(mh()); int src_slot = argslot; int dest_slot = vminfo; int rotate = (ek_orig == _adapter_swap_args) ? 0 : (src_slot > dest_slot) ? 
1 : -1; @@ -2147,10 +2181,10 @@ jint new_conversion = adapter_conversion(conv_op, src, dest, stack_move, vminfo); // Finalize the conversion field. (Note that it is final to Java code.) - sun_dyn_AdapterMethodHandle::set_conversion(mh(), new_conversion); + java_lang_invoke_AdapterMethodHandle::set_conversion(mh(), new_conversion); // Done! - java_dyn_MethodHandle::set_vmentry(mh(), entry(ek_opt)); + java_lang_invoke_MethodHandle::set_vmentry(mh(), entry(ek_opt)); // There should be enough memory barriers on exit from native methods // to ensure that the MH is fully initialized to all threads before @@ -2158,7 +2192,7 @@ } // -// Here are the native methods on sun.dyn.MethodHandleImpl. +// Here are the native methods on sun.invoke.MethodHandleImpl. // They are the private interface between this JVM and the HotSpot-specific // Java code that implements JSR 292 method handles. // @@ -2168,7 +2202,7 @@ // direct method handles for invokestatic or invokespecial // void init(DirectMethodHandle self, MemberName ref, boolean doDispatch, Class<?> caller); -JVM_ENTRY(void, MHI_init_DMH(JNIEnv *env, jobject igcls, jobject mh_jh, +JVM_ENTRY(void, MHN_init_DMH(JNIEnv *env, jobject igcls, jobject mh_jh, jobject target_jh, jboolean do_dispatch, jobject caller_jh)) { ResourceMark rm; // for error messages @@ -2177,13 +2211,13 @@ Handle mh(THREAD, JNIHandles::resolve_non_null(mh_jh)); // Early returns out of this method leave the DMH in an unfinished state. - assert(java_dyn_MethodHandle::vmentry(mh()) == NULL, "must be safely null"); + assert(java_lang_invoke_MethodHandle::vmentry(mh()) == NULL, "must be safely null"); // which method are we really talking about? if (target_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); } oop target_oop = JNIHandles::resolve_non_null(target_jh); - if (sun_dyn_MemberName::is_instance(target_oop) && - sun_dyn_MemberName::vmindex(target_oop) == VM_INDEX_UNINITIALIZED) { + if (java_lang_invoke_MemberName::is_instance(target_oop) && + java_lang_invoke_MemberName::vmindex(target_oop) == VM_INDEX_UNINITIALIZED) { Handle mname(THREAD, target_oop); MethodHandles::resolve_MemberName(mname, CHECK); target_oop = mname(); // in case of GC @@ -2232,7 +2266,7 @@ JVM_END // bound method handles -JVM_ENTRY(void, MHI_init_BMH(JNIEnv *env, jobject igcls, jobject mh_jh, +JVM_ENTRY(void, MHN_init_BMH(JNIEnv *env, jobject igcls, jobject mh_jh, jobject target_jh, int argnum)) { ResourceMark rm; // for error messages @@ -2241,12 +2275,12 @@ Handle mh(THREAD, JNIHandles::resolve_non_null(mh_jh)); // Early returns out of this method leave the BMH in an unfinished state. - assert(java_dyn_MethodHandle::vmentry(mh()) == NULL, "must be safely null"); + assert(java_lang_invoke_MethodHandle::vmentry(mh()) == NULL, "must be safely null"); if (target_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); } Handle target(THREAD, JNIHandles::resolve_non_null(target_jh)); - if (!java_dyn_MethodHandle::is_instance(target())) { + if (!java_lang_invoke_MethodHandle::is_instance(target())) { // Target object is a reflective method. (%%% Do we need this alternate path?) 
Untested("init_BMH of non-MH"); if (argnum != 0) { THROW(vmSymbols::java_lang_InternalError()); } @@ -2269,7 +2303,7 @@ JVM_END // adapter method handles -JVM_ENTRY(void, MHI_init_AMH(JNIEnv *env, jobject igcls, jobject mh_jh, +JVM_ENTRY(void, MHN_init_AMH(JNIEnv *env, jobject igcls, jobject mh_jh, jobject target_jh, int argnum)) { // This is the guy we are initializing: if (mh_jh == NULL || target_jh == NULL) { @@ -2279,14 +2313,14 @@ Handle target(THREAD, JNIHandles::resolve_non_null(target_jh)); // Early returns out of this method leave the AMH in an unfinished state. - assert(java_dyn_MethodHandle::vmentry(mh()) == NULL, "must be safely null"); + assert(java_lang_invoke_MethodHandle::vmentry(mh()) == NULL, "must be safely null"); MethodHandles::init_AdapterMethodHandle(mh, target, argnum, CHECK); } JVM_END // method type forms -JVM_ENTRY(void, MHI_init_MT(JNIEnv *env, jobject igcls, jobject erased_jh)) { +JVM_ENTRY(void, MHN_init_MT(JNIEnv *env, jobject igcls, jobject erased_jh)) { if (erased_jh == NULL) return; if (TraceMethodHandles) { tty->print("creating MethodType form "); @@ -2307,9 +2341,9 @@ JVM_END // debugging and reflection -JVM_ENTRY(jobject, MHI_getTarget(JNIEnv *env, jobject igcls, jobject mh_jh, jint format)) { +JVM_ENTRY(jobject, MHN_getTarget(JNIEnv *env, jobject igcls, jobject mh_jh, jint format)) { Handle mh(THREAD, JNIHandles::resolve(mh_jh)); - if (!java_dyn_MethodHandle::is_instance(mh())) { + if (!java_lang_invoke_MethodHandle::is_instance(mh())) { THROW_NULL(vmSymbols::java_lang_IllegalArgumentException()); } oop target = MethodHandles::encode_target(mh, format, CHECK_NULL); @@ -2317,7 +2351,7 @@ } JVM_END -JVM_ENTRY(jint, MHI_getConstant(JNIEnv *env, jobject igcls, jint which)) { +JVM_ENTRY(jint, MHN_getConstant(JNIEnv *env, jobject igcls, jint which)) { switch (which) { case MethodHandles::GC_JVM_PUSH_LIMIT: guarantee(MethodHandlePushLimit >= 2 && MethodHandlePushLimit <= 0xFF, @@ -2341,36 +2375,36 @@ template(MethodHandles,ETF_DIRECT_HANDLE) \ template(MethodHandles,ETF_METHOD_NAME) \ template(MethodHandles,ETF_REFLECT_METHOD) \ - template(sun_dyn_MemberName,MN_IS_METHOD) \ - template(sun_dyn_MemberName,MN_IS_CONSTRUCTOR) \ - template(sun_dyn_MemberName,MN_IS_FIELD) \ - template(sun_dyn_MemberName,MN_IS_TYPE) \ - template(sun_dyn_MemberName,MN_SEARCH_SUPERCLASSES) \ - template(sun_dyn_MemberName,MN_SEARCH_INTERFACES) \ - template(sun_dyn_MemberName,VM_INDEX_UNINITIALIZED) \ - template(sun_dyn_AdapterMethodHandle,OP_RETYPE_ONLY) \ - template(sun_dyn_AdapterMethodHandle,OP_RETYPE_RAW) \ - template(sun_dyn_AdapterMethodHandle,OP_CHECK_CAST) \ - template(sun_dyn_AdapterMethodHandle,OP_PRIM_TO_PRIM) \ - template(sun_dyn_AdapterMethodHandle,OP_REF_TO_PRIM) \ - template(sun_dyn_AdapterMethodHandle,OP_PRIM_TO_REF) \ - template(sun_dyn_AdapterMethodHandle,OP_SWAP_ARGS) \ - template(sun_dyn_AdapterMethodHandle,OP_ROT_ARGS) \ - template(sun_dyn_AdapterMethodHandle,OP_DUP_ARGS) \ - template(sun_dyn_AdapterMethodHandle,OP_DROP_ARGS) \ - template(sun_dyn_AdapterMethodHandle,OP_COLLECT_ARGS) \ - template(sun_dyn_AdapterMethodHandle,OP_SPREAD_ARGS) \ - template(sun_dyn_AdapterMethodHandle,OP_FLYBY) \ - template(sun_dyn_AdapterMethodHandle,OP_RICOCHET) \ - template(sun_dyn_AdapterMethodHandle,CONV_OP_LIMIT) \ - template(sun_dyn_AdapterMethodHandle,CONV_OP_MASK) \ - template(sun_dyn_AdapterMethodHandle,CONV_VMINFO_MASK) \ - template(sun_dyn_AdapterMethodHandle,CONV_VMINFO_SHIFT) \ - template(sun_dyn_AdapterMethodHandle,CONV_OP_SHIFT) \ - 
template(sun_dyn_AdapterMethodHandle,CONV_DEST_TYPE_SHIFT) \ - template(sun_dyn_AdapterMethodHandle,CONV_SRC_TYPE_SHIFT) \ - template(sun_dyn_AdapterMethodHandle,CONV_STACK_MOVE_SHIFT) \ - template(sun_dyn_AdapterMethodHandle,CONV_STACK_MOVE_MASK) \ + template(java_lang_invoke_MemberName,MN_IS_METHOD) \ + template(java_lang_invoke_MemberName,MN_IS_CONSTRUCTOR) \ + template(java_lang_invoke_MemberName,MN_IS_FIELD) \ + template(java_lang_invoke_MemberName,MN_IS_TYPE) \ + template(java_lang_invoke_MemberName,MN_SEARCH_SUPERCLASSES) \ + template(java_lang_invoke_MemberName,MN_SEARCH_INTERFACES) \ + template(java_lang_invoke_MemberName,VM_INDEX_UNINITIALIZED) \ + template(java_lang_invoke_AdapterMethodHandle,OP_RETYPE_ONLY) \ + template(java_lang_invoke_AdapterMethodHandle,OP_RETYPE_RAW) \ + template(java_lang_invoke_AdapterMethodHandle,OP_CHECK_CAST) \ + template(java_lang_invoke_AdapterMethodHandle,OP_PRIM_TO_PRIM) \ + template(java_lang_invoke_AdapterMethodHandle,OP_REF_TO_PRIM) \ + template(java_lang_invoke_AdapterMethodHandle,OP_PRIM_TO_REF) \ + template(java_lang_invoke_AdapterMethodHandle,OP_SWAP_ARGS) \ + template(java_lang_invoke_AdapterMethodHandle,OP_ROT_ARGS) \ + template(java_lang_invoke_AdapterMethodHandle,OP_DUP_ARGS) \ + template(java_lang_invoke_AdapterMethodHandle,OP_DROP_ARGS) \ + template(java_lang_invoke_AdapterMethodHandle,OP_COLLECT_ARGS) \ + template(java_lang_invoke_AdapterMethodHandle,OP_SPREAD_ARGS) \ + template(java_lang_invoke_AdapterMethodHandle,OP_FLYBY) \ + template(java_lang_invoke_AdapterMethodHandle,OP_RICOCHET) \ + template(java_lang_invoke_AdapterMethodHandle,CONV_OP_LIMIT) \ + template(java_lang_invoke_AdapterMethodHandle,CONV_OP_MASK) \ + template(java_lang_invoke_AdapterMethodHandle,CONV_VMINFO_MASK) \ + template(java_lang_invoke_AdapterMethodHandle,CONV_VMINFO_SHIFT) \ + template(java_lang_invoke_AdapterMethodHandle,CONV_OP_SHIFT) \ + template(java_lang_invoke_AdapterMethodHandle,CONV_DEST_TYPE_SHIFT) \ + template(java_lang_invoke_AdapterMethodHandle,CONV_SRC_TYPE_SHIFT) \ + template(java_lang_invoke_AdapterMethodHandle,CONV_STACK_MOVE_SHIFT) \ + template(java_lang_invoke_AdapterMethodHandle,CONV_STACK_MOVE_MASK) \ /*end*/ #define ONE_PLUS(scope,value) 1+ @@ -2386,7 +2420,7 @@ #undef EACH_NAMED_CON #endif -JVM_ENTRY(jint, MHI_getNamedCon(JNIEnv *env, jobject igcls, jint which, jobjectArray box_jh)) { +JVM_ENTRY(jint, MHN_getNamedCon(JNIEnv *env, jobject igcls, jint which, jobjectArray box_jh)) { #ifndef PRODUCT if (which >= 0 && which < con_value_count) { int con = con_values[which]; @@ -2406,7 +2440,7 @@ JVM_END // void init(MemberName self, AccessibleObject ref) -JVM_ENTRY(void, MHI_init_Mem(JNIEnv *env, jobject igcls, jobject mname_jh, jobject target_jh)) { +JVM_ENTRY(void, MHN_init_Mem(JNIEnv *env, jobject igcls, jobject mname_jh, jobject target_jh)) { if (mname_jh == NULL || target_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); } Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh)); oop target_oop = JNIHandles::resolve_non_null(target_jh); @@ -2415,7 +2449,7 @@ JVM_END // void expand(MemberName self) -JVM_ENTRY(void, MHI_expand_Mem(JNIEnv *env, jobject igcls, jobject mname_jh)) { +JVM_ENTRY(void, MHN_expand_Mem(JNIEnv *env, jobject igcls, jobject mname_jh)) { if (mname_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); } Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh)); MethodHandles::expand_MemberName(mname, 0, CHECK); @@ -2423,14 +2457,14 @@ JVM_END // void resolve(MemberName self, Class<?> caller) 
-JVM_ENTRY(void, MHI_resolve_Mem(JNIEnv *env, jobject igcls, jobject mname_jh, jclass caller_jh)) { +JVM_ENTRY(void, MHN_resolve_Mem(JNIEnv *env, jobject igcls, jobject mname_jh, jclass caller_jh)) { if (mname_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); } Handle mname(THREAD, JNIHandles::resolve_non_null(mname_jh)); // The trusted Java code that calls this method should already have performed // access checks on behalf of the given caller. But, we can verify this. if (VerifyMethodHandles && caller_jh != NULL) { - klassOop reference_klass = java_lang_Class::as_klassOop(sun_dyn_MemberName::clazz(mname())); + klassOop reference_klass = java_lang_Class::as_klassOop(java_lang_invoke_MemberName::clazz(mname())); if (reference_klass != NULL) { // Emulate LinkResolver::check_klass_accessability. klassOop caller = java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(caller_jh)); @@ -2448,7 +2482,7 @@ // static native int getMembers(Class<?> defc, String matchName, String matchSig, // int matchFlags, Class<?> caller, int skip, MemberName[] results); -JVM_ENTRY(jint, MHI_getMembers(JNIEnv *env, jobject igcls, +JVM_ENTRY(jint, MHN_getMembers(JNIEnv *env, jobject igcls, jclass clazz_jh, jstring name_jh, jstring sig_jh, int mflags, jclass caller_jh, jint skip, jobjectArray results_jh)) { if (clazz_jh == NULL || results_jh == NULL) return -1; @@ -2487,116 +2521,51 @@ } JVM_END -JVM_ENTRY(void, MHI_registerBootstrap(JNIEnv *env, jobject igcls, jclass caller_jh, jobject bsm_jh)) { - instanceKlassHandle ik = MethodHandles::resolve_instance_klass(caller_jh, THREAD); - if (!AllowTransitionalJSR292) { - THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), - "registerBootstrapMethod is only supported in JSR 292 EDR"); - } - ik->link_class(CHECK); - if (!java_dyn_MethodHandle::is_instance(JNIHandles::resolve(bsm_jh))) { - THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "method handle"); - } - const char* err = NULL; - if (ik->is_initialized() || ik->is_in_error_state()) { - err = "too late: class is already initialized"; - } else { - ObjectLocker ol(ik, THREAD); // note: this should be a recursive lock - if (ik->is_not_initialized() || - (ik->is_being_initialized() && ik->is_reentrant_initialization(THREAD))) { - if (ik->bootstrap_method() != NULL) { - err = "class is already equipped with a bootstrap method"; - } else { - ik->set_bootstrap_method(JNIHandles::resolve_non_null(bsm_jh)); - err = NULL; - } - } else { - err = "class is already initialized"; - if (ik->is_being_initialized()) - err = "class is already being initialized in a different thread"; - } - } - if (err != NULL) { - THROW_MSG(vmSymbols::java_lang_IllegalStateException(), err); - } -} -JVM_END - -JVM_ENTRY(jobject, MHI_getBootstrap(JNIEnv *env, jobject igcls, jclass caller_jh)) { - instanceKlassHandle ik = MethodHandles::resolve_instance_klass(caller_jh, THREAD); - return JNIHandles::make_local(THREAD, ik->bootstrap_method()); -} -JVM_END - -JVM_ENTRY(void, MHI_setCallSiteTarget(JNIEnv *env, jobject igcls, jobject site_jh, jobject target_jh)) { - // No special action required, yet. 
- oop site_oop = JNIHandles::resolve(site_jh); - if (!java_dyn_CallSite::is_instance(site_oop)) - THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "not a CallSite"); - java_dyn_CallSite::set_target(site_oop, JNIHandles::resolve(target_jh)); -} -JVM_END - /// JVM_RegisterMethodHandleMethods -#define ADR "J" - #define LANG "Ljava/lang/" -#define JDYN "Ljava/dyn/" -#define IDYN "Lsun/dyn/" +#define JLINV "Ljava/lang/invoke/" #define OBJ LANG"Object;" #define CLS LANG"Class;" #define STRG LANG"String;" -#define CST JDYN"CallSite;" -#define MT JDYN"MethodType;" -#define MH JDYN"MethodHandle;" -#define MHI IDYN"MethodHandleImpl;" -#define MEM IDYN"MemberName;" -#define AMH IDYN"AdapterMethodHandle;" -#define BMH IDYN"BoundMethodHandle;" -#define DMH IDYN"DirectMethodHandle;" +#define MT JLINV"MethodType;" +#define MH JLINV"MethodHandle;" +#define MEM JLINV"MemberName;" +#define AMH JLINV"AdapterMethodHandle;" +#define BMH JLINV"BoundMethodHandle;" +#define DMH JLINV"DirectMethodHandle;" #define CC (char*) /*cast a literal from (const char*)*/ #define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f) -// These are the native methods on sun.dyn.MethodHandleNatives. +// These are the native methods on sun.invoke.MethodHandleNatives. static JNINativeMethod methods[] = { // void init(MemberName self, AccessibleObject ref) - {CC"init", CC"("AMH""MH"I)V", FN_PTR(MHI_init_AMH)}, - {CC"init", CC"("BMH""OBJ"I)V", FN_PTR(MHI_init_BMH)}, - {CC"init", CC"("DMH""OBJ"Z"CLS")V", FN_PTR(MHI_init_DMH)}, - {CC"init", CC"("MT")V", FN_PTR(MHI_init_MT)}, - {CC"init", CC"("MEM""OBJ")V", FN_PTR(MHI_init_Mem)}, - {CC"expand", CC"("MEM")V", FN_PTR(MHI_expand_Mem)}, - {CC"resolve", CC"("MEM""CLS")V", FN_PTR(MHI_resolve_Mem)}, - {CC"getTarget", CC"("MH"I)"OBJ, FN_PTR(MHI_getTarget)}, - {CC"getConstant", CC"(I)I", FN_PTR(MHI_getConstant)}, + {CC"init", CC"("AMH""MH"I)V", FN_PTR(MHN_init_AMH)}, + {CC"init", CC"("BMH""OBJ"I)V", FN_PTR(MHN_init_BMH)}, + {CC"init", CC"("DMH""OBJ"Z"CLS")V", FN_PTR(MHN_init_DMH)}, + {CC"init", CC"("MT")V", FN_PTR(MHN_init_MT)}, + {CC"init", CC"("MEM""OBJ")V", FN_PTR(MHN_init_Mem)}, + {CC"expand", CC"("MEM")V", FN_PTR(MHN_expand_Mem)}, + {CC"resolve", CC"("MEM""CLS")V", FN_PTR(MHN_resolve_Mem)}, + {CC"getTarget", CC"("MH"I)"OBJ, FN_PTR(MHN_getTarget)}, + {CC"getConstant", CC"(I)I", FN_PTR(MHN_getConstant)}, // static native int getNamedCon(int which, Object[] name) - {CC"getNamedCon", CC"(I["OBJ")I", FN_PTR(MHI_getNamedCon)}, + {CC"getNamedCon", CC"(I["OBJ")I", FN_PTR(MHN_getNamedCon)}, // static native int getMembers(Class<?> defc, String matchName, String matchSig, // int matchFlags, Class<?> caller, int skip, MemberName[] results); - {CC"getMembers", CC"("CLS""STRG""STRG"I"CLS"I["MEM")I", FN_PTR(MHI_getMembers)} + {CC"getMembers", CC"("CLS""STRG""STRG"I"CLS"I["MEM")I", FN_PTR(MHN_getMembers)} }; -// More entry points specifically for EnableInvokeDynamic. -static JNINativeMethod methods2[] = { - {CC"registerBootstrap", CC"("CLS MH")V", FN_PTR(MHI_registerBootstrap)}, - {CC"getBootstrap", CC"("CLS")"MH, FN_PTR(MHI_getBootstrap)}, - {CC"setCallSiteTarget", CC"("CST MH")V", FN_PTR(MHI_setCallSiteTarget)} -}; - - // This one function is exported, used by NativeLookup. 
JVM_ENTRY(void, JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass MHN_class)) { assert(MethodHandles::spot_check_entry_names(), "entry enum is OK"); - // note: this explicit warning-producing stuff will be replaced by auto-detection of the JSR 292 classes - - if (!EnableMethodHandles) { - warning("JSR 292 method handles are disabled in this JVM. Use -XX:+UnlockExperimentalVMOptions -XX:+EnableMethodHandles to enable."); + if (!EnableInvokeDynamic) { + warning("JSR 292 is disabled in this JVM. Use -XX:+UnlockDiagnosticVMOptions -XX:+EnableInvokeDynamic to enable."); return; // bind nothing } @@ -2615,11 +2584,11 @@ } if (enable_MH) { - KlassHandle MHI_klass = SystemDictionaryHandles::MethodHandleImpl_klass(); - if (MHI_klass.not_null()) { + KlassHandle MHN_klass = SystemDictionaryHandles::MethodHandleNatives_klass(); + if (MHN_klass.not_null()) { TempNewSymbol raiseException_name = SymbolTable::new_symbol("raiseException", CHECK); TempNewSymbol raiseException_sig = SymbolTable::new_symbol("(ILjava/lang/Object;Ljava/lang/Object;)V", CHECK); - methodOop raiseException_method = instanceKlass::cast(MHI_klass->as_klassOop()) + methodOop raiseException_method = instanceKlass::cast(MHN_klass->as_klassOop()) ->find_method(raiseException_name, raiseException_sig); if (raiseException_method != NULL && raiseException_method->is_static()) { MethodHandles::set_raise_exception_method(raiseException_method); @@ -2633,33 +2602,8 @@ } if (enable_MH) { - // We need to link the MethodHandleImpl klass before we generate - // the method handle adapters as the _raise_exception adapter uses - // one of its methods (and its c2i-adapter). - KlassHandle k = SystemDictionaryHandles::MethodHandleImpl_klass(); - instanceKlass* ik = instanceKlass::cast(k()); - ik->link_class(CHECK); - MethodHandles::generate_adapters(); MethodHandles::set_enabled(true); } - - if (!EnableInvokeDynamic) { - warning("JSR 292 invokedynamic is disabled in this JVM. Use -XX:+UnlockExperimentalVMOptions -XX:+EnableInvokeDynamic to enable."); - return; // bind nothing - } - - { - ThreadToNativeFromVM ttnfv(thread); - - int status = env->RegisterNatives(MHN_class, methods2, sizeof(methods2)/sizeof(JNINativeMethod)); - if (env->ExceptionOccurred()) { - MethodHandles::set_enabled(false); - warning("JSR 292 method handle code is mismatched to this JVM. Disabling support."); - env->ExceptionClear(); - } else { - MethodHandles::set_enabled(true); - } - } } JVM_END
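Editor's note on the methodHandles.cpp hunk above: the changeset collapses the two registration tables (methods[] and methods2[]) into a single MHN_* table bound onto java.lang.invoke.MethodHandleNatives and gated by the one EnableInvokeDynamic flag. The following is a minimal standalone sketch of that registration pattern, not the HotSpot code itself; apart from the flag name and the "init"/"getConstant" signatures taken from the diff, all identifiers here (RegisterMethodHandleNatives, enable_invoke_dynamic, the stub bodies) are illustrative assumptions.

// Simplified sketch (not the actual HotSpot code) of the post-changeset
// registration pattern: one JNINativeMethod table for the MHN_* entry points,
// bound on java.lang.invoke.MethodHandleNatives, gated on EnableInvokeDynamic.
#include <jni.h>
#include <cstdio>

// Hypothetical stand-ins for two of the MHN_* entry points shown in the diff.
static void JNICALL MHN_init_MT(JNIEnv*, jobject, jobject)  { /* ... */ }
static jint JNICALL MHN_getConstant(JNIEnv*, jobject, jint) { return 0; }

static JNINativeMethod mhn_methods[] = {
  { (char*)"init",        (char*)"(Ljava/lang/invoke/MethodType;)V", (void*)MHN_init_MT     },
  { (char*)"getConstant", (char*)"(I)I",                             (void*)MHN_getConstant },
};

static bool enable_invoke_dynamic = true;  // stands in for -XX:+EnableInvokeDynamic

// Mirrors the shape of JVM_RegisterMethodHandleMethods after this changeset:
// bind nothing when the flag is off, and back out if the Java and native
// sides disagree about the method signatures.
extern "C" void RegisterMethodHandleNatives(JNIEnv* env, jclass mhn_class) {
  if (!enable_invoke_dynamic) {
    std::fprintf(stderr, "JSR 292 is disabled in this JVM; binding nothing\n");
    return;
  }
  jint status = env->RegisterNatives(mhn_class, mhn_methods,
                                     sizeof(mhn_methods) / sizeof(mhn_methods[0]));
  if (status != JNI_OK || env->ExceptionOccurred()) {
    env->ExceptionClear();
    std::fprintf(stderr, "method handle natives are mismatched; disabling support\n");
  }
}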
--- a/src/share/vm/prims/methodHandles.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/methodHandles.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -37,8 +37,8 @@ class MethodHandles: AllStatic { // JVM support for MethodHandle, MethodType, and related types - // in java.dyn and java.dyn.hotspot. - // See also javaClasses for layouts java_dyn_Method{Handle,Type,Type::Form}. + // in java.lang.invoke and sun.invoke. + // See also javaClasses for layouts java_lang_invoke_Method{Handle,Type,Type::Form}. public: enum EntryKind { _raise_exception, // stub for error generation from other stubs @@ -54,21 +54,21 @@ _bound_long_direct_mh, _adapter_mh_first, // adapter sequence goes here... - _adapter_retype_only = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY, - _adapter_retype_raw = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW, - _adapter_check_cast = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_CHECK_CAST, - _adapter_prim_to_prim = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM, - _adapter_ref_to_prim = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM, - _adapter_prim_to_ref = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_PRIM_TO_REF, - _adapter_swap_args = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS, - _adapter_rot_args = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_ROT_ARGS, - _adapter_dup_args = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_DUP_ARGS, - _adapter_drop_args = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_DROP_ARGS, - _adapter_collect_args = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_COLLECT_ARGS, - _adapter_spread_args = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS, - _adapter_flyby = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_FLYBY, - _adapter_ricochet = _adapter_mh_first + sun_dyn_AdapterMethodHandle::OP_RICOCHET, - _adapter_mh_last = _adapter_mh_first + sun_dyn_AdapterMethodHandle::CONV_OP_LIMIT - 1, + _adapter_retype_only = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY, + _adapter_retype_raw = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW, + _adapter_check_cast = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST, + _adapter_prim_to_prim = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM, + _adapter_ref_to_prim = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM, + _adapter_prim_to_ref = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF, + _adapter_swap_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS, + _adapter_rot_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS, + _adapter_dup_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS, + _adapter_drop_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS, + _adapter_collect_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS, + _adapter_spread_args = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS, + _adapter_flyby = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_FLYBY, + _adapter_ricochet = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::OP_RICOCHET, + _adapter_mh_last = _adapter_mh_first + java_lang_invoke_AdapterMethodHandle::CONV_OP_LIMIT - 1, // Optimized adapter types @@ -107,16 +107,16 @@ static void set_enabled(bool z); private: - enum { // 
import sun_dyn_AdapterMethodHandle::CONV_OP_* - CONV_OP_LIMIT = sun_dyn_AdapterMethodHandle::CONV_OP_LIMIT, - CONV_OP_MASK = sun_dyn_AdapterMethodHandle::CONV_OP_MASK, - CONV_VMINFO_MASK = sun_dyn_AdapterMethodHandle::CONV_VMINFO_MASK, - CONV_VMINFO_SHIFT = sun_dyn_AdapterMethodHandle::CONV_VMINFO_SHIFT, - CONV_OP_SHIFT = sun_dyn_AdapterMethodHandle::CONV_OP_SHIFT, - CONV_DEST_TYPE_SHIFT = sun_dyn_AdapterMethodHandle::CONV_DEST_TYPE_SHIFT, - CONV_SRC_TYPE_SHIFT = sun_dyn_AdapterMethodHandle::CONV_SRC_TYPE_SHIFT, - CONV_STACK_MOVE_SHIFT = sun_dyn_AdapterMethodHandle::CONV_STACK_MOVE_SHIFT, - CONV_STACK_MOVE_MASK = sun_dyn_AdapterMethodHandle::CONV_STACK_MOVE_MASK + enum { // import java_lang_invoke_AdapterMethodHandle::CONV_OP_* + CONV_OP_LIMIT = java_lang_invoke_AdapterMethodHandle::CONV_OP_LIMIT, + CONV_OP_MASK = java_lang_invoke_AdapterMethodHandle::CONV_OP_MASK, + CONV_VMINFO_MASK = java_lang_invoke_AdapterMethodHandle::CONV_VMINFO_MASK, + CONV_VMINFO_SHIFT = java_lang_invoke_AdapterMethodHandle::CONV_VMINFO_SHIFT, + CONV_OP_SHIFT = java_lang_invoke_AdapterMethodHandle::CONV_OP_SHIFT, + CONV_DEST_TYPE_SHIFT = java_lang_invoke_AdapterMethodHandle::CONV_DEST_TYPE_SHIFT, + CONV_SRC_TYPE_SHIFT = java_lang_invoke_AdapterMethodHandle::CONV_SRC_TYPE_SHIFT, + CONV_STACK_MOVE_SHIFT = java_lang_invoke_AdapterMethodHandle::CONV_STACK_MOVE_SHIFT, + CONV_STACK_MOVE_MASK = java_lang_invoke_AdapterMethodHandle::CONV_STACK_MOVE_MASK }; static bool _enabled; @@ -471,7 +471,7 @@ }; -// Access methods for the "entry" field of a java.dyn.MethodHandle. +// Access methods for the "entry" field of a java.lang.invoke.MethodHandle. // The field is primarily a jump target for compiled calls. // However, we squirrel away some nice pointers for other uses, // just before the jump target.
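The EntryKind values above are derived by adding the adapter conversion-op constants (OP_RETYPE_ONLY, OP_SWAP_ARGS, ...) to _adapter_mh_first, and the private enum re-imports the CONV_* shift/mask constants that describe how an adapter's conversion descriptor is packed into a single word. A minimal sketch of that shift/mask decoding pattern follows; the field positions and widths are invented for illustration and are not HotSpot's real CONV_* values.

// Sketch only: decoding a packed conversion descriptor with shift/mask
// constants in the style of the CONV_* enum above. The layout and widths
// here are invented for illustration; they are NOT HotSpot's values.
#include <cstdint>
#include <cstdio>

enum {
  CONV_OP_SHIFT        = 0,                            // hypothetical layout
  CONV_OP_MASK         = 0x0F << CONV_OP_SHIFT,
  CONV_DEST_TYPE_SHIFT = 4,
  CONV_DEST_TYPE_MASK  = 0x0F << CONV_DEST_TYPE_SHIFT,
  CONV_VMINFO_SHIFT    = 8,
  CONV_VMINFO_MASK     = 0xFF << CONV_VMINFO_SHIFT
};

static int conv_op(uint32_t conv)     { return (conv & CONV_OP_MASK)        >> CONV_OP_SHIFT; }
static int conv_dest(uint32_t conv)   { return (conv & CONV_DEST_TYPE_MASK) >> CONV_DEST_TYPE_SHIFT; }
static int conv_vminfo(uint32_t conv) { return (conv & CONV_VMINFO_MASK)    >> CONV_VMINFO_SHIFT; }

int main() {
  // Pack and unpack one descriptor word to show the mask/shift round trip.
  uint32_t conv = (7u << CONV_VMINFO_SHIFT) | (2u << CONV_DEST_TYPE_SHIFT) | 3u;
  printf("op=%d dest=%d vminfo=%d\n", conv_op(conv), conv_dest(conv), conv_vminfo(conv));
  return 0;
}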
--- a/src/share/vm/prims/nativeLookup.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/nativeLookup.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -107,29 +107,28 @@ void JNICALL JVM_RegisterPerfMethods(JNIEnv *env, jclass perfclass); } +#define CC (char*) /* cast a literal from (const char*) */ +#define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f) + +static JNINativeMethod lookup_special_native_methods[] = { + // Next two functions only exist for compatibility with 1.3.1 and earlier. + { CC"Java_java_io_ObjectOutputStream_getPrimitiveFieldValues", NULL, FN_PTR(JVM_GetPrimitiveFieldValues) }, // intercept ObjectOutputStream getPrimitiveFieldValues for faster serialization + { CC"Java_java_io_ObjectInputStream_setPrimitiveFieldValues", NULL, FN_PTR(JVM_SetPrimitiveFieldValues) }, // intercept ObjectInputStream setPrimitiveFieldValues for faster serialization + + { CC"Java_sun_misc_Unsafe_registerNatives", NULL, FN_PTR(JVM_RegisterUnsafeMethods) }, + { CC"Java_java_lang_invoke_MethodHandleNatives_registerNatives", NULL, FN_PTR(JVM_RegisterMethodHandleMethods) }, + { CC"Java_sun_misc_Perf_registerNatives", NULL, FN_PTR(JVM_RegisterPerfMethods) } +}; + static address lookup_special_native(char* jni_name) { - // NB: To ignore the jni prefix and jni postfix strstr is used matching. - if (!JDK_Version::is_gte_jdk14x_version()) { - // These functions only exist for compatibility with 1.3.1 and earlier - // Intercept ObjectOutputStream getPrimitiveFieldValues for faster serialization - if (strstr(jni_name, "Java_java_io_ObjectOutputStream_getPrimitiveFieldValues") != NULL) { - return CAST_FROM_FN_PTR(address, JVM_GetPrimitiveFieldValues); - } - // Intercept ObjectInputStream setPrimitiveFieldValues for faster serialization - if (strstr(jni_name, "Java_java_io_ObjectInputStream_setPrimitiveFieldValues") != NULL) { - return CAST_FROM_FN_PTR(address, JVM_SetPrimitiveFieldValues); + int i = !JDK_Version::is_gte_jdk14x_version() ? 0 : 2; // see comment in lookup_special_native_methods + int count = sizeof(lookup_special_native_methods) / sizeof(JNINativeMethod); + for (; i < count; i++) { + // NB: To ignore the jni prefix and jni postfix strstr is used matching. + if (strstr(jni_name, lookup_special_native_methods[i].name) != NULL) { + return CAST_FROM_FN_PTR(address, lookup_special_native_methods[i].fnPtr); } } - if (strstr(jni_name, "Java_sun_misc_Unsafe_registerNatives") != NULL) { - return CAST_FROM_FN_PTR(address, JVM_RegisterUnsafeMethods); - } - if (strstr(jni_name, "Java_sun_dyn_MethodHandleNatives_registerNatives") != NULL) { - return CAST_FROM_FN_PTR(address, JVM_RegisterMethodHandleMethods); - } - if (strstr(jni_name, "Java_sun_misc_Perf_registerNatives") != NULL) { - return CAST_FROM_FN_PTR(address, JVM_RegisterPerfMethods); - } - return NULL; }
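The rewrite above replaces a chain of strstr checks with a single table of JNINativeMethod entries that lookup_special_native scans, skipping the first two pre-1.4 serialization intercepts on newer JDKs. A minimal standalone sketch of the same table-scan pattern; the names and handler functions below are placeholders, not the real JVM_Register* entry points.

// Sketch of the table-driven special-native lookup above (placeholder
// names and handlers, not the real JVM_Register* functions).
#include <cstring>
#include <cstdio>

typedef void (*entry_fn)();
static void register_unsafe() { puts("register unsafe natives"); }
static void register_mh()     { puts("register method handle natives"); }

struct SpecialNative { const char* name; entry_fn fn; };

static SpecialNative special_natives[] = {
  // the first N entries could be skipped depending on JDK version, as in the patch
  { "Java_sun_misc_Unsafe_registerNatives",                      register_unsafe },
  { "Java_java_lang_invoke_MethodHandleNatives_registerNatives", register_mh     },
};

static entry_fn lookup_special(const char* jni_name, int first) {
  int count = sizeof(special_natives) / sizeof(special_natives[0]);
  for (int i = first; i < count; i++) {
    // strstr tolerates any JNI prefix/suffix decoration around the core name
    if (strstr(jni_name, special_natives[i].name) != NULL) {
      return special_natives[i].fn;
    }
  }
  return NULL;
}

int main() {
  entry_fn f = lookup_special("Java_java_lang_invoke_MethodHandleNatives_registerNatives__", 0);
  if (f != NULL) f();
  return 0;
}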
--- a/src/share/vm/prims/unsafe.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/prims/unsafe.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -110,6 +110,8 @@ inline void* index_oop_from_field_offset_long(oop p, jlong field_offset) { jlong byte_offset = field_offset_to_byte_offset(field_offset); + // Don't allow unsafe to be used to read or write the header word of oops + assert(p == NULL || field_offset >= oopDesc::header_size(), "offset must be outside of header"); #ifdef ASSERT if (p != NULL) { assert(byte_offset >= 0 && byte_offset <= (jlong)MAX_OBJECT_SIZE, "sane offset"); @@ -686,7 +688,7 @@ THROW_0(vmSymbols::java_lang_IllegalArgumentException()); } - return JNIHandles::make_local(env, java_lang_Class::as_klassOop(mirror)); + return JNIHandles::make_local(env, mirror); UNSAFE_END //@deprecated @@ -704,7 +706,7 @@ if (clazz == NULL) { THROW_0(vmSymbols::java_lang_NullPointerException()); } - return JNIHandles::make_local(env, java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(clazz))); + return JNIHandles::make_local(env, JNIHandles::resolve_non_null(clazz)); UNSAFE_END UNSAFE_ENTRY(void, Unsafe_EnsureClassInitialized(JNIEnv *env, jobject unsafe, jobject clazz)) @@ -1558,7 +1560,7 @@ } } } - if (AnonymousClasses) { + if (EnableInvokeDynamic) { env->RegisterNatives(unsafecls, anonk_methods, sizeof(anonk_methods)/sizeof(JNINativeMethod)); if (env->ExceptionOccurred()) { if (PrintMiscellaneous && (Verbose || WizardMode)) {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/runtime/advancedThresholdPolicy.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -0,0 +1,450 @@ +/* +* Copyright (c) 2010, 2011 Oracle and/or its affiliates. All rights reserved. +* ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. +*/ + +#include "precompiled.hpp" +#include "runtime/advancedThresholdPolicy.hpp" +#include "runtime/simpleThresholdPolicy.inline.hpp" + +#ifdef TIERED +// Print an event. +void AdvancedThresholdPolicy::print_specific(EventType type, methodHandle mh, methodHandle imh, + int bci, CompLevel level) { + tty->print(" rate: "); + if (mh->prev_time() == 0) tty->print("n/a"); + else tty->print("%f", mh->rate()); + + tty->print(" k: %.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback), + threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback)); + +} + +void AdvancedThresholdPolicy::initialize() { + // Turn on ergonomic compiler count selection + if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) { + FLAG_SET_DEFAULT(CICompilerCountPerCPU, true); + } + int count = CICompilerCount; + if (CICompilerCountPerCPU) { + // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n + int log_cpu = log2_intptr(os::active_processor_count()); + int loglog_cpu = log2_intptr(MAX2(log_cpu, 1)); + count = MAX2(log_cpu * loglog_cpu, 1) * 3 / 2; + } + + set_c1_count(MAX2(count / 3, 1)); + set_c2_count(MAX2(count - count / 3, 1)); + + // Some inlining tuning +#ifdef X86 + if (FLAG_IS_DEFAULT(InlineSmallCode)) { + FLAG_SET_DEFAULT(InlineSmallCode, 2000); + } +#endif + +#ifdef SPARC + if (FLAG_IS_DEFAULT(InlineSmallCode)) { + FLAG_SET_DEFAULT(InlineSmallCode, 2500); + } +#endif + + + set_start_time(os::javaTimeMillis()); +} + +// update_rate() is called from select_task() while holding a compile queue lock. +void AdvancedThresholdPolicy::update_rate(jlong t, methodOop m) { + if (is_old(m)) { + // We don't remove old methods from the queue, + // so we can just zero the rate. + m->set_rate(0); + return; + } + + // We don't update the rate if we've just come out of a safepoint. + // delta_s is the time since last safepoint in milliseconds. + jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint(); + jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement + // How many events were there since the last time? + int event_count = m->invocation_count() + m->backedge_count(); + int delta_e = event_count - m->prev_event_count(); + + // We should be running for at least 1ms. + if (delta_s >= TieredRateUpdateMinTime) { + // And we must've taken the previous point at least 1ms before. + if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) { + m->set_prev_time(t); + m->set_prev_event_count(event_count); + m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond + } else + if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) { + // If nothing happened for 25ms, zero the rate. Don't modify prev values. + m->set_rate(0); + } + } +} + +// Check if this method has been stale for a given number of milliseconds. +// See select_task(). 
+bool AdvancedThresholdPolicy::is_stale(jlong t, jlong timeout, methodOop m) { + jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint(); + jlong delta_t = t - m->prev_time(); + if (delta_t > timeout && delta_s > timeout) { + int event_count = m->invocation_count() + m->backedge_count(); + int delta_e = event_count - m->prev_event_count(); + // Return true if there were no events. + return delta_e == 0; + } + return false; +} + +// We don't remove old methods from the compile queue even if they have +// very low activity. See select_task(). +bool AdvancedThresholdPolicy::is_old(methodOop method) { + return method->invocation_count() > 50000 || method->backedge_count() > 500000; +} + +double AdvancedThresholdPolicy::weight(methodOop method) { + return (method->rate() + 1) * ((method->invocation_count() + 1) * (method->backedge_count() + 1)); +} + +// Apply heuristics and return true if x should be compiled before y +bool AdvancedThresholdPolicy::compare_methods(methodOop x, methodOop y) { + if (x->highest_comp_level() > y->highest_comp_level()) { + // recompilation after deopt + return true; + } else + if (x->highest_comp_level() == y->highest_comp_level()) { + if (weight(x) > weight(y)) { + return true; + } + } + return false; +} + +// Is method profiled enough? +bool AdvancedThresholdPolicy::is_method_profiled(methodOop method) { + methodDataOop mdo = method->method_data(); + if (mdo != NULL) { + int i = mdo->invocation_count_delta(); + int b = mdo->backedge_count_delta(); + return call_predicate_helper<CompLevel_full_profile>(i, b, 1); + } + return false; +} + +// Called with the queue locked and with at least one element +CompileTask* AdvancedThresholdPolicy::select_task(CompileQueue* compile_queue) { + CompileTask *max_task = NULL; + methodOop max_method; + jlong t = os::javaTimeMillis(); + // Iterate through the queue and find a method with a maximum rate. + for (CompileTask* task = compile_queue->first(); task != NULL;) { + CompileTask* next_task = task->next(); + methodOop method = (methodOop)JNIHandles::resolve(task->method_handle()); + methodDataOop mdo = method->method_data(); + update_rate(t, method); + if (max_task == NULL) { + max_task = task; + max_method = method; + } else { + // If a method has been stale for some time, remove it from the queue. 
+ if (is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) { + if (PrintTieredEvents) { + print_event(KILL, method, method, task->osr_bci(), (CompLevel)task->comp_level()); + } + CompileTaskWrapper ctw(task); // Frees the task + compile_queue->remove(task); + method->clear_queued_for_compilation(); + task = next_task; + continue; + } + + // Select a method with a higher rate + if (compare_methods(method, max_method)) { + max_task = task; + max_method = method; + } + } + task = next_task; + } + + if (max_task->comp_level() == CompLevel_full_profile && is_method_profiled(max_method)) { + max_task->set_comp_level(CompLevel_limited_profile); + if (PrintTieredEvents) { + print_event(UPDATE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level()); + } + } + + return max_task; +} + +double AdvancedThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) { + double queue_size = CompileBroker::queue_size(level); + int comp_count = compiler_count(level); + double k = queue_size / (feedback_k * comp_count) + 1; + return k; +} + +// Call and loop predicates determine whether a transition to a higher +// compilation level should be performed (pointers to predicate functions +// are passed to common()). +// Tier?LoadFeedback is basically a coefficient that determines +// how many methods per compiler thread can be in the queue before +// the threshold values double. +bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level) { + switch(cur_level) { + case CompLevel_none: + case CompLevel_limited_profile: { + double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback); + return loop_predicate_helper<CompLevel_none>(i, b, k); + } + case CompLevel_full_profile: { + double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback); + return loop_predicate_helper<CompLevel_full_profile>(i, b, k); + } + default: + return true; + } +} + +bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level) { + switch(cur_level) { + case CompLevel_none: + case CompLevel_limited_profile: { + double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback); + return call_predicate_helper<CompLevel_none>(i, b, k); + } + case CompLevel_full_profile: { + double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback); + return call_predicate_helper<CompLevel_full_profile>(i, b, k); + } + default: + return true; + } +} + +// If a method is old enough and is still in the interpreter we would want to +// start profiling without waiting for the compiled method to arrive. +// We also take the load on compilers into account. +bool AdvancedThresholdPolicy::should_create_mdo(methodOop method, CompLevel cur_level) { + if (cur_level == CompLevel_none && + CompileBroker::queue_size(CompLevel_full_optimization) <= + Tier3DelayOn * compiler_count(CompLevel_full_optimization)) { + int i = method->invocation_count(); + int b = method->backedge_count(); + double k = Tier0ProfilingStartPercentage / 100.0; + return call_predicate_helper<CompLevel_none>(i, b, k) || loop_predicate_helper<CompLevel_none>(i, b, k); + } + return false; +} + +// Create MDO if necessary. 
+void AdvancedThresholdPolicy::create_mdo(methodHandle mh, TRAPS) { + if (mh->is_native() || mh->is_abstract() || mh->is_accessor()) return; + if (mh->method_data() == NULL) { + methodOopDesc::build_interpreter_method_data(mh, THREAD); + if (HAS_PENDING_EXCEPTION) { + CLEAR_PENDING_EXCEPTION; + } + } +} + + +/* + * Method states: + * 0 - interpreter (CompLevel_none) + * 1 - pure C1 (CompLevel_simple) + * 2 - C1 with invocation and backedge counting (CompLevel_limited_profile) + * 3 - C1 with full profiling (CompLevel_full_profile) + * 4 - C2 (CompLevel_full_optimization) + * + * Common state transition patterns: + * a. 0 -> 3 -> 4. + * The most common path. But note that even in this straightforward case + * profiling can start at level 0 and finish at level 3. + * + * b. 0 -> 2 -> 3 -> 4. + * This case occurs when the load on C2 is deemed too high. So, instead of transitioning + * into state 3 directly and over-profiling while a method is in the C2 queue we transition to + * level 2 and wait until the load on C2 decreases. This path is disabled for OSRs. + * + * c. 0 -> (3->2) -> 4. + * In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough + * to enable the profiling to fully occur at level 0. In this case we change the compilation level + * of the method to 2, because it'll allow it to run much faster without full profiling while c2 + * is compiling. + * + * d. 0 -> 3 -> 1 or 0 -> 2 -> 1. + * After a method was once compiled with C1 it can be identified as trivial and be compiled to + * level 1. These transitions can also occur if a method can't be compiled with C2 but can with C1. + * + * e. 0 -> 4. + * This can happen if a method fails C1 compilation (it will still be profiled in the interpreter) + * or because of a deopt that didn't require reprofiling (compilation won't happen in this case because + * the compiled version already exists). + * + * Note that since state 0 can be reached from any other state via deoptimization, different loops + * are possible. + * + */ + +// Common transition function. Given a predicate, determines if a method should transition to another level. +CompLevel AdvancedThresholdPolicy::common(Predicate p, methodOop method, CompLevel cur_level) { + if (is_trivial(method)) return CompLevel_simple; + + CompLevel next_level = cur_level; + int i = method->invocation_count(); + int b = method->backedge_count(); + + switch(cur_level) { + case CompLevel_none: + // If we were at full profile level, would we switch to full opt? + if (common(p, method, CompLevel_full_profile) == CompLevel_full_optimization) { + next_level = CompLevel_full_optimization; + } else if ((this->*p)(i, b, cur_level)) { + // C1-generated fully profiled code is about 30% slower than the limited profile + // code that has only invocation and backedge counters. The observation is that + // if the C2 queue is large enough we can spend too much time in the fully profiled code + // while waiting for C2 to pick the method from the queue. To alleviate this problem + // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long + // we choose to compile a limited profiled version and then recompile with full profiling + // when the load on C2 goes down. 
+ if (CompileBroker::queue_size(CompLevel_full_optimization) > + Tier3DelayOn * compiler_count(CompLevel_full_optimization)) { + next_level = CompLevel_limited_profile; + } else { + next_level = CompLevel_full_profile; + } + } + break; + case CompLevel_limited_profile: + if (is_method_profiled(method)) { + // Special case: we got here because this method was fully profiled in the interpreter. + next_level = CompLevel_full_optimization; + } else { + methodDataOop mdo = method->method_data(); + if (mdo != NULL) { + if (mdo->would_profile()) { + if (CompileBroker::queue_size(CompLevel_full_optimization) <= + Tier3DelayOff * compiler_count(CompLevel_full_optimization) && + (this->*p)(i, b, cur_level)) { + next_level = CompLevel_full_profile; + } + } else { + next_level = CompLevel_full_optimization; + } + } + } + break; + case CompLevel_full_profile: + { + methodDataOop mdo = method->method_data(); + if (mdo != NULL) { + if (mdo->would_profile()) { + int mdo_i = mdo->invocation_count_delta(); + int mdo_b = mdo->backedge_count_delta(); + if ((this->*p)(mdo_i, mdo_b, cur_level)) { + next_level = CompLevel_full_optimization; + } + } else { + next_level = CompLevel_full_optimization; + } + } + } + break; + } + return next_level; +} + +// Determine if a method should be compiled with a normal entry point at a different level. +CompLevel AdvancedThresholdPolicy::call_event(methodOop method, CompLevel cur_level) { + CompLevel osr_level = (CompLevel) method->highest_osr_comp_level(); + CompLevel next_level = common(&AdvancedThresholdPolicy::call_predicate, method, cur_level); + + // If OSR method level is greater than the regular method level, the levels should be + // equalized by raising the regular method level in order to avoid OSRs during each + // invocation of the method. + if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) { + methodDataOop mdo = method->method_data(); + guarantee(mdo != NULL, "MDO should not be NULL"); + if (mdo->invocation_count() >= 1) { + next_level = CompLevel_full_optimization; + } + } else { + next_level = MAX2(osr_level, next_level); + } + + return next_level; +} + +// Determine if we should do an OSR compilation of a given method. +CompLevel AdvancedThresholdPolicy::loop_event(methodOop method, CompLevel cur_level) { + if (cur_level == CompLevel_none) { + // If there is a live OSR method that means that we deopted to the interpreter + // for the transition. + CompLevel osr_level = (CompLevel)method->highest_osr_comp_level(); + if (osr_level > CompLevel_none) { + return osr_level; + } + } + return common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level); +} + +// Update the rate and submit compile +void AdvancedThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, TRAPS) { + int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count(); + update_rate(os::javaTimeMillis(), mh()); + CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", THREAD); +} + + +// Handle the invocation event. +void AdvancedThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh, + CompLevel level, TRAPS) { + if (should_create_mdo(mh(), level)) { + create_mdo(mh, THREAD); + } + if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) { + CompLevel next_level = call_event(mh(), level); + if (next_level != level) { + compile(mh, InvocationEntryBci, next_level, THREAD); + } + } +} + +// Handle the back branch event. 
Notice that we can compile the method +// with a regular entry from here. +void AdvancedThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh, + int bci, CompLevel level, TRAPS) { + if (should_create_mdo(mh(), level)) { + create_mdo(mh, THREAD); + } + + // If the method is already compiling, quickly bail out. + if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, bci)) { + // Use loop event as an opportunity to also check there have been + // enough calls. + CompLevel cur_level = comp_level(mh()); + CompLevel next_level = call_event(mh(), cur_level); + CompLevel next_osr_level = loop_event(mh(), level); + if (next_osr_level == CompLevel_limited_profile) { + next_osr_level = CompLevel_full_profile; // OSRs are supposed to be for very hot methods. + } + next_level = MAX2(next_level, + next_osr_level < CompLevel_full_optimization ? next_osr_level : cur_level); + bool is_compiling = false; + if (next_level != cur_level) { + compile(mh, InvocationEntryBci, next_level, THREAD); + is_compiling = true; + } + + // Do the OSR version + if (!is_compiling && next_osr_level != level) { + compile(mh, bci, next_osr_level, THREAD); + } + } +} + +#endif // TIERED
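The selection logic above amounts to a priority heuristic over the compile queue: each method carries an event rate (invocations plus backedges per millisecond, refreshed only after enough wall-clock time has passed outside safepoints), a staleness test (no new events for TieredCompileTaskTimeout), and a weight of (rate + 1) * (invocations + 1) * (backedges + 1) used to pick the best candidate. A self-contained sketch of that bookkeeping follows, with a plain struct standing in for methodOop and an illustrative timeout instead of the real flag values.

// Sketch of the rate/weight/staleness heuristic used by select_task() above.
// "Method" stands in for methodOop; the timeout constant is illustrative.
#include <cstdio>

struct Method {
  long invocations, backedges;   // current counters
  long prev_events;              // counter sum at the last rate update
  long prev_time_ms;             // time of the last rate update (0 = never)
  double rate;                   // events per millisecond
};

static const long kTimeoutMs = 1000;  // stand-in for TieredCompileTaskTimeout

static void update_rate(long now_ms, Method& m) {
  long events  = m.invocations + m.backedges;
  long delta_t = now_ms - m.prev_time_ms;   // real code falls back to the policy start time
  long delta_e = events - m.prev_events;
  if (delta_t > 0 && delta_e > 0) {
    m.rate = (double)delta_e / (double)delta_t;
    m.prev_time_ms = now_ms;
    m.prev_events  = events;
  } else if (delta_t > kTimeoutMs && delta_e == 0) {
    m.rate = 0;                             // nothing happened for a while: zero the rate
  }
}

static bool is_stale(long now_ms, const Method& m) {
  return (now_ms - m.prev_time_ms) > kTimeoutMs &&
         (m.invocations + m.backedges) == m.prev_events;
}

static double weight(const Method& m) {
  return (m.rate + 1) * ((m.invocations + 1) * (m.backedges + 1));
}

int main() {
  Method hot  = { 5000, 200000, 190000, 900, 0.0 };  // busy method
  Method idle = {  100,    400,    500,  10, 0.0 };  // no events since last update
  long now = 2000;
  update_rate(now, hot);
  update_rate(now, idle);
  printf("hot:  rate=%.2f weight=%.0f stale=%d\n", hot.rate,  weight(hot),  is_stale(now, hot));
  printf("idle: rate=%.2f weight=%.0f stale=%d\n", idle.rate, weight(idle), is_stale(now, idle));
  return 0;
}

In this sketch the idle method's rate drops to zero and it reports as stale, which is exactly the condition select_task() uses to evict a task from the queue.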
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/runtime/advancedThresholdPolicy.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -0,0 +1,207 @@ +/* +* Copyright (c) 2010, 2011 Oracle and/or its affiliates. All rights reserved. +* ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. +*/ + +#ifndef SHARE_VM_RUNTIME_ADVANCEDTHRESHOLDPOLICY_HPP +#define SHARE_VM_RUNTIME_ADVANCEDTHRESHOLDPOLICY_HPP + +#include "runtime/simpleThresholdPolicy.hpp" + +#ifdef TIERED +class CompileTask; +class CompileQueue; + +/* + * The system supports 5 execution levels: + * * level 0 - interpreter + * * level 1 - C1 with full optimization (no profiling) + * * level 2 - C1 with invocation and backedge counters + * * level 3 - C1 with full profiling (level 2 + MDO) + * * level 4 - C2 + * + * Levels 0, 2 and 3 periodically notify the runtime about the current value of the counters + * (invocation counters and backedge counters). The frequency of these notifications is + * different at each level. These notifications are used by the policy to decide what transition + * to make. + * + * Execution starts at level 0 (interpreter), then the policy can decide either to compile the + * method at level 3 or level 2. The decision is based on the following factors: + * 1. The length of the C2 queue determines the next level. The observation is that level 2 + * is generally faster than level 3 by about 30%, therefore we would want to minimize the time + * a method spends at level 3. We should only spend the time at level 3 that is necessary to get + * adequate profiling. So, if the C2 queue is long enough it is more beneficial to go first to + * level 2, because if we transitioned to level 3 we would be stuck there until our C2 compile + * request makes its way through the long queue. When the load on C2 recedes we are going to + * recompile at level 3 and start gathering profiling information. + * 2. The length of C1 queue is used to dynamically adjust the thresholds, so as to introduce + * additional filtering if the compiler is overloaded. The rationale is that by the time a + * method gets compiled it can become unused, so it doesn't make sense to put too much onto the + * queue. + * + * After profiling is completed at level 3 the transition is made to level 4. Again, the length + * of the C2 queue is used as a feedback to adjust the thresholds. + * + * After the first C1 compile some basic information is determined about the code like the number + * of the blocks and the number of the loops. Based on that it can be decided that a method + * is trivial and compiling it with C1 will yield the same code. In this case the method is + * compiled at level 1 instead of 4. + * + * We also support profiling at level 0. If C1 is slow enough to produce the level 3 version of + * the code and the C2 queue is sufficiently small we can decide to start profiling in the + * interpreter (and continue profiling in the compiled code once the level 3 version arrives). + * If the profiling at level 0 is fully completed before level 3 version is produced, a level 2 + * version is compiled instead in order to run faster waiting for a level 4 version. + * + * Compile queues are implemented as priority queues - for each method in the queue we compute + * the event rate (the number of invocation and backedge counter increments per unit of time). + * When getting an element off the queue we pick the one with the largest rate. 
Maintaining the + * rate also allows us to remove stale methods (the ones that got on the queue but stopped + * being used shortly after that). +*/ + +/* Command line options: + * - Tier?InvokeNotifyFreqLog and Tier?BackedgeNotifyFreqLog control the frequency of method + * invocation and backedge notifications. Basically every n-th invocation or backedge a mutator thread + * makes a call into the runtime. + * + * - Tier?CompileThreshold, Tier?BackEdgeThreshold, Tier?MinInvocationThreshold control + * compilation thresholds. + * Level 2 thresholds are not used and are provided for option-compatibility and potential future use. + * Other thresholds work as follows: + * + * Transition from interpreter (level 0) to C1 with full profiling (level 3) happens when + * the following predicate is true (X is the level): + * + * i > TierXInvocationThreshold * s || (i > TierXMinInvocationThreshold * s && i + b > TierXCompileThreshold * s), + * + * where $i$ is the number of method invocations, $b$ the number of backedges and $s$ is the scaling + * coefficient that will be discussed further. + * The intuition is to equalize the time that is spent profiling each method. + * The same predicate is used to control the transition from level 3 to level 4 (C2). It should be + * noted though that the thresholds are relative. Moreover i and b for the 0->3 transition come + * from methodOop and for the 3->4 transition they come from MDO (since profiled invocations are + * counted separately). + * + * OSR transitions are controlled simply with b > TierXBackEdgeThreshold * s predicates. + * + * - Tier?LoadFeedback options are used to automatically scale the predicates described above depending + * on the compiler load. The scaling coefficients are computed as follows: + * + * s = queue_size_X / (TierXLoadFeedback * compiler_count_X) + 1, + * + * where queue_size_X is the current size of the compiler queue of level X, and compiler_count_X + * is the number of level X compiler threads. + * + * Basically these parameters describe how many methods should be in the compile queue + * per compiler thread before the scaling coefficient increases by one. + * + * This feedback provides the mechanism to automatically control the flow of compilation requests + * depending on the machine speed, mutator load and other external factors. + * + * - Tier3DelayOn and Tier3DelayOff parameters control another important feedback loop. + * Consider the following observation: a method compiled with full profiling (level 3) + * is about 30% slower than a method at level 2 (just invocation and backedge counters, no MDO). + * Normally, the following transitions will occur: 0->3->4. The problem arises when the C2 queue + * gets congested and the 3->4 transition is delayed. While the method is in the C2 queue it continues + * executing at level 3 for a much longer time than is required by the predicate and at suboptimal speed. + * The idea is to dynamically change the behavior of the system in such a way that if a substantial + * load on C2 is detected we would first do the 0->2 transition allowing a method to run faster. + * And then, when the load decreases, to allow 2->3 transitions. + * + * Tier3Delay* parameters control this switching mechanism. + * Tier3DelayOn is the number of methods in the C2 queue per compiler thread after which the policy + * no longer does 0->3 transitions but does 0->2 transitions instead. 
+ * Tier3DelayOff switches the original behavior back when the number of methods in the C2 queue + * per compiler thread falls below the specified amount. + * The hysteresis is necessary to avoid jitter. + * + * - TieredCompileTaskTimeout is the amount of time an idle method can spend in the compile queue. + * Basically, since we use the event rate d(i + b)/dt as a value of priority when selecting a method to + * compile from the compile queue, we also can detect stale methods for which the rate has been + * 0 for some time in the same iteration. Stale methods can appear in the queue when an application + * abruptly changes its behavior. + * + * - TieredStopAtLevel is used mostly for testing. It allows bypassing the policy logic and sticking + * to a given level. For example it's useful to set TieredStopAtLevel = 1 in order to compile everything + * with pure c1. + * + * - Tier0ProfilingStartPercentage allows the interpreter to start profiling when the inequalities in the + * 0->3 predicate are already exceeded by the given percentage but the level 3 version of the + * method is still not ready. We can even go directly from level 0 to 4 if c1 doesn't produce a compiled + * version in time. This reduces the overall transition to level 4 and decreases the startup time. + * Note that this behavior is also guarded by the Tier3Delay mechanism: when the c2 queue is too long + * there is no reason to start profiling prematurely. + * + * - TieredRateUpdateMinTime and TieredRateUpdateMaxTime are parameters of the rate computation. + * Basically, the rate is not computed more frequently than TieredRateUpdateMinTime and is considered + * to be zero if no events occurred in TieredRateUpdateMaxTime. + */ + + +class AdvancedThresholdPolicy : public SimpleThresholdPolicy { + jlong _start_time; + + // Call and loop predicates determine whether a transition to a higher compilation + // level should be performed (pointers to predicate functions are passed to common()). + // Predicates also take compiler load into account. + typedef bool (AdvancedThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level); + bool call_predicate(int i, int b, CompLevel cur_level); + bool loop_predicate(int i, int b, CompLevel cur_level); + // Common transition function. Given a predicate, determines if a method should transition to another level. + CompLevel common(Predicate p, methodOop method, CompLevel cur_level); + // Transition functions. + // call_event determines if a method should be compiled at a different + // level with a regular invocation entry. + CompLevel call_event(methodOop method, CompLevel cur_level); + // loop_event checks if a method should be OSR compiled at a different + // level. + CompLevel loop_event(methodOop method, CompLevel cur_level); + // Has a method been around for a long time? + // We don't remove old methods from the compile queue even if they have + // very low activity (see select_task()). + inline bool is_old(methodOop method); + // Was a given method inactive for a given number of milliseconds. + // If it is, we would remove it from the queue (see select_task()). + inline bool is_stale(jlong t, jlong timeout, methodOop m); + // Compute the weight of the method for the compilation scheduling + inline double weight(methodOop method); + // Apply heuristics and return true if x should be compiled before y + inline bool compare_methods(methodOop x, methodOop y); + // Compute event rate for a given method. The rate is the number of events (invocations + backedges) + // per millisecond. 
+ inline void update_rate(jlong t, methodOop m); + // Compute threshold scaling coefficient + inline double threshold_scale(CompLevel level, int feedback_k); + // If a method is old enough and is still in the interpreter we would want to + // start profiling without waiting for the compiled method to arrive. This function + // determines whether we should do that. + inline bool should_create_mdo(methodOop method, CompLevel cur_level); + // Create MDO if necessary. + void create_mdo(methodHandle mh, TRAPS); + // Is method profiled enough? + bool is_method_profiled(methodOop method); + +protected: + void print_specific(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level); + + void set_start_time(jlong t) { _start_time = t; } + jlong start_time() const { return _start_time; } + + // Submit a given method for compilation (and update the rate). + virtual void submit_compile(methodHandle mh, int bci, CompLevel level, TRAPS); + // event() from SimpleThresholdPolicy would call these. + virtual void method_invocation_event(methodHandle method, methodHandle inlinee, + CompLevel level, TRAPS); + virtual void method_back_branch_event(methodHandle method, methodHandle inlinee, + int bci, CompLevel level, TRAPS); +public: + AdvancedThresholdPolicy() : _start_time(0) { } + // Select task is called by CompileBroker. We should return a task or NULL. + virtual CompileTask* select_task(CompileQueue* compile_queue); + virtual void initialize(); +}; + +#endif // TIERED + +#endif // SHARE_VM_RUNTIME_ADVANCEDTHRESHOLDPOLICY_HPP
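The block comment above gives the two formulas that drive level transitions: the load-derived scaling coefficient s = queue_size_X / (TierXLoadFeedback * compiler_count_X) + 1 and the predicate i > TierXInvocationThreshold * s || (i > TierXMinInvocationThreshold * s && i + b > TierXCompileThreshold * s). A small worked sketch of how the scaling suppresses compilation under load follows; the threshold values are made up for illustration and are not the real Tier3* defaults.

// Sketch of the load-scaled compile predicate described above.
// Threshold values are illustrative, NOT the real Tier3* defaults.
#include <cstdio>

static const double kInvocationThreshold    = 200;   // stand-in for Tier3InvocationThreshold
static const double kMinInvocationThreshold = 100;   // stand-in for Tier3MinInvocationThreshold
static const double kCompileThreshold       = 2000;  // stand-in for Tier3CompileThreshold
static const double kLoadFeedback           = 5;     // stand-in for Tier3LoadFeedback

// s grows with the target compiler's queue, raising the effective thresholds.
static double threshold_scale(int queue_size, int compiler_count) {
  return (double)queue_size / (kLoadFeedback * compiler_count) + 1.0;
}

// The call predicate from the comment above (0->3 and, with other inputs, 3->4).
static bool call_predicate(int i, int b, double s) {
  return i > kInvocationThreshold * s ||
         (i > kMinInvocationThreshold * s && i + b > kCompileThreshold * s);
}

int main() {
  int i = 150, b = 3000;                   // invocation and backedge counts
  double idle_s = threshold_scale(0, 2);   // empty queue: s = 1
  double busy_s = threshold_scale(40, 2);  // long queue:  s = 5
  printf("idle compiler: s=%.1f compile=%d\n", idle_s, call_predicate(i, b, idle_s));
  printf("busy compiler: s=%.1f compile=%d\n", busy_s, call_predicate(i, b, busy_s));
  return 0;
}

With an empty queue the counts are enough to trigger a compile; with a long queue the same counts no longer pass, which is the filtering effect the comment describes.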
--- a/src/share/vm/runtime/arguments.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/arguments.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -59,7 +59,8 @@ #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp" #endif -#define DEFAULT_VENDOR_URL_BUG "http://java.sun.com/webapps/bugreport/crash.jsp" +// Note: This is a special bug reporting site for the JVM +#define DEFAULT_VENDOR_URL_BUG "http://bugreport.sun.com/bugreport/crash.jsp" #define DEFAULT_JAVA_LAUNCHER "generic" char** Arguments::_jvm_flags_array = NULL; @@ -80,6 +81,7 @@ const char* Arguments::_java_vendor_url_bug = DEFAULT_VENDOR_URL_BUG; const char* Arguments::_sun_java_launcher = DEFAULT_JAVA_LAUNCHER; int Arguments::_sun_java_launcher_pid = -1; +bool Arguments::_created_by_gamma_launcher = false; // These parameters are reset in method parse_vm_init_args(JavaVMInitArgs*) bool Arguments::_AlwaysCompileLoopMethods = AlwaysCompileLoopMethods; @@ -243,6 +245,8 @@ JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) }, { "MaxLiveObjectEvacuationRatio", JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) }, + { "ForceSharedSpaces", JDK_Version::jdk_update(6,25), JDK_Version::jdk(8) }, + { "AllowTransitionalJSR292", JDK_Version::jdk(7), JDK_Version::jdk(8) }, { NULL, JDK_Version(0), JDK_Version(0) } }; @@ -966,6 +970,16 @@ UseCompiler = true; UseLoopCounter = true; +#ifndef ZERO + // Turn these off for mixed and comp. Leave them on for Zero. + if (FLAG_IS_DEFAULT(UseFastAccessorMethods)) { + UseFastAccessorMethods = mode == _int; + } + if (FLAG_IS_DEFAULT(UseFastEmptyMethods)) { + UseFastEmptyMethods = mode == _int; + } +#endif + // Default values may be platform/compiler dependent - // use the saved values ClipInlining = Arguments::_ClipInlining; @@ -1008,31 +1022,10 @@ } } -void Arguments::check_compressed_oops_compat() { -#ifdef _LP64 - assert(UseCompressedOops, "Precondition"); - // Is it on by default or set on ergonomically - bool is_on_by_default = FLAG_IS_DEFAULT(UseCompressedOops) || FLAG_IS_ERGO(UseCompressedOops); - - // If dumping an archive or forcing its use, disable compressed oops if possible - if (DumpSharedSpaces || RequireSharedSpaces) { - if (is_on_by_default) { - FLAG_SET_DEFAULT(UseCompressedOops, false); - return; - } else { - vm_exit_during_initialization( - "Class Data Sharing is not supported with compressed oops yet", NULL); - } - } else if (UseSharedSpaces) { - // UseSharedSpaces is on by default. With compressed oops, we turn it off. - FLAG_SET_DEFAULT(UseSharedSpaces, false); - } -#endif -} - void Arguments::set_tiered_flags() { + // With tiered, set default policy to AdvancedThresholdPolicy, which is 3. 
if (FLAG_IS_DEFAULT(CompilationPolicyChoice)) { - FLAG_SET_DEFAULT(CompilationPolicyChoice, 2); + FLAG_SET_DEFAULT(CompilationPolicyChoice, 3); } if (CompilationPolicyChoice < 2) { vm_exit_during_initialization( @@ -1127,40 +1120,28 @@ set_parnew_gc_flags(); } + // MaxHeapSize is aligned down in collectorPolicy + size_t max_heap = align_size_down(MaxHeapSize, + CardTableRS::ct_max_alignment_constraint()); + // Now make adjustments for CMS - size_t young_gen_per_worker; - intx new_ratio; - size_t min_new_default; - intx tenuring_default; - if (CMSUseOldDefaults) { // old defaults: "old" as of 6.0 - if FLAG_IS_DEFAULT(CMSYoungGenPerWorker) { - FLAG_SET_ERGO(intx, CMSYoungGenPerWorker, 4*M); - } - young_gen_per_worker = 4*M; - new_ratio = (intx)15; - min_new_default = 4*M; - tenuring_default = (intx)0; - } else { // new defaults: "new" as of 6.0 - young_gen_per_worker = CMSYoungGenPerWorker; - new_ratio = (intx)7; - min_new_default = 16*M; - tenuring_default = (intx)4; - } - - // Preferred young gen size for "short" pauses + intx tenuring_default = (intx)6; + size_t young_gen_per_worker = CMSYoungGenPerWorker; + + // Preferred young gen size for "short" pauses: + // upper bound depends on # of threads and NewRatio. const uintx parallel_gc_threads = (ParallelGCThreads == 0 ? 1 : ParallelGCThreads); const size_t preferred_max_new_size_unaligned = - ScaleForWordSize(young_gen_per_worker * parallel_gc_threads); - const size_t preferred_max_new_size = + MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * parallel_gc_threads)); + size_t preferred_max_new_size = align_size_up(preferred_max_new_size_unaligned, os::vm_page_size()); // Unless explicitly requested otherwise, size young gen - // for "short" pauses ~ 4M*ParallelGCThreads + // for "short" pauses ~ CMSYoungGenPerWorker*ParallelGCThreads // If either MaxNewSize or NewRatio is set on the command line, // assume the user is trying to set the size of the young gen. - if (FLAG_IS_DEFAULT(MaxNewSize) && FLAG_IS_DEFAULT(NewRatio)) { // Set MaxNewSize to our calculated preferred_max_new_size unless @@ -1173,49 +1154,13 @@ } if (PrintGCDetails && Verbose) { // Too early to use gclog_or_tty - tty->print_cr("Ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize); + tty->print_cr("CMS ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize); } - // Unless explicitly requested otherwise, prefer a large - // Old to Young gen size so as to shift the collection load - // to the old generation concurrent collector - - // If this is only guarded by FLAG_IS_DEFAULT(NewRatio) - // then NewSize and OldSize may be calculated. That would - // generally lead to some differences with ParNewGC for which - // there was no obvious reason. Also limit to the case where - // MaxNewSize has not been set. - - FLAG_SET_ERGO(intx, NewRatio, MAX2(NewRatio, new_ratio)); - // Code along this path potentially sets NewSize and OldSize - // Calculate the desired minimum size of the young gen but if - // NewSize has been set on the command line, use it here since - // it should be the final value. - size_t min_new; - if (FLAG_IS_DEFAULT(NewSize)) { - min_new = align_size_up(ScaleForWordSize(min_new_default), - os::vm_page_size()); - } else { - min_new = NewSize; - } - size_t prev_initial_size = InitialHeapSize; - if (prev_initial_size != 0 && prev_initial_size < min_new + OldSize) { - FLAG_SET_ERGO(uintx, InitialHeapSize, min_new + OldSize); - // Currently minimum size and the initial heap sizes are the same. 
- set_min_heap_size(InitialHeapSize); - if (PrintGCDetails && Verbose) { - warning("Initial heap size increased to " SIZE_FORMAT " M from " - SIZE_FORMAT " M; use -XX:NewSize=... for finer control.", - InitialHeapSize/M, prev_initial_size/M); - } - } - - // MaxHeapSize is aligned down in collectorPolicy - size_t max_heap = - align_size_down(MaxHeapSize, - CardTableRS::ct_max_alignment_constraint()); + assert(max_heap >= InitialHeapSize, "Error"); + assert(max_heap >= NewSize, "Error"); if (PrintGCDetails && Verbose) { // Too early to use gclog_or_tty @@ -1224,7 +1169,11 @@ " max_heap: " SIZE_FORMAT, min_heap_size(), InitialHeapSize, max_heap); } - if (max_heap > min_new) { + size_t min_new = preferred_max_new_size; + if (FLAG_IS_CMDLINE(NewSize)) { + min_new = NewSize; + } + if (max_heap > min_new && min_heap_size() > min_new) { // Unless explicitly requested otherwise, make young gen // at least min_new, and at most preferred_max_new_size. if (FLAG_IS_DEFAULT(NewSize)) { @@ -1232,18 +1181,17 @@ FLAG_SET_ERGO(uintx, NewSize, MIN2(preferred_max_new_size, NewSize)); if (PrintGCDetails && Verbose) { // Too early to use gclog_or_tty - tty->print_cr("Ergo set NewSize: " SIZE_FORMAT, NewSize); + tty->print_cr("CMS ergo set NewSize: " SIZE_FORMAT, NewSize); } } // Unless explicitly requested otherwise, size old gen - // so that it's at least 3X of NewSize to begin with; - // later NewRatio will decide how it grows; see above. + // so it's NewRatio x of NewSize. if (FLAG_IS_DEFAULT(OldSize)) { if (max_heap > NewSize) { - FLAG_SET_ERGO(uintx, OldSize, MIN2(3*NewSize, max_heap - NewSize)); + FLAG_SET_ERGO(uintx, OldSize, MIN2(NewRatio*NewSize, max_heap - NewSize)); if (PrintGCDetails && Verbose) { // Too early to use gclog_or_tty - tty->print_cr("Ergo set OldSize: " SIZE_FORMAT, OldSize); + tty->print_cr("CMS ergo set OldSize: " SIZE_FORMAT, OldSize); } } } @@ -1387,7 +1335,7 @@ void Arguments::set_ergonomics_flags() { // Parallel GC is not compatible with sharing. If one specifies // that they want sharing explicitly, do not set ergonomics flags. - if (DumpSharedSpaces || ForceSharedSpaces) { + if (DumpSharedSpaces || RequireSharedSpaces) { return; } @@ -1662,6 +1610,9 @@ void Arguments::process_java_launcher_argument(const char* launcher, void* extra_info) { _sun_java_launcher = strdup(launcher); + if (strcmp("gamma", _sun_java_launcher) == 0) { + _created_by_gamma_launcher = true; + } } bool Arguments::created_by_java_launcher() { @@ -1669,6 +1620,10 @@ return strcmp(DEFAULT_JAVA_LAUNCHER, _sun_java_launcher) != 0; } +bool Arguments::created_by_gamma_launcher() { + return _created_by_gamma_launcher; +} + //=========================================================================================================== // Parsing of main arguments @@ -1687,13 +1642,13 @@ } bool Arguments::verify_min_value(intx val, intx min, const char* name) { - // Returns true if given value is greater than specified min threshold + // Returns true if given value is at least specified min threshold // false, otherwise. if (val >= min ) { return true; } jio_fprintf(defaultStream::error_stream(), - "%s of " INTX_FORMAT " is invalid; must be greater than " INTX_FORMAT "\n", + "%s of " INTX_FORMAT " is invalid; must be at least " INTX_FORMAT "\n", name, val, min); return false; } @@ -1843,33 +1798,6 @@ status = status && verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit"); - // Check whether user-specified sharing option conflicts with GC or page size. 
- // Both sharing and large pages are enabled by default on some platforms; - // large pages override sharing only if explicitly set on the command line. - const bool cannot_share = UseConcMarkSweepGC || CMSIncrementalMode || - UseG1GC || UseParNewGC || UseParallelGC || UseParallelOldGC || - UseLargePages && FLAG_IS_CMDLINE(UseLargePages); - if (cannot_share) { - // Either force sharing on by forcing the other options off, or - // force sharing off. - if (DumpSharedSpaces || ForceSharedSpaces) { - jio_fprintf(defaultStream::error_stream(), - "Using Serial GC and default page size because of %s\n", - ForceSharedSpaces ? "-Xshare:on" : "-Xshare:dump"); - force_serial_gc(); - FLAG_SET_DEFAULT(UseLargePages, false); - } else { - if (UseSharedSpaces && Verbose) { - jio_fprintf(defaultStream::error_stream(), - "Turning off use of shared archive because of " - "choice of garbage collector or large pages\n"); - } - no_shared_spaces(); - } - } else if (UseLargePages && (UseSharedSpaces || DumpSharedSpaces)) { - FLAG_SET_DEFAULT(UseLargePages, false); - } - status = status && check_gc_consistency(); status = status && check_stack_pages(); @@ -1947,6 +1875,8 @@ status = false; } + status = status && verify_min_value(ParGCArrayScanChunk, 1, "ParGCArrayScanChunk"); + #ifndef SERIALGC if (UseG1GC) { status = status && verify_percentage(InitiatingHeapOccupancyPercent, @@ -2410,9 +2340,6 @@ } else if (match_option(option, "-Xshare:on", &tail)) { FLAG_SET_CMDLINE(bool, UseSharedSpaces, true); FLAG_SET_CMDLINE(bool, RequireSharedSpaces, true); -#ifdef TIERED - FLAG_SET_CMDLINE(bool, ForceSharedSpaces, true); -#endif // TIERED // -Xshare:auto } else if (match_option(option, "-Xshare:auto", &tail)) { FLAG_SET_CMDLINE(bool, UseSharedSpaces, true); @@ -2832,10 +2759,6 @@ if (!FLAG_IS_DEFAULT(OptoLoopAlignment) && FLAG_IS_DEFAULT(MaxLoopPad)) { FLAG_SET_DEFAULT(MaxLoopPad, OptoLoopAlignment-1); } - // Temporary disable bulk zeroing reduction with G1. See CR 6627983. - if (UseG1GC) { - FLAG_SET_DEFAULT(ReduceBulkZeroing, false); - } #endif // If we are running in a headless jre, force java.awt.headless property @@ -2949,6 +2872,52 @@ return JNI_OK; } +void Arguments::set_shared_spaces_flags() { + const bool must_share = DumpSharedSpaces || RequireSharedSpaces; + const bool might_share = must_share || UseSharedSpaces; + + // The string table is part of the shared archive so the size must match. + if (!FLAG_IS_DEFAULT(StringTableSize)) { + // Disable sharing. + if (must_share) { + warning("disabling shared archive %s because of non-default " + "StringTableSize", DumpSharedSpaces ? "creation" : "use"); + } + if (might_share) { + FLAG_SET_DEFAULT(DumpSharedSpaces, false); + FLAG_SET_DEFAULT(RequireSharedSpaces, false); + FLAG_SET_DEFAULT(UseSharedSpaces, false); + } + return; + } + + // Check whether class data sharing settings conflict with GC, compressed oops + // or page size, and fix them up. Explicit sharing options override other + // settings. + const bool cannot_share = UseConcMarkSweepGC || CMSIncrementalMode || + UseG1GC || UseParNewGC || UseParallelGC || UseParallelOldGC || + UseCompressedOops || UseLargePages && FLAG_IS_CMDLINE(UseLargePages); + if (cannot_share) { + if (must_share) { + warning("selecting serial gc and disabling large pages %s" + "because of %s", "" LP64_ONLY("and compressed oops "), + DumpSharedSpaces ? 
"-Xshare:dump" : "-Xshare:on"); + force_serial_gc(); + FLAG_SET_CMDLINE(bool, UseLargePages, false); + LP64_ONLY(FLAG_SET_CMDLINE(bool, UseCompressedOops, false)); + } else { + if (UseSharedSpaces && Verbose) { + warning("turning off use of shared archive because of " + "choice of garbage collector or large pages"); + } + no_shared_spaces(); + } + } else if (UseLargePages && might_share) { + // Disable large pages to allow shared spaces. This is sub-optimal, since + // there may not even be a shared archive to use. + FLAG_SET_DEFAULT(UseLargePages, false); + } +} // Parse entry point called from JNI_CreateJavaVM @@ -3059,21 +3028,34 @@ } #endif // PRODUCT - if (EnableInvokeDynamic && !EnableMethodHandles) { - if (!FLAG_IS_DEFAULT(EnableMethodHandles)) { - warning("forcing EnableMethodHandles true because EnableInvokeDynamic is true"); + // Transitional + if (EnableMethodHandles || AnonymousClasses) { + if (!EnableInvokeDynamic && !FLAG_IS_DEFAULT(EnableInvokeDynamic)) { + warning("EnableMethodHandles and AnonymousClasses are obsolete. Keeping EnableInvokeDynamic disabled."); + } else { + EnableInvokeDynamic = true; } - EnableMethodHandles = true; } - if (EnableMethodHandles && !AnonymousClasses) { - if (!FLAG_IS_DEFAULT(AnonymousClasses)) { - warning("forcing AnonymousClasses true because EnableMethodHandles is true"); + + // JSR 292 is not supported before 1.7 + if (!JDK_Version::is_gte_jdk17x_version()) { + if (EnableInvokeDynamic) { + if (!FLAG_IS_DEFAULT(EnableInvokeDynamic)) { + warning("JSR 292 is not supported before 1.7. Disabling support."); + } + EnableInvokeDynamic = false; } - AnonymousClasses = true; } - if ((EnableMethodHandles || AnonymousClasses) && ScavengeRootsInCode == 0) { + + if (EnableInvokeDynamic && ScavengeRootsInCode == 0) { if (!FLAG_IS_DEFAULT(ScavengeRootsInCode)) { - warning("forcing ScavengeRootsInCode non-zero because EnableMethodHandles or AnonymousClasses is true"); + warning("forcing ScavengeRootsInCode non-zero because EnableInvokeDynamic is true"); + } + ScavengeRootsInCode = 1; + } + if (!JavaObjectsInPerm && ScavengeRootsInCode == 0) { + if (!FLAG_IS_DEFAULT(ScavengeRootsInCode)) { + warning("forcing ScavengeRootsInCode non-zero because JavaObjectsInPerm is false"); } ScavengeRootsInCode = 1; } @@ -3096,9 +3078,7 @@ // Set flags based on ergonomics. set_ergonomics_flags(); - if (UseCompressedOops) { - check_compressed_oops_compat(); - } + set_shared_spaces_flags(); // Check the GC selections again. if (!check_gc_consistency()) { @@ -3116,22 +3096,17 @@ } #ifndef KERNEL - if (UseConcMarkSweepGC) { - // Set flags for CMS and ParNew. Check UseConcMarkSweep first - // to ensure that when both UseConcMarkSweepGC and UseParNewGC - // are true, we don't call set_parnew_gc_flags() as well. 
+ // Set heap size based on available physical memory + set_heap_size(); + // Set per-collector flags + if (UseParallelGC || UseParallelOldGC) { + set_parallel_gc_flags(); + } else if (UseConcMarkSweepGC) { // should be done before ParNew check below set_cms_and_parnew_gc_flags(); - } else { - // Set heap size based on available physical memory - set_heap_size(); - // Set per-collector flags - if (UseParallelGC || UseParallelOldGC) { - set_parallel_gc_flags(); - } else if (UseParNewGC) { - set_parnew_gc_flags(); - } else if (UseG1GC) { - set_g1_gc_flags(); - } + } else if (UseParNewGC) { // skipped if CMS is set above + set_parnew_gc_flags(); + } else if (UseG1GC) { + set_g1_gc_flags(); } #endif // KERNEL @@ -3148,7 +3123,11 @@ // Turn off biased locking for locking debug mode flags, // which are subtlely different from each other but neither works with // biased locking. - if (!UseFastLocking || UseHeavyMonitors) { + if (UseHeavyMonitors +#ifdef COMPILER1 + || !UseFastLocking +#endif // COMPILER1 + ) { if (!FLAG_IS_DEFAULT(UseBiasedLocking) && UseBiasedLocking) { // flag set to true on command line; warn the user that they // can't enable biased locking here @@ -3197,6 +3176,16 @@ } } + // set PauseAtExit if the gamma launcher was used and a debugger is attached + // but only if not already set on the commandline + if (Arguments::created_by_gamma_launcher() && os::is_debugger_attached()) { + bool set = false; + CommandLineFlags::wasSetOnCmdline("PauseAtExit", &set); + if (!set) { + FLAG_SET_DEFAULT(PauseAtExit, true); + } + } + return JNI_OK; }
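Among the arguments.cpp changes above, the CMS ergonomics now bound the preferred young generation by both a per-GC-thread budget and the NewRatio-derived share of the aligned maximum heap (preferred_max_new_size = MIN2(max_heap/(NewRatio+1), ScaleForWordSize(CMSYoungGenPerWorker * ParallelGCThreads))), and the default OldSize becomes NewRatio*NewSize instead of 3*NewSize. A small arithmetic sketch with sample inputs follows; the ScaleForWordSize adjustment and the platform-dependent defaults are omitted.

// Sketch of the CMS young-gen sizing arithmetic introduced above. The inputs
// below are sample values, not the VM's platform-dependent defaults.
#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t M = 1024 * 1024;
  size_t max_heap             = 512 * M;  // MaxHeapSize after alignment
  size_t young_gen_per_worker = 16 * M;   // stand-in for CMSYoungGenPerWorker
  size_t parallel_gc_threads  = 4;        // stand-in for ParallelGCThreads
  size_t new_ratio            = 2;        // stand-in for NewRatio

  // preferred_max_new_size = MIN2(max_heap/(NewRatio+1), per-worker budget)
  size_t by_ratio   = max_heap / (new_ratio + 1);
  size_t by_workers = young_gen_per_worker * parallel_gc_threads;
  size_t preferred_max_new_size = std::min(by_ratio, by_workers);

  // Default OldSize (when not set on the command line): NewRatio * NewSize,
  // clamped so that young + old still fit into the heap.
  size_t new_size = preferred_max_new_size;
  size_t old_size = std::min(new_ratio * new_size, max_heap - new_size);

  printf("NewSize=%zuM OldSize=%zuM (heap=%zuM)\n",
         new_size / M, old_size / M, max_heap / M);
  return 0;
}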
--- a/src/share/vm/runtime/arguments.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/arguments.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -260,6 +260,9 @@ // sun.java.launcher.pid, private property static int _sun_java_launcher_pid; + // was this VM created by the gamma launcher + static bool _created_by_gamma_launcher; + // Option flags static bool _has_profile; static bool _has_alloc_profile; @@ -301,8 +304,6 @@ // Tiered static void set_tiered_flags(); - // Check compressed oops compatibility with other flags - static void check_compressed_oops_compat(); // CMS/ParNew garbage collectors static void set_parnew_gc_flags(); static void set_cms_and_parnew_gc_flags(); @@ -312,6 +313,7 @@ static void set_g1_gc_flags(); // GC ergonomics static void set_ergonomics_flags(); + static void set_shared_spaces_flags(); // Setup heap size static void set_heap_size(); // Based on automatic selection criteria, should the @@ -450,6 +452,8 @@ static const char* sun_java_launcher() { return _sun_java_launcher; } // Was VM created by a Java launcher? static bool created_by_java_launcher(); + // Was VM created by the gamma Java launcher? + static bool created_by_gamma_launcher(); // -Dsun.java.launcher.pid static int sun_java_launcher_pid() { return _sun_java_launcher_pid; }
--- a/src/share/vm/runtime/compilationPolicy.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/compilationPolicy.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,6 +32,7 @@ #include "oops/methodOop.hpp" #include "oops/oop.inline.hpp" #include "prims/nativeLookup.hpp" +#include "runtime/advancedThresholdPolicy.hpp" #include "runtime/compilationPolicy.hpp" #include "runtime/frame.hpp" #include "runtime/handles.inline.hpp" @@ -72,8 +73,15 @@ Unimplemented(); #endif break; + case 3: +#ifdef TIERED + CompilationPolicy::set_policy(new AdvancedThresholdPolicy()); +#else + Unimplemented(); +#endif + break; default: - fatal("CompilationPolicyChoice must be in the range: [0-2]"); + fatal("CompilationPolicyChoice must be in the range: [0-3]"); } CompilationPolicy::policy()->initialize(); } @@ -388,8 +396,6 @@ // SimpleCompPolicy - compile current method void SimpleCompPolicy::method_invocation_event( methodHandle m, TRAPS) { - assert(UseCompiler || CompileTheWorld, "UseCompiler should be set by now."); - int hot_count = m->invocation_count(); reset_counter_for_invocation_event(m); const char* comment = "count"; @@ -405,8 +411,6 @@ } void SimpleCompPolicy::method_back_branch_event(methodHandle m, int bci, TRAPS) { - assert(UseCompiler || CompileTheWorld, "UseCompiler should be set by now."); - int hot_count = m->backedge_count(); const char* comment = "backedge_count"; @@ -424,8 +428,6 @@ // Consider m for compilation void StackWalkCompPolicy::method_invocation_event(methodHandle m, TRAPS) { - assert(UseCompiler || CompileTheWorld, "UseCompiler should be set by now."); - int hot_count = m->invocation_count(); reset_counter_for_invocation_event(m); const char* comment = "count"; @@ -465,8 +467,6 @@ } void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int bci, TRAPS) { - assert(UseCompiler || CompileTheWorld, "UseCompiler should be set by now."); - int hot_count = m->backedge_count(); const char* comment = "backedge_count";
--- a/src/share/vm/runtime/deoptimization.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/deoptimization.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -101,9 +101,9 @@ _frame_pcs = frame_pcs; _register_block = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2); _return_type = return_type; + _initial_fp = 0; // PD (x86 only) _counter_temp = 0; - _initial_fp = 0; _unpack_kind = 0; _sender_sp_temp = 0; @@ -463,18 +463,9 @@ frame_sizes, frame_pcs, return_type); -#if defined(IA32) || defined(AMD64) - // We need a way to pass fp to the unpacking code so the skeletal frames - // come out correct. This is only needed for x86 because of c2 using ebp - // as an allocatable register. So this update is useless (and harmless) - // on the other platforms. It would be nice to do this in a different - // way but even the old style deoptimization had a problem with deriving - // this value. NEEDS_CLEANUP - // Note: now that c1 is using c2's deopt blob we must do this on all - // x86 based platforms - intptr_t** fp_addr = (intptr_t**) (((address)info) + info->initial_fp_offset_in_bytes()); - *fp_addr = array->sender().fp(); // was adapter_caller -#endif /* IA32 || AMD64 */ + // On some platforms, we need a way to pass fp to the unpacking code + // so the skeletal frames come out correct. + info->set_initial_fp((intptr_t) array->sender().fp()); if (array->frames() > 1) { if (VerifyStack && TraceDeoptimization) {
--- a/src/share/vm/runtime/deoptimization.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/deoptimization.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -136,12 +136,12 @@ address* _frame_pcs; // Array of frame pc's, in bytes, for unrolling the stack intptr_t* _register_block; // Block for storing callee-saved registers. BasicType _return_type; // Tells if we have to restore double or long return value + intptr_t _initial_fp; // FP of the sender frame // The following fields are used as temps during the unpacking phase // (which is tight on registers, especially on x86). They really ought // to be PD variables but that involves moving this class into its own // file to use the pd include mechanism. Maybe in a later cleanup ... intptr_t _counter_temp; // SHOULD BE PD VARIABLE (x86 frame count temp) - intptr_t _initial_fp; // SHOULD BE PD VARIABLE (x86/c2 initial ebp) intptr_t _unpack_kind; // SHOULD BE PD VARIABLE (x86 unpack kind) intptr_t _sender_sp_temp; // SHOULD BE PD VARIABLE (x86 sender_sp) public: @@ -165,6 +165,8 @@ // Returns the total size of frames int size_of_frames() const; + void set_initial_fp(intptr_t fp) { _initial_fp = fp; } + // Accessors used by the code generator for the unpack stub. static int size_of_deoptimized_frame_offset_in_bytes() { return offset_of(UnrollBlock, _size_of_deoptimized_frame); } static int caller_adjustment_offset_in_bytes() { return offset_of(UnrollBlock, _caller_adjustment); }
--- a/src/share/vm/runtime/dtraceJSDT.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/dtraceJSDT.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/fieldDescriptor.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/fieldDescriptor.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/fieldType.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/fieldType.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/fieldType.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/fieldType.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/fprofiler.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/fprofiler.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/fprofiler.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/fprofiler.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/frame.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/frame.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/frame.inline.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/frame.inline.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/globals.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/globals.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -63,6 +63,12 @@ bool Flag::is_unlocked() const { if (strcmp(kind, "{diagnostic}") == 0) { + if (strcmp(name, "EnableInvokeDynamic") == 0 && UnlockExperimentalVMOptions && !UnlockDiagnosticVMOptions) { + // transitional logic to allow tests to run until they are changed + static int warned; + if (++warned == 1) warning("Use -XX:+UnlockDiagnosticVMOptions before EnableInvokeDynamic flag"); + return true; + } return UnlockDiagnosticVMOptions; } else if (strcmp(kind, "{experimental}") == 0 || strcmp(kind, "{C2 experimental}") == 0) {
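The globals.cpp hunk above is transitional: EnableInvokeDynamic is becoming a diagnostic flag (see the globals.hpp hunk below), but tests that still pass it under -XX:+UnlockExperimentalVMOptions get a one-time warning and are accepted anyway. A minimal standalone sketch of that carve-out, with plain globals standing in for the VM flag machinery:

    #include <cstdio>
    #include <cstring>

    // Hypothetical globals standing in for the real VM flags; illustration only.
    static bool UnlockDiagnosticVMOptions   = false;
    static bool UnlockExperimentalVMOptions = true;   // what the old tests still pass

    // Mirrors Flag::is_unlocked() for a "{diagnostic}" flag named `name`.
    static bool diagnostic_is_unlocked(const char* name) {
      if (std::strcmp(name, "EnableInvokeDynamic") == 0 &&
          UnlockExperimentalVMOptions && !UnlockDiagnosticVMOptions) {
        // Transitional: warn once, then accept the flag anyway.
        static int warned = 0;
        if (++warned == 1)
          std::fprintf(stderr,
            "warning: Use -XX:+UnlockDiagnosticVMOptions before EnableInvokeDynamic flag\n");
        return true;
      }
      return UnlockDiagnosticVMOptions;
    }

    int main() {
      std::printf("%d\n", diagnostic_is_unlocked("EnableInvokeDynamic")); // 1, plus one warning
      std::printf("%d\n", diagnostic_is_unlocked("PrintInlining"));       // 0 until diagnostics unlocked
      return 0;
    }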
--- a/src/share/vm/runtime/globals.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/globals.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1224,6 +1224,11 @@ "Decay time (in milliseconds) to re-enable bulk rebiasing of a " \ "type after previous bulk rebias") \ \ + develop(bool, JavaObjectsInPerm, false, \ + "controls whether Classes and interned Strings are allocated" \ + "in perm. This purely intended to allow debugging issues" \ + "in production.") \ + \ /* tracing */ \ \ notproduct(bool, TraceRuntimeCalls, false, \ @@ -1543,12 +1548,8 @@ product(bool, AlwaysPreTouch, false, \ "It forces all freshly committed pages to be pre-touched.") \ \ - product(bool, CMSUseOldDefaults, false, \ - "A flag temporarily introduced to allow reverting to some " \ - "older default settings; older as of 6.0") \ - \ - product(intx, CMSYoungGenPerWorker, 16*M, \ - "The amount of young gen chosen by default per GC worker " \ + product_pd(intx, CMSYoungGenPerWorker, \ + "The maximum size of young gen chosen by default per GC worker " \ "thread available") \ \ product(bool, GCOverheadReporting, false, \ @@ -1926,7 +1927,7 @@ experimental(intx, WorkStealingSleepMillis, 1, \ "Sleep time when sleep is used for yields") \ \ - experimental(uintx, WorkStealingYieldsBeforeSleep, 1000, \ + experimental(uintx, WorkStealingYieldsBeforeSleep, 5000, \ "Number of yields before a sleep is done during workstealing") \ \ experimental(uintx, WorkStealingHardSpins, 4096, \ @@ -2379,6 +2380,9 @@ develop(intx, CICloneLoopTestLimit, 100, \ "size limit for blocks heuristically cloned in ciTypeFlow") \ \ + develop(intx, OSROnlyBCI, -1, \ + "OSR only at this bci. Negative values mean exclude that bci") \ + \ /* temp diagnostics */ \ \ diagnostic(bool, TraceRedundantCompiles, false, \ @@ -2613,9 +2617,6 @@ develop(bool, CompileTheWorldPreloadClasses, true, \ "Preload all classes used by a class before start loading") \ \ - notproduct(bool, CompileTheWorldIgnoreInitErrors, false, \ - "Compile all methods although class initializer failed") \ - \ notproduct(intx, CompileTheWorldSafepointInterval, 100, \ "Force a safepoint every n compiles so sweeper can keep up") \ \ @@ -3659,9 +3660,6 @@ product(bool, RequireSharedSpaces, false, \ "Require shared spaces in the permanent generation") \ \ - product(bool, ForceSharedSpaces, false, \ - "Require shared spaces in the permanent generation") \ - \ product(bool, DumpSharedSpaces, false, \ "Special mode: JVM reads a class list, loads classes, builds " \ "shared spaces, and dumps the shared spaces to a file to be " \ @@ -3698,11 +3696,15 @@ "Skip assert() and verify() which page-in unwanted shared " \ "objects. 
") \ \ + diagnostic(bool, EnableInvokeDynamic, true, \ + "support JSR 292 (method handles, invokedynamic, " \ + "anonymous classes") \ + \ product(bool, AnonymousClasses, false, \ - "support sun.misc.Unsafe.defineAnonymousClass") \ + "support sun.misc.Unsafe.defineAnonymousClass (deprecated)") \ \ experimental(bool, EnableMethodHandles, false, \ - "support method handles (true by default under JSR 292)") \ + "support method handles (deprecated)") \ \ diagnostic(intx, MethodHandlePushLimit, 3, \ "number of additional stack slots a method handle may push") \ @@ -3719,11 +3721,9 @@ experimental(bool, TrustFinalNonStaticFields, false, \ "trust final non-static declarations for constant folding") \ \ - experimental(bool, EnableInvokeDynamic, false, \ - "recognize the invokedynamic instruction") \ - \ - experimental(bool, AllowTransitionalJSR292, true, \ - "recognize pre-PFD formats of invokedynamic") \ + experimental(bool, AllowInvokeGeneric, true, \ + "accept MethodHandle.invoke and MethodHandle.invokeGeneric " \ + "as equivalent methods") \ \ develop(bool, TraceInvokeDynamic, false, \ "trace internal invoke dynamic operations") \ @@ -3736,6 +3736,9 @@ "The file to create and for whose removal to await when pausing " \ "at startup. (default: ./vm.paused.<pid>)") \ \ + diagnostic(bool, PauseAtExit, false, \ + "Pause and wait for keypress on exit if a debugger is attached") \ + \ product(bool, ExtendedDTraceProbes, false, \ "Enable performance-impacting dtrace probes") \ \ @@ -3754,6 +3757,9 @@ diagnostic(bool, PrintDTraceDOF, false, \ "Print the DTrace DOF passed to the system for JSDT probes") \ \ + product(uintx, StringTableSize, 1009, \ + "Number of buckets in the interned String table") \ + \ product(bool, UseVMInterruptibleIO, false, \ "(Unstable, Solaris-specific) Thread interrupt before or with " \ "EINTR for I/O operations results in OS_INTRPT. The default value"\
--- a/src/share/vm/runtime/handles.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/handles.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/icache.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/icache.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/interfaceSupport.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/interfaceSupport.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/interfaceSupport.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/interfaceSupport.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/java.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/java.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -551,6 +551,7 @@ void vm_direct_exit(int code) { notify_vm_shutdown(); + os::wait_for_keypress_at_exit(); ::exit(code); } @@ -577,11 +578,13 @@ void vm_shutdown() { vm_perform_shutdown_actions(); + os::wait_for_keypress_at_exit(); os::shutdown(); } void vm_abort(bool dump_core) { vm_perform_shutdown_actions(); + os::wait_for_keypress_at_exit(); os::abort(dump_core); ShouldNotReachHere(); }
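All three shutdown paths in java.cpp now call os::wait_for_keypress_at_exit() before the process goes away; the declaration appears in the os.hpp hunk further down, together with is_debugger_attached() and the new PauseAtExit diagnostic flag. The per-platform implementations are not shown in this changeset, so the following is only an assumed shape, pausing when the flag is set and a debugger is attached:

    #include <cstdio>

    // Hypothetical stand-ins; the real implementations are per-platform and not in this hunk.
    static bool PauseAtExit = false;
    static bool is_debugger_attached() { return false; }  // platform-specific in the real VM

    static void wait_for_keypress_at_exit() {
      // Assumed behaviour: only pause when explicitly asked to
      // and a user-mode debugger is actually attached.
      if (PauseAtExit && is_debugger_attached()) {
        std::fprintf(stderr, "Press any key to exit...\n");
        std::getchar();
      }
    }

    int main() {
      wait_for_keypress_at_exit();  // no-op unless PauseAtExit is set and a debugger is attached
      return 0;
    }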
--- a/src/share/vm/runtime/javaCalls.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/javaCalls.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/javaCalls.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/javaCalls.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/javaFrameAnchor.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/javaFrameAnchor.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/jniHandles.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/jniHandles.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/objectMonitor.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/objectMonitor.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/os.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/os.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -633,10 +633,10 @@ *q = (u_char)freeBlockPad; } if (PrintMalloc && tty != NULL) - fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, (intptr_t)memblock); + fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, (uintptr_t)memblock); } else if (PrintMalloc && tty != NULL) { // tty->print_cr("os::free %p", memblock); - fprintf(stderr, "os::free " PTR_FORMAT "\n", (intptr_t)memblock); + fprintf(stderr, "os::free " PTR_FORMAT "\n", (uintptr_t)memblock); } #endif ::free((char*)memblock - space_before); @@ -1079,11 +1079,6 @@ "%/lib/jsse.jar:" "%/lib/jce.jar:" "%/lib/charsets.jar:" - - // ## TEMPORARY hack to keep the legacy launcher working when - // ## only the boot module is installed (cf. j.l.ClassLoader) - "%/lib/modules/jdk.boot.jar:" - "%/classes"; char* sysclasspath = format_boot_path(classpath_format, home, home_len, fileSep, pathSep); if (sysclasspath == NULL) return false; @@ -1296,3 +1291,41 @@ } return result; } + +// Read file line by line, if line is longer than bsize, +// skip rest of line. +int os::get_line_chars(int fd, char* buf, const size_t bsize){ + size_t sz, i = 0; + + // read until EOF, EOL or buf is full + while ((sz = (int) read(fd, &buf[i], 1)) == 1 && i < (bsize-1) && buf[i] != '\n') { + ++i; + } + + if (buf[i] == '\n') { + // EOL reached so ignore EOL character and return + + buf[i] = 0; + return (int) i; + } + + buf[i+1] = 0; + + if (sz != 1) { + // EOF reached. if we read chars before EOF return them and + // return EOF on next call otherwise return EOF + + return (i == 0) ? -1 : (int) i; + } + + // line is longer than size of buf, skip to EOL + int ch; + while (read(fd, &ch, 1) == 1 && ch != '\n') { + // Do nothing + } + + // return initial part of line that fits in buf. + // If we reached EOF, it will be returned on next call. + + return (int) i; +}
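The new os::get_line_chars() above reads one line per call from a raw file descriptor, truncating lines longer than bsize and returning -1 at EOF. A self-contained usage sketch follows; it carries a slightly simplified local copy of the helper (not byte-for-byte the VM version), and the /etc/hosts path is just an example input:

    #include <fcntl.h>
    #include <unistd.h>
    #include <cstdio>

    // Simplified local variant of os::get_line_chars(), for a self-contained example.
    static int get_line_chars(int fd, char* buf, const size_t bsize) {
      size_t i = 0;
      ssize_t sz = 0;
      // read until EOF, EOL, or the buffer (minus terminator) is full
      while ((sz = read(fd, &buf[i], 1)) == 1 && i < (bsize - 1) && buf[i] != '\n')
        ++i;
      if (sz == 1 && buf[i] == '\n') {   // EOL: drop the newline and return the length
        buf[i] = 0;
        return (int) i;
      }
      buf[i] = 0;
      if (sz != 1)                        // EOF: return what we have, or -1 if nothing
        return (i == 0) ? -1 : (int) i;
      char ch;                            // overlong line: skip the rest up to EOL
      while (read(fd, &ch, 1) == 1 && ch != '\n') { }
      return (int) i;
    }

    int main() {
      int fd = open("/etc/hosts", O_RDONLY);   // any text file works; path is just an example
      if (fd < 0) return 1;
      char line[128];
      int n;
      while ((n = get_line_chars(fd, line, sizeof(line))) != -1)
        std::printf("%d chars: %s\n", n, line);
      close(fd);
      return 0;
    }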
--- a/src/share/vm/runtime/os.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/os.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -492,6 +492,12 @@ static void print_location(outputStream* st, intptr_t x, bool verbose = false); static size_t lasterror(char *buf, size_t len); + // Determines whether the calling process is being debugged by a user-mode debugger. + static bool is_debugger_attached(); + + // wait for a key press if PauseAtExit is set + static void wait_for_keypress_at_exit(void); + // The following two functions are used by fatal error handler to trace // native (C) frames. They are not part of frame.hpp/frame.cpp because // frame.hpp/cpp assume thread is JavaThread, and also because different @@ -652,6 +658,10 @@ // Hook for os specific jvm options that we don't want to abort on seeing static bool obsolete_option(const JavaVMOption *option); + // Read file line by line. If line is longer than bsize, + // rest of line is skipped. Returns number of bytes read or -1 on EOF + static int get_line_chars(int fd, char *buf, const size_t bsize); + // Platform dependent stuff #ifdef TARGET_OS_FAMILY_linux # include "os_linux.hpp"
--- a/src/share/vm/runtime/osThread.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/osThread.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -65,7 +65,7 @@ OSThreadStartFunc _start_proc; // Thread start routine void* _start_parm; // Thread start routine parameter volatile ThreadState _state; // Thread state *hint* - jint _interrupted; // Thread.isInterrupted state + volatile jint _interrupted; // Thread.isInterrupted state // Note: _interrupted must be jint, so that Java intrinsics can access it. // The value stored there must be either 0 or 1. It must be possible @@ -89,7 +89,7 @@ void* start_parm() const { return _start_parm; } void set_start_parm(void* start_parm) { _start_parm = start_parm; } - bool interrupted() const { return _interrupted != 0; } + volatile bool interrupted() const { return _interrupted != 0; } void set_interrupted(bool z) { _interrupted = z ? 1 : 0; } // Printing
--- a/src/share/vm/runtime/reflection.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/reflection.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -649,7 +649,7 @@ if (TraceClassResolution) { trace_class_resolution(k); } - return k->klass_part()->java_mirror(); + return k->java_mirror(); }; }
--- a/src/share/vm/runtime/reflection.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/reflection.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/reflectionUtils.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/reflectionUtils.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/registerMap.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/registerMap.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/rframe.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/rframe.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/safepoint.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/safepoint.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/serviceThread.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/serviceThread.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -70,11 +70,10 @@ java_lang_Thread::set_priority(thread_oop(), NearMaxPriority); java_lang_Thread::set_daemon(thread_oop()); thread->set_threadObj(thread_oop()); + _instance = thread; Threads::add(thread); Thread::start(thread); - - _instance = thread; } }
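The serviceThread.cpp reordering publishes _instance before the new thread is added and started, so anything racing with thread startup never observes a NULL instance pointer. A generic illustration of the publish-before-start pattern using standard C++ threads rather than the VM's Thread machinery:

    #include <atomic>
    #include <cstdio>
    #include <thread>

    struct Worker { int id; };

    // Hypothetical singleton slot standing in for ServiceThread::_instance.
    static std::atomic<Worker*> g_instance{nullptr};

    int main() {
      Worker* w = new Worker{1};
      g_instance.store(w);                    // publish first...
      std::thread t([] {                      // ...then start work that may read the slot
        Worker* self = g_instance.load();
        std::printf("worker sees instance id=%d\n", self ? self->id : -1);
      });
      t.join();
      delete w;
      return 0;
    }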
--- a/src/share/vm/runtime/sharedRuntime.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/sharedRuntime.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -431,25 +431,24 @@ // previous frame depending on the return address. address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) { - assert(frame::verify_return_pc(return_address), "must be a return pc"); - - // Reset MethodHandle flag. + assert(frame::verify_return_pc(return_address), err_msg("must be a return address: " INTPTR_FORMAT, return_address)); + + // Reset method handle flag. thread->set_is_method_handle_return(false); - // the fastest case first + // The fastest case first CodeBlob* blob = CodeCache::find_blob(return_address); - if (blob != NULL && blob->is_nmethod()) { - nmethod* code = (nmethod*)blob; - assert(code != NULL, "nmethod must be present"); - // Check if the return address is a MethodHandle call site. - thread->set_is_method_handle_return(code->is_method_handle_return(return_address)); + nmethod* nm = (blob != NULL) ? blob->as_nmethod_or_null() : NULL; + if (nm != NULL) { + // Set flag if return address is a method handle call site. + thread->set_is_method_handle_return(nm->is_method_handle_return(return_address)); // native nmethods don't have exception handlers - assert(!code->is_native_method(), "no exception handler"); - assert(code->header_begin() != code->exception_begin(), "no exception handler"); - if (code->is_deopt_pc(return_address)) { + assert(!nm->is_native_method(), "no exception handler"); + assert(nm->header_begin() != nm->exception_begin(), "no exception handler"); + if (nm->is_deopt_pc(return_address)) { return SharedRuntime::deopt_blob()->unpack_with_exception(); } else { - return code->exception_begin(); + return nm->exception_begin(); } } @@ -462,22 +461,9 @@ return Interpreter::rethrow_exception_entry(); } - // Compiled code - if (CodeCache::contains(return_address)) { - CodeBlob* blob = CodeCache::find_blob(return_address); - if (blob->is_nmethod()) { - nmethod* code = (nmethod*)blob; - assert(code != NULL, "nmethod must be present"); - // Check if the return address is a MethodHandle call site. 
- thread->set_is_method_handle_return(code->is_method_handle_return(return_address)); - assert(code->header_begin() != code->exception_begin(), "no exception handler"); - return code->exception_begin(); - } - if (blob->is_runtime_stub()) { - ShouldNotReachHere(); // callers are responsible for skipping runtime stub frames - } - } + guarantee(blob == NULL || !blob->is_runtime_stub(), "caller should have skipped stub"); guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!"); + #ifndef PRODUCT { ResourceMark rm; tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", return_address); @@ -485,6 +471,7 @@ tty->print_cr("b) other problem"); } #endif // PRODUCT + ShouldNotReachHere(); return NULL; } @@ -1707,7 +1694,7 @@ tty->print_cr("WrongMethodType thread="PTR_FORMAT" req="PTR_FORMAT" act="PTR_FORMAT"", thread, required, actual); } - assert(EnableMethodHandles, ""); + assert(EnableInvokeDynamic, ""); oop singleKlass = wrong_method_type_is_for_single_argument(thread, required); char* message = NULL; if (singleKlass != NULL) { @@ -1725,9 +1712,11 @@ message = generate_class_cast_message(objName, targetKlass->external_name()); } else { // %%% need to get the MethodType string, without messing around too much + const char* desc = NULL; // Get a signature from the invoke instruction const char* mhName = "method handle"; const char* targetType = "the required signature"; + int targetArity = -1, mhArity = -1; vframeStream vfst(thread, true); if (!vfst.at_end()) { Bytecode_invoke call(vfst.method(), vfst.bci()); @@ -1741,20 +1730,35 @@ && target->is_method_handle_invoke() && required == target->method_handle_type()) { targetType = target->signature()->as_C_string(); + targetArity = ArgumentCount(target->signature()).size(); } } - klassOop kignore; int fignore; - methodOop actual_method = MethodHandles::decode_method(actual, - kignore, fignore); + klassOop kignore; int dmf_flags = 0; + methodOop actual_method = MethodHandles::decode_method(actual, kignore, dmf_flags); + if ((dmf_flags & ~(MethodHandles::_dmf_has_receiver | + MethodHandles::_dmf_does_dispatch | + MethodHandles::_dmf_from_interface)) != 0) + actual_method = NULL; // MH does extra binds, drops, etc. + bool has_receiver = ((dmf_flags & MethodHandles::_dmf_has_receiver) != 0); if (actual_method != NULL) { - if (methodOopDesc::is_method_handle_invoke_name(actual_method->name())) - mhName = "$"; + mhName = actual_method->signature()->as_C_string(); + mhArity = ArgumentCount(actual_method->signature()).size(); + if (!actual_method->is_static()) mhArity += 1; + } else if (java_lang_invoke_MethodHandle::is_instance(actual)) { + oopDesc* mhType = java_lang_invoke_MethodHandle::type(actual); + mhArity = java_lang_invoke_MethodType::ptype_count(mhType); + stringStream st; + java_lang_invoke_MethodType::print_signature(mhType, &st); + mhName = st.as_string(); + } + if (targetArity != -1 && targetArity != mhArity) { + if (has_receiver && targetArity == mhArity-1) + desc = " cannot be called without a receiver argument as "; else - mhName = actual_method->signature()->as_C_string(); - if (mhName[0] == '$') - mhName = actual_method->signature()->as_C_string(); + desc = " cannot be called with a different arity as "; } message = generate_class_cast_message(mhName, targetType, + desc != NULL ? 
desc : " cannot be called as "); } if (TraceMethodHandles) { @@ -2504,20 +2508,10 @@ // java compiled calling convention to the native convention, handlizes // arguments, and transitions to native. On return from the native we transition // back to java blocking if a safepoint is in progress. -nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method) { +nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method, int compile_id) { ResourceMark rm; nmethod* nm = NULL; - if (PrintCompilation) { - ttyLocker ttyl; - tty->print("--- n%s ", (method->is_synchronized() ? "s" : " ")); - method->print_short_name(tty); - if (method->is_static()) { - tty->print(" (static)"); - } - tty->cr(); - } - assert(method->has_native_function(), "must have something valid to call!"); { @@ -2562,6 +2556,7 @@ // Generate the compiled-to-native wrapper code nm = SharedRuntime::generate_native_wrapper(&_masm, method, + compile_id, total_args_passed, comp_args_on_stack, sig_bt,regs, @@ -2573,6 +2568,10 @@ // Install the generated code. if (nm != NULL) { + if (PrintCompilation) { + ttyLocker ttyl; + CompileTask::print_compilation(tty, nm, method->is_static() ? "(static)" : ""); + } method->set_code(method, nm); nm->post_compiled_method_load_event(); } else {
--- a/src/share/vm/runtime/sharedRuntime.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/sharedRuntime.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -438,6 +438,7 @@ // returns. static nmethod *generate_native_wrapper(MacroAssembler* masm, methodHandle method, + int compile_id, int total_args_passed, int max_arg, BasicType *sig_bt, @@ -659,7 +660,7 @@ static AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, address c2i_unverified_entry); - static nmethod* create_native_wrapper(methodHandle method); + static nmethod* create_native_wrapper(methodHandle method, int compile_id); static AdapterHandlerEntry* get_adapter(methodHandle method); #ifdef HAVE_DTRACE_H
--- a/src/share/vm/runtime/signature.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/signature.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/signature.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/signature.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/stackValueCollection.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/stackValueCollection.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/statSampler.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/statSampler.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/stubCodeGenerator.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/stubCodeGenerator.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/stubRoutines.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/stubRoutines.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -80,30 +80,36 @@ jint StubRoutines::_fpu_subnormal_bias2[3] = { 0, 0, 0 }; // Compiled code entry points default values -// The dafault functions don't have separate disjoint versions. +// The default functions don't have separate disjoint versions. address StubRoutines::_jbyte_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jbyte_copy); address StubRoutines::_jshort_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jshort_copy); address StubRoutines::_jint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jint_copy); address StubRoutines::_jlong_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jlong_copy); address StubRoutines::_oop_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::oop_copy); +address StubRoutines::_oop_arraycopy_uninit = CAST_FROM_FN_PTR(address, StubRoutines::oop_copy_uninit); address StubRoutines::_jbyte_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jbyte_copy); address StubRoutines::_jshort_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jshort_copy); address StubRoutines::_jint_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jint_copy); address StubRoutines::_jlong_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jlong_copy); address StubRoutines::_oop_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::oop_copy); +address StubRoutines::_oop_disjoint_arraycopy_uninit = CAST_FROM_FN_PTR(address, StubRoutines::oop_copy_uninit); address StubRoutines::_arrayof_jbyte_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jbyte_copy); address StubRoutines::_arrayof_jshort_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jshort_copy); address StubRoutines::_arrayof_jint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jint_copy); address StubRoutines::_arrayof_jlong_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jlong_copy); address StubRoutines::_arrayof_oop_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy); +address StubRoutines::_arrayof_oop_arraycopy_uninit = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy_uninit); address StubRoutines::_arrayof_jbyte_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jbyte_copy); address StubRoutines::_arrayof_jshort_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jshort_copy); address StubRoutines::_arrayof_jint_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jint_copy); address StubRoutines::_arrayof_jlong_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jlong_copy); -address StubRoutines::_arrayof_oop_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy); +address StubRoutines::_arrayof_oop_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy); +address StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy_uninit); + address StubRoutines::_checkcast_arraycopy = NULL; +address StubRoutines::_checkcast_arraycopy_uninit = NULL; address 
StubRoutines::_unsafe_arraycopy = NULL; address StubRoutines::_generic_arraycopy = NULL; @@ -282,12 +288,12 @@ // Default versions of arraycopy functions // -static void gen_arraycopy_barrier_pre(oop* dest, size_t count) { +static void gen_arraycopy_barrier_pre(oop* dest, size_t count, bool dest_uninitialized) { assert(count != 0, "count should be non-zero"); assert(count <= (size_t)max_intx, "count too large"); BarrierSet* bs = Universe::heap()->barrier_set(); assert(bs->has_write_ref_array_pre_opt(), "Must have pre-barrier opt"); - bs->write_ref_array_pre(dest, (int)count); + bs->write_ref_array_pre(dest, (int)count, dest_uninitialized); } static void gen_arraycopy_barrier(oop* dest, size_t count) { @@ -330,7 +336,17 @@ SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy #endif // !PRODUCT assert(count != 0, "count should be non-zero"); - gen_arraycopy_barrier_pre(dest, count); + gen_arraycopy_barrier_pre(dest, count, /*dest_uninitialized*/false); + Copy::conjoint_oops_atomic(src, dest, count); + gen_arraycopy_barrier(dest, count); +JRT_END + +JRT_LEAF(void, StubRoutines::oop_copy_uninit(oop* src, oop* dest, size_t count)) +#ifndef PRODUCT + SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy +#endif // !PRODUCT + assert(count != 0, "count should be non-zero"); + gen_arraycopy_barrier_pre(dest, count, /*dest_uninitialized*/true); Copy::conjoint_oops_atomic(src, dest, count); gen_arraycopy_barrier(dest, count); JRT_END @@ -368,11 +384,20 @@ SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy #endif // !PRODUCT assert(count != 0, "count should be non-zero"); - gen_arraycopy_barrier_pre((oop *) dest, count); + gen_arraycopy_barrier_pre((oop *) dest, count, /*dest_uninitialized*/false); Copy::arrayof_conjoint_oops(src, dest, count); gen_arraycopy_barrier((oop *) dest, count); JRT_END +JRT_LEAF(void, StubRoutines::arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count)) +#ifndef PRODUCT + SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy +#endif // !PRODUCT + assert(count != 0, "count should be non-zero"); + gen_arraycopy_barrier_pre((oop *) dest, count, /*dest_uninitialized*/true); + Copy::arrayof_conjoint_oops(src, dest, count); + gen_arraycopy_barrier((oop *) dest, count); +JRT_END address StubRoutines::select_fill_function(BasicType t, bool aligned, const char* &name) { #define RETURN_STUB(xxx_fill) { \ @@ -408,3 +433,77 @@ #undef RETURN_STUB } + +// constants for computing the copy function +enum { + COPYFUNC_UNALIGNED = 0, + COPYFUNC_ALIGNED = 1, // src, dest aligned to HeapWordSize + COPYFUNC_CONJOINT = 0, + COPYFUNC_DISJOINT = 2 // src != dest, or transfer can descend +}; + +// Note: The condition "disjoint" applies also for overlapping copies +// where an descending copy is permitted (i.e., dest_offset <= src_offset). +address +StubRoutines::select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized) { + int selector = + (aligned ? COPYFUNC_ALIGNED : COPYFUNC_UNALIGNED) + + (disjoint ? 
COPYFUNC_DISJOINT : COPYFUNC_CONJOINT); + +#define RETURN_STUB(xxx_arraycopy) { \ + name = #xxx_arraycopy; \ + return StubRoutines::xxx_arraycopy(); } + +#define RETURN_STUB_PARM(xxx_arraycopy, parm) { \ + name = #xxx_arraycopy; \ + return StubRoutines::xxx_arraycopy(parm); } + + switch (t) { + case T_BYTE: + case T_BOOLEAN: + switch (selector) { + case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jbyte_arraycopy); + case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jbyte_arraycopy); + case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jbyte_disjoint_arraycopy); + case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jbyte_disjoint_arraycopy); + } + case T_CHAR: + case T_SHORT: + switch (selector) { + case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jshort_arraycopy); + case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jshort_arraycopy); + case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jshort_disjoint_arraycopy); + case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jshort_disjoint_arraycopy); + } + case T_INT: + case T_FLOAT: + switch (selector) { + case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jint_arraycopy); + case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jint_arraycopy); + case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jint_disjoint_arraycopy); + case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jint_disjoint_arraycopy); + } + case T_DOUBLE: + case T_LONG: + switch (selector) { + case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jlong_arraycopy); + case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jlong_arraycopy); + case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(jlong_disjoint_arraycopy); + case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_jlong_disjoint_arraycopy); + } + case T_ARRAY: + case T_OBJECT: + switch (selector) { + case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB_PARM(oop_arraycopy, dest_uninitialized); + case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB_PARM(arrayof_oop_arraycopy, dest_uninitialized); + case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB_PARM(oop_disjoint_arraycopy, dest_uninitialized); + case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB_PARM(arrayof_oop_disjoint_arraycopy, dest_uninitialized); + } + default: + ShouldNotReachHere(); + return NULL; + } + +#undef RETURN_STUB +#undef RETURN_STUB_PARM +}
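select_arraycopy_function() above packs the two copy properties into a 2-bit selector (bit 0: element-aligned, bit 1: disjoint) and dispatches per BasicType, with the oop cases additionally keyed by dest_uninitialized. A standalone sketch of just the selector encoding for one element type, using stub names instead of real code addresses:

    #include <cstdio>

    enum {
      COPYFUNC_UNALIGNED = 0,
      COPYFUNC_ALIGNED   = 1,   // src and dest aligned to HeapWordSize
      COPYFUNC_CONJOINT  = 0,
      COPYFUNC_DISJOINT  = 2    // src != dest, or a descending copy is permitted
    };

    // Hypothetical stub table for one element type; the real VM returns code addresses.
    static const char* const jint_stubs[4] = {
      "jint_arraycopy",                   // CONJOINT | UNALIGNED
      "arrayof_jint_arraycopy",           // CONJOINT | ALIGNED
      "jint_disjoint_arraycopy",          // DISJOINT | UNALIGNED
      "arrayof_jint_disjoint_arraycopy"   // DISJOINT | ALIGNED
    };

    static const char* select_jint_stub(bool aligned, bool disjoint) {
      int selector = (aligned  ? COPYFUNC_ALIGNED  : COPYFUNC_UNALIGNED)
                   + (disjoint ? COPYFUNC_DISJOINT : COPYFUNC_CONJOINT);
      return jint_stubs[selector];
    }

    int main() {
      std::printf("%s\n", select_jint_stub(true,  true));   // arrayof_jint_disjoint_arraycopy
      std::printf("%s\n", select_jint_stub(false, false));  // jint_arraycopy
      return 0;
    }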
--- a/src/share/vm/runtime/stubRoutines.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/stubRoutines.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -164,12 +164,12 @@ static address _jshort_arraycopy; static address _jint_arraycopy; static address _jlong_arraycopy; - static address _oop_arraycopy; + static address _oop_arraycopy, _oop_arraycopy_uninit; static address _jbyte_disjoint_arraycopy; static address _jshort_disjoint_arraycopy; static address _jint_disjoint_arraycopy; static address _jlong_disjoint_arraycopy; - static address _oop_disjoint_arraycopy; + static address _oop_disjoint_arraycopy, _oop_disjoint_arraycopy_uninit; // arraycopy operands aligned on zero'th element boundary // These are identical to the ones aligned aligned on an @@ -179,15 +179,15 @@ static address _arrayof_jshort_arraycopy; static address _arrayof_jint_arraycopy; static address _arrayof_jlong_arraycopy; - static address _arrayof_oop_arraycopy; + static address _arrayof_oop_arraycopy, _arrayof_oop_arraycopy_uninit; static address _arrayof_jbyte_disjoint_arraycopy; static address _arrayof_jshort_disjoint_arraycopy; static address _arrayof_jint_disjoint_arraycopy; static address _arrayof_jlong_disjoint_arraycopy; - static address _arrayof_oop_disjoint_arraycopy; + static address _arrayof_oop_disjoint_arraycopy, _arrayof_oop_disjoint_arraycopy_uninit; // these are recommended but optional: - static address _checkcast_arraycopy; + static address _checkcast_arraycopy, _checkcast_arraycopy_uninit; static address _unsafe_arraycopy; static address _generic_arraycopy; @@ -282,30 +282,42 @@ static address addr_fpu_subnormal_bias2() { return (address)&_fpu_subnormal_bias2; } + static address select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized); + static address jbyte_arraycopy() { return _jbyte_arraycopy; } static address jshort_arraycopy() { return _jshort_arraycopy; } static address jint_arraycopy() { return _jint_arraycopy; } static address jlong_arraycopy() { return _jlong_arraycopy; } - static address oop_arraycopy() { return _oop_arraycopy; } + static address oop_arraycopy(bool dest_uninitialized = false) { + return dest_uninitialized ? _oop_arraycopy_uninit : _oop_arraycopy; + } static address jbyte_disjoint_arraycopy() { return _jbyte_disjoint_arraycopy; } static address jshort_disjoint_arraycopy() { return _jshort_disjoint_arraycopy; } static address jint_disjoint_arraycopy() { return _jint_disjoint_arraycopy; } static address jlong_disjoint_arraycopy() { return _jlong_disjoint_arraycopy; } - static address oop_disjoint_arraycopy() { return _oop_disjoint_arraycopy; } + static address oop_disjoint_arraycopy(bool dest_uninitialized = false) { + return dest_uninitialized ? 
_oop_disjoint_arraycopy_uninit : _oop_disjoint_arraycopy; + } static address arrayof_jbyte_arraycopy() { return _arrayof_jbyte_arraycopy; } static address arrayof_jshort_arraycopy() { return _arrayof_jshort_arraycopy; } static address arrayof_jint_arraycopy() { return _arrayof_jint_arraycopy; } static address arrayof_jlong_arraycopy() { return _arrayof_jlong_arraycopy; } - static address arrayof_oop_arraycopy() { return _arrayof_oop_arraycopy; } + static address arrayof_oop_arraycopy(bool dest_uninitialized = false) { + return dest_uninitialized ? _arrayof_oop_arraycopy_uninit : _arrayof_oop_arraycopy; + } static address arrayof_jbyte_disjoint_arraycopy() { return _arrayof_jbyte_disjoint_arraycopy; } static address arrayof_jshort_disjoint_arraycopy() { return _arrayof_jshort_disjoint_arraycopy; } static address arrayof_jint_disjoint_arraycopy() { return _arrayof_jint_disjoint_arraycopy; } static address arrayof_jlong_disjoint_arraycopy() { return _arrayof_jlong_disjoint_arraycopy; } - static address arrayof_oop_disjoint_arraycopy() { return _arrayof_oop_disjoint_arraycopy; } + static address arrayof_oop_disjoint_arraycopy(bool dest_uninitialized = false) { + return dest_uninitialized ? _arrayof_oop_disjoint_arraycopy_uninit : _arrayof_oop_disjoint_arraycopy; + } - static address checkcast_arraycopy() { return _checkcast_arraycopy; } + static address checkcast_arraycopy(bool dest_uninitialized = false) { + return dest_uninitialized ? _checkcast_arraycopy_uninit : _checkcast_arraycopy; + } static address unsafe_arraycopy() { return _unsafe_arraycopy; } static address generic_arraycopy() { return _generic_arraycopy; } @@ -352,17 +364,19 @@ // Default versions of the above arraycopy functions for platforms which do // not have specialized versions // - static void jbyte_copy (jbyte* src, jbyte* dest, size_t count); - static void jshort_copy(jshort* src, jshort* dest, size_t count); - static void jint_copy (jint* src, jint* dest, size_t count); - static void jlong_copy (jlong* src, jlong* dest, size_t count); - static void oop_copy (oop* src, oop* dest, size_t count); + static void jbyte_copy (jbyte* src, jbyte* dest, size_t count); + static void jshort_copy (jshort* src, jshort* dest, size_t count); + static void jint_copy (jint* src, jint* dest, size_t count); + static void jlong_copy (jlong* src, jlong* dest, size_t count); + static void oop_copy (oop* src, oop* dest, size_t count); + static void oop_copy_uninit(oop* src, oop* dest, size_t count); - static void arrayof_jbyte_copy (HeapWord* src, HeapWord* dest, size_t count); - static void arrayof_jshort_copy(HeapWord* src, HeapWord* dest, size_t count); - static void arrayof_jint_copy (HeapWord* src, HeapWord* dest, size_t count); - static void arrayof_jlong_copy (HeapWord* src, HeapWord* dest, size_t count); - static void arrayof_oop_copy (HeapWord* src, HeapWord* dest, size_t count); + static void arrayof_jbyte_copy (HeapWord* src, HeapWord* dest, size_t count); + static void arrayof_jshort_copy (HeapWord* src, HeapWord* dest, size_t count); + static void arrayof_jint_copy (HeapWord* src, HeapWord* dest, size_t count); + static void arrayof_jlong_copy (HeapWord* src, HeapWord* dest, size_t count); + static void arrayof_oop_copy (HeapWord* src, HeapWord* dest, size_t count); + static void arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count); }; #endif // SHARE_VM_RUNTIME_STUBROUTINES_HPP
--- a/src/share/vm/runtime/sweeper.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/sweeper.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -418,6 +418,11 @@ // state of the code cache if it's requested. void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) { if (PrintMethodFlushing) { + stringStream s; + // Dump code cache state into a buffer before locking the tty, + // because log_state() will use locks causing lock conflicts. + CodeCache::log_state(&s); + ttyLocker ttyl; tty->print("### sweeper: %s ", msg); if (format != NULL) { @@ -426,12 +431,15 @@ tty->vprint(format, ap); va_end(ap); } - tty->print_cr(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'" - " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'", - CodeCache::nof_blobs(), CodeCache::nof_nmethods(), CodeCache::nof_adapters(), CodeCache::unallocated_capacity()); + tty->print_cr(s.as_string()); } if (LogCompilation && (xtty != NULL)) { + stringStream s; + // Dump code cache state into a buffer before locking the tty, + // because log_state() will use locks causing lock conflicts. + CodeCache::log_state(&s); + ttyLocker ttyl; xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count()); if (format != NULL) { @@ -440,9 +448,7 @@ xtty->vprint(format, ap); va_end(ap); } - xtty->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'" - " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'", - CodeCache::nof_blobs(), CodeCache::nof_nmethods(), CodeCache::nof_adapters(), CodeCache::unallocated_capacity()); + xtty->print(s.as_string()); xtty->stamp(); xtty->end_elem(); }
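The sweeper.cpp change formats the code-cache state into a stringStream before taking ttyLocker, since CodeCache::log_state() acquires locks of its own and doing that while holding the tty lock risks lock-order conflicts. A generic illustration of the same gather-then-lock pattern in standard C++ (two mutexes standing in for the VM locks, made-up numbers):

    #include <cstdio>
    #include <mutex>
    #include <sstream>
    #include <string>

    static std::mutex tty_lock;          // stands in for ttyLocker
    static std::mutex code_cache_lock;   // stands in for the locks log_state() needs

    // Needs code_cache_lock; must NOT be called while tty_lock is held.
    static std::string log_state() {
      std::lock_guard<std::mutex> g(code_cache_lock);
      std::ostringstream s;
      s << "total_blobs='42' nmethods='17' free_code_cache='123456'";   // made-up numbers
      return s.str();
    }

    static void log_sweep(const char* msg) {
      std::string state = log_state();            // gather state first, outside tty_lock
      std::lock_guard<std::mutex> g(tty_lock);    // then lock the output stream and print
      std::printf("### sweeper: %s %s\n", msg, state.c_str());
    }

    int main() {
      log_sweep("finished");
      return 0;
    }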
--- a/src/share/vm/runtime/synchronizer.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/synchronizer.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/thread.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/thread.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -3169,7 +3169,7 @@ fieldDescriptor fd; // Possible we might not find this field; if so, don't break if (ik->find_local_field(vmSymbols::frontCacheEnabled_name(), vmSymbols::bool_signature(), &fd)) { - k()->bool_field_put(fd.offset(), true); + k()->java_mirror()->bool_field_put(fd.offset(), true); } } @@ -3185,7 +3185,7 @@ fieldDescriptor fd; // Possible we might not find this field: if so, silently don't break if (ik->find_local_field(vmSymbols::stringCacheEnabled_name(), vmSymbols::bool_signature(), &fd)) { - k()->bool_field_put(fd.offset(), true); + k()->java_mirror()->bool_field_put(fd.offset(), true); } } } @@ -3232,7 +3232,7 @@ warning("java.lang.ArithmeticException has not been initialized"); warning("java.lang.StackOverflowError has not been initialized"); } - } + } // See : bugid 4211085. // Background : the static initializer of java.lang.Compiler tries to read @@ -3647,6 +3647,7 @@ if (ShowMessageBoxOnError && is_error_reported()) { os::infinite_sleep(); } + os::wait_for_keypress_at_exit(); if (JDK_Version::is_jdk12x_version()) { // We are the last thread running, so check if finalizers should be run.
--- a/src/share/vm/runtime/threadLocalStorage.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/threadLocalStorage.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/vframe.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/vframe.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/vmStructs.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/vmStructs.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -70,6 +70,7 @@ #include "oops/cpCacheKlass.hpp" #include "oops/cpCacheOop.hpp" #include "oops/instanceKlass.hpp" +#include "oops/instanceMirrorKlass.hpp" #include "oops/instanceKlassKlass.hpp" #include "oops/instanceOop.hpp" #include "oops/klass.hpp" @@ -269,7 +270,7 @@ nonstatic_field(instanceKlass, _inner_classes, typeArrayOop) \ nonstatic_field(instanceKlass, _nonstatic_field_size, int) \ nonstatic_field(instanceKlass, _static_field_size, int) \ - nonstatic_field(instanceKlass, _static_oop_field_size, int) \ + nonstatic_field(instanceKlass, _static_oop_field_count, int) \ nonstatic_field(instanceKlass, _nonstatic_oop_map_size, int) \ nonstatic_field(instanceKlass, _is_marked_dependent, bool) \ nonstatic_field(instanceKlass, _minor_version, u2) \ @@ -840,7 +841,7 @@ /* OSThread */ \ /************/ \ \ - nonstatic_field(OSThread, _interrupted, jint) \ + volatile_nonstatic_field(OSThread, _interrupted, jint) \ \ /************************/ \ /* OopMap and OopMapSet */ \ @@ -945,6 +946,15 @@ static_field(Arguments, _num_jvm_args, int) \ static_field(Arguments, _java_command, char*) \ \ + /*********************************/ \ + /* java_lang_Class fields */ \ + /*********************************/ \ + \ + static_field(java_lang_Class, klass_offset, int) \ + static_field(java_lang_Class, resolved_constructor_offset, int) \ + static_field(java_lang_Class, array_klass_offset, int) \ + static_field(java_lang_Class, oop_size_offset, int) \ + static_field(java_lang_Class, static_oop_field_count_offset, int) \ \ /************************/ \ /* Miscellaneous fields */ \ @@ -1092,6 +1102,7 @@ declare_type(instanceKlass, Klass) \ declare_type(instanceKlassKlass, klassKlass) \ declare_type(instanceOopDesc, oopDesc) \ + declare_type(instanceMirrorKlass, instanceKlass) \ declare_type(instanceRefKlass, instanceKlass) \ declare_type(klassKlass, Klass) \ declare_type(klassOopDesc, oopDesc) \ @@ -1414,6 +1425,7 @@ declare_toplevel_type(intptr_t*) \ declare_unsigned_integer_type(InvocationCounter) /* FIXME: wrong type (not integer) */ \ declare_toplevel_type(JavaThread*) \ + declare_toplevel_type(java_lang_Class) \ declare_toplevel_type(jbyte*) \ declare_toplevel_type(jbyte**) \ declare_toplevel_type(jint*) \ @@ -1543,12 +1555,6 @@ \ declare_constant(SymbolTable::symbol_table_size) \ \ - /***************/ \ - /* StringTable */ \ - /***************/ \ - \ - declare_constant(StringTable::string_table_size) \ - \ /********************/ \ /* SystemDictionary */ \ /********************/ \ @@ -1700,15 +1706,6 @@ \ declare_constant(ConstantPoolCacheEntry::tosBits) \ \ - /*********************************/ \ - /* java_lang_Class field offsets */ \ - /*********************************/ \ - \ - declare_constant(java_lang_Class::hc_klass_offset) \ - declare_constant(java_lang_Class::hc_array_klass_offset) \ - declare_constant(java_lang_Class::hc_resolved_constructor_offset) \ - declare_constant(java_lang_Class::hc_number_of_fake_oop_fields) \ - \ /***************************************/ \ /* java_lang_Thread::ThreadStatus enum */ \ /***************************************/ \
--- a/src/share/vm/runtime/vmStructs.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/vmStructs.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/vm_operations.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/vm_operations.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/vm_operations.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/vm_operations.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/vm_version.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/vm_version.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/vm_version.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/runtime/vm_version.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/attachListener.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/services/attachListener.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/attachListener.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/services/attachListener.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/classLoadingService.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/services/classLoadingService.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/heapDumper.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/services/heapDumper.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -832,7 +832,7 @@ // value int offset = fld.offset(); - address addr = (address)k + offset; + address addr = (address)ikh->java_mirror() + offset; dump_field_value(writer, sig->byte_at(0), addr); }
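Editor's note: this heapDumper.cpp hunk follows the same mirror move as the vmStructs change above. Static field values now live in the java.lang.Class mirror object, so the dumper must add the field offset to ikh->java_mirror() rather than to the klass address. A toy, self-contained analogy (not HotSpot code; the struct names and layouts are invented) of why the old address computation reads the wrong memory:

    #include <cstdio>
    #include <cstdint>
    #include <cstddef>

    // Toy analogy, not HotSpot code: static field storage lives in the
    // java.lang.Class mirror object rather than in the klass, so
    // "klass address + field offset" no longer points at the value;
    // "mirror address + field offset" does.
    struct Mirror { int32_t mark;          int32_t static_field; };
    struct Klass  { int32_t layout_helper; int32_t flags; Mirror* java_mirror; };

    int main() {
      Mirror m = { 0, 42 };
      Klass  k = { 0, -1, &m };
      size_t offset = offsetof(Mirror, static_field);

      const char* kbase = (const char*) &k;
      const char* mbase = (const char*) k.java_mirror;
      printf("old scheme reads %d, new scheme reads %d\n",
             *(const int32_t*)(kbase + offset),    // lands on Klass::flags: garbage
             *(const int32_t*)(mbase + offset));   // lands on Mirror::static_field: 42
      return 0;
    }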
--- a/src/share/vm/services/management.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/services/management.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/memoryManager.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/services/memoryManager.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/memoryPool.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/services/memoryPool.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/memoryService.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/services/memoryService.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/shark/llvmHeaders.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/shark/llvmHeaders.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright 2008, 2009, 2010 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -46,7 +46,11 @@ #include <llvm/ModuleProvider.h> #endif #include <llvm/Support/IRBuilder.h> +#if SHARK_LLVM_VERSION >= 29 +#include <llvm/Support/Threading.h> +#else #include <llvm/System/Threading.h> +#endif #include <llvm/Target/TargetSelect.h> #include <llvm/Type.h> #include <llvm/ExecutionEngine/JITMemoryManager.h> @@ -55,8 +59,12 @@ #include <llvm/ExecutionEngine/JIT.h> #include <llvm/ADT/StringMap.h> #include <llvm/Support/Debug.h> +#if SHARK_LLVM_VERSION >= 29 +#include <llvm/Support/Host.h> +#else #include <llvm/System/Host.h> #endif +#endif #include <map>
--- a/src/share/vm/shark/sharkCompiler.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/shark/sharkCompiler.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,6 +1,6 @@ /* * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright 2008, 2009, 2010 Red Hat, Inc. + * Copyright 2008, 2009, 2010, 2011 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -218,6 +218,7 @@ nmethod* SharkCompiler::generate_native_wrapper(MacroAssembler* masm, methodHandle target, + int compile_id, BasicType* arg_types, BasicType return_type) { assert(is_initialized(), "should be"); @@ -241,6 +242,7 @@ // Return the nmethod for installation in the VM return nmethod::new_native_nmethod(target, + compile_id, masm->code(), 0, 0,
--- a/src/share/vm/shark/sharkCompiler.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/shark/sharkCompiler.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,6 +1,6 @@ /* * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright 2008, 2009 Red Hat, Inc. + * Copyright 2008, 2009, 2010, 2011 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,6 +60,7 @@ // Generate a wrapper for a native (JNI) method nmethod* generate_native_wrapper(MacroAssembler* masm, methodHandle target, + int compile_id, BasicType* arg_types, BasicType return_type); @@ -113,7 +114,8 @@ // Global access public: static SharkCompiler* compiler() { - AbstractCompiler *compiler = CompileBroker::compiler(CompLevel_simple); + AbstractCompiler *compiler = + CompileBroker::compiler(CompLevel_full_optimization); assert(compiler->is_shark() && compiler->is_initialized(), "should be"); return (SharkCompiler *) compiler; }
--- a/src/share/vm/shark/sharkNativeWrapper.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/shark/sharkNativeWrapper.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright 2009, 2010 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -101,7 +101,7 @@ builder()->CreateStore( builder()->CreateInlineOop( JNIHandles::make_local( - target()->method_holder()->klass_part()->java_mirror())), + target()->method_holder()->java_mirror())), oop_tmp_slot()); param_types.push_back(box_type);
--- a/src/share/vm/utilities/constantTag.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/constantTag.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -93,8 +93,6 @@ return "MethodType"; case JVM_CONSTANT_InvokeDynamic : return "InvokeDynamic"; - case JVM_CONSTANT_InvokeDynamicTrans : - return "InvokeDynamic/transitional"; case JVM_CONSTANT_Object : return "Object"; case JVM_CONSTANT_Utf8 :
--- a/src/share/vm/utilities/constantTag.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/constantTag.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -86,8 +86,7 @@ bool is_method_type() const { return _tag == JVM_CONSTANT_MethodType; } bool is_method_handle() const { return _tag == JVM_CONSTANT_MethodHandle; } - bool is_invoke_dynamic() const { return (_tag == JVM_CONSTANT_InvokeDynamic || - _tag == JVM_CONSTANT_InvokeDynamicTrans); } + bool is_invoke_dynamic() const { return _tag == JVM_CONSTANT_InvokeDynamic; } bool is_loadable_constant() const { return ((_tag >= JVM_CONSTANT_Integer && _tag <= JVM_CONSTANT_String) ||
--- a/src/share/vm/utilities/copy.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/copy.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/debug.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/debug.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/debug.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/debug.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #ifndef SHARE_VM_UTILITIES_DEBUG_HPP #define SHARE_VM_UTILITIES_DEBUG_HPP +#include "prims/jvm.h" #include "utilities/globalDefinitions.hpp" #include <stdarg.h> @@ -48,7 +49,7 @@ FormatBuffer<bufsz>::FormatBuffer(const char * format, ...) { va_list argp; va_start(argp, format); - vsnprintf(_buf, bufsz, format, argp); + jio_vsnprintf(_buf, bufsz, format, argp); va_end(argp); } @@ -61,7 +62,7 @@ va_list argp; va_start(argp, format); - vsnprintf(buf_end, bufsz - len, format, argp); + jio_vsnprintf(buf_end, bufsz - len, format, argp); va_end(argp); }
--- a/src/share/vm/utilities/elfSymbolTable.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/elfSymbolTable.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/exceptions.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/exceptions.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/exceptions.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/exceptions.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/globalDefinitions.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/globalDefinitions.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -746,9 +746,9 @@ CompLevel_simple = 1, // C1 CompLevel_limited_profile = 2, // C1, invocation & backedge counters CompLevel_full_profile = 3, // C1, invocation & backedge counters + mdo - CompLevel_full_optimization = 4, // C2 + CompLevel_full_optimization = 4, // C2 or Shark -#if defined(COMPILER2) +#if defined(COMPILER2) || defined(SHARK) CompLevel_highest_tier = CompLevel_full_optimization, // pure C2 and tiered #elif defined(COMPILER1) CompLevel_highest_tier = CompLevel_simple, // pure C1 @@ -760,7 +760,7 @@ CompLevel_initial_compile = CompLevel_full_profile // tiered #elif defined(COMPILER1) CompLevel_initial_compile = CompLevel_simple // pure C1 -#elif defined(COMPILER2) +#elif defined(COMPILER2) || defined(SHARK) CompLevel_initial_compile = CompLevel_full_optimization // pure C2 #else CompLevel_initial_compile = CompLevel_none @@ -1185,7 +1185,7 @@ // '%d' formats to indicate a 64-bit quantity; commonly "l" (in LP64) or "ll" // (in ILP32). -#define BOOL_TO_STR(__b) (__b) ? "true" : "false" +#define BOOL_TO_STR(_b_) ((_b_) ? "true" : "false") // Format 32-bit quantities. #define INT32_FORMAT "%d"
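Editor's note: besides wiring Shark into the CompLevel_full_optimization tier, the hunk above also hardens the BOOL_TO_STR macro with outer parentheses. A minimal stand-alone illustration of the parsing hazard those parentheses prevent (the macro names are suffixed here so both forms can coexist; the "+ 1" usage is contrived, purely to expose the precedence issue):

    #include <cstdio>

    // Old and new forms of the macro, as in the hunk above:
    #define BOOL_TO_STR_OLD(__b) (__b) ? "true" : "false"
    #define BOOL_TO_STR_NEW(_b_) ((_b_) ? "true" : "false")

    int main() {
      bool b = true;

      // Both behave identically when used as a self-contained argument:
      printf("%s / %s\n", BOOL_TO_STR_OLD(b), BOOL_TO_STR_NEW(b));   // true / true

      // Without the outer parentheses the expansion can fuse with adjacent
      // operators.  '+' binds tighter than '?:', so the old macro applies
      // the "+ 1" only to the "false" branch:
      printf("%s\n", BOOL_TO_STR_OLD(b) + 1);  // prints "true" (the + 1 is swallowed by the else-branch)
      printf("%s\n", BOOL_TO_STR_NEW(b) + 1);  // prints "rue"  (the + 1 applies to the chosen string)
      return 0;
    }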
--- a/src/share/vm/utilities/globalDefinitions_gcc.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/globalDefinitions_gcc.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -77,6 +77,9 @@ # endif #ifdef LINUX +#ifndef __STDC_LIMIT_MACROS +#define __STDC_LIMIT_MACROS +#endif // __STDC_LIMIT_MACROS #include <inttypes.h> #include <signal.h> #include <ucontext.h>
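Editor's note: the __STDC_LIMIT_MACROS define added above matters because, on the older glibc headers this code targets, C++ translation units only see the C99 limit macros (UINT32_MAX, UINT64_MAX, ...) from <stdint.h>/<inttypes.h> when that macro is defined before the include. A minimal sketch, assuming such a glibc:

    #ifndef __STDC_LIMIT_MACROS
    #define __STDC_LIMIT_MACROS   // must precede <stdint.h>/<inttypes.h> on older glibc
    #endif
    #include <stdint.h>
    #include <cstdio>

    int main() {
      // Without the define, C++ translation units on older glibc do not see
      // the C99 limit macros at all, and these lines fail to compile:
      printf("UINT32_MAX = %u\n",   (unsigned) UINT32_MAX);
      printf("UINT64_MAX = %llu\n", (unsigned long long) UINT64_MAX);
      return 0;
    }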
--- a/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -148,6 +148,17 @@ #endif #endif +// On solaris 8, UINTPTR_MAX is defined as empty. +// Everywhere else it's an actual value. +#if UINTPTR_MAX - 1 == -1 +#undef UINTPTR_MAX +#ifdef _LP64 +#define UINTPTR_MAX UINT64_MAX +#else +#define UINTPTR_MAX UINT32_MAX +#endif /* ifdef _LP64 */ +#endif + // Additional Java basic types typedef unsigned char jubyte;
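Editor's note: the Solaris 8 workaround above relies on a small preprocessor trick: if UINTPTR_MAX is defined but expands to nothing, the test "UINTPTR_MAX - 1 == -1" is seen by the preprocessor as "- 1 == -1", which is true, so the empty definition gets replaced with a usable value. A stand-alone illustration of the same trick with a hypothetical macro name:

    #include <cstdio>

    // Stand-alone illustration of the detection trick used above.  When a
    // macro is defined but expands to nothing (as UINTPTR_MAX does on
    // Solaris 8), the #if condition reduces to "- 1 == -1", which is true,
    // and the empty definition can be replaced.  A macro with a real numeric
    // value (say 100) would yield "100 - 1 == -1", which is false.
    #define MAYBE_EMPTY            /* defined, but expands to nothing */
    #if MAYBE_EMPTY - 1 == -1
    #undef MAYBE_EMPTY
    #define MAYBE_EMPTY 0xFFFFFFFFu
    #endif

    int main() {
      printf("MAYBE_EMPTY = %u\n", (unsigned) MAYBE_EMPTY);  // prints the replacement value
      return 0;
    }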
--- a/src/share/vm/utilities/globalDefinitions_visCPP.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/globalDefinitions_visCPP.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,6 +41,7 @@ # include <stdio.h> // for va_list # include <time.h> # include <fcntl.h> +# include <limits.h> // Need this on windows to get the math constants (e.g., M_PI). #define _USE_MATH_DEFINES # include <math.h> @@ -99,6 +100,14 @@ typedef signed int ssize_t; #endif +#ifndef UINTPTR_MAX +#ifdef _WIN64 +#define UINTPTR_MAX _UI64_MAX +#else +#define UINTPTR_MAX _UI32_MAX +#endif +#endif + //---------------------------------------------------------------------------------------------------- // Additional Java basic types
--- a/src/share/vm/utilities/hashtable.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/hashtable.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/hashtable.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/hashtable.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/hashtable.inline.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/hashtable.inline.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/macros.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/macros.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -161,6 +161,14 @@ #define NOT_WINDOWS(code) code #endif +#ifdef _WIN64 +#define WIN64_ONLY(code) code +#define NOT_WIN64(code) +#else +#define WIN64_ONLY(code) +#define NOT_WIN64(code) code +#endif + #if defined(IA32) || defined(AMD64) #define X86 #define X86_ONLY(code) code
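Editor's note: WIN64_ONLY/NOT_WIN64 follow the established *_ONLY/NOT_* pattern in macros.hpp for inlining platform-specific code without #ifdef blocks at every use site. A tiny stand-alone usage sketch (the printf bodies are ours, only to show the expansion):

    #include <cstdio>

    // Same definitions as the hunk above, repeated so this sketch is
    // self-contained:
    #ifdef _WIN64
    #define WIN64_ONLY(code) code
    #define NOT_WIN64(code)
    #else
    #define WIN64_ONLY(code)
    #define NOT_WIN64(code) code
    #endif

    int main() {
      // Exactly one of the two statements survives preprocessing:
      WIN64_ONLY(printf("running on a 64-bit Windows build\n");)
      NOT_WIN64(printf("not a 64-bit Windows build\n");)
      return 0;
    }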
--- a/src/share/vm/utilities/ostream.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/ostream.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -424,6 +424,15 @@ const char* star = strchr(basename, '*'); int star_pos = (star == NULL) ? -1 : (star - nametail); + int skip = 1; + if (star == NULL) { + // Try %p + star = strstr(basename, "%p"); + if (star != NULL) { + skip = 2; + } + } + star_pos = (star == NULL) ? -1 : (star - nametail); char pid[32]; if (star_pos >= 0) { @@ -442,11 +451,11 @@ } if (star_pos >= 0) { - // convert foo*bar.log to foo123bar.log + // convert foo*bar.log or foo%pbar.log to foo123bar.log int buf_pos = (int) strlen(buf); strncpy(&buf[buf_pos], nametail, star_pos); strcpy(&buf[buf_pos + star_pos], pid); - nametail += star_pos + 1; // skip prefix and star + nametail += star_pos + skip; // skip prefix and pid format } strcat(buf, nametail); // append rest of name, or all of name @@ -466,7 +475,7 @@ // Note: This feature is for maintainer use only. No need for L10N. jio_print(warnbuf); FREE_C_HEAP_ARRAY(char, try_name); - try_name = make_log_name("hs_pid*.log", os::get_temp_directory()); + try_name = make_log_name("hs_pid%p.log", os::get_temp_directory()); jio_snprintf(warnbuf, sizeof(warnbuf), "Warning: Forcing option -XX:LogFile=%s\n", try_name); jio_print(warnbuf); @@ -801,6 +810,8 @@ _buffer = buffer; _buflen = buflen; _outer_stream = outer_stream; + // compile task prints time stamp relative to VM start + _stamp.update_to(1); } void staticBufferStream::write(const char* c, size_t len) {
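Editor's note: the ostream.cpp hunk teaches make_log_name() to accept "%p" as well as '*' as the pid placeholder in -XX:LogFile patterns, and switches the forced error-log name to hs_pid%p.log. The following stand-alone sketch reproduces the substitution idea only (it is not the VM's make_log_name; the helper name and the use of std::string are ours):

    #include <cstdio>
    #include <cstring>
    #include <string>

    // Stand-alone sketch of the pattern expansion added above: a '*' or a
    // "%p" in the log-file pattern is replaced by the process id.
    static std::string expand_log_name(const char* pattern, int pid) {
      const char* star = strchr(pattern, '*');
      size_t skip = 1;                          // a '*' occupies one character
      if (star == NULL) {
        star = strstr(pattern, "%p");           // fall back to the %p form
        skip = 2;                               // "%p" occupies two characters
      }
      if (star == NULL) return pattern;         // no placeholder: use the name verbatim

      char pid_buf[32];
      snprintf(pid_buf, sizeof(pid_buf), "%d", pid);

      std::string out(pattern, star - pattern); // prefix before the placeholder
      out += pid_buf;                           // the pid itself
      out += star + skip;                       // rest of the name
      return out;
    }

    int main() {
      printf("%s\n", expand_log_name("hs_pid%p.log", 1234).c_str()); // hs_pid1234.log
      printf("%s\n", expand_log_name("foo*bar.log", 1234).c_str());  // foo1234bar.log
      return 0;
    }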
--- a/src/share/vm/utilities/ostream.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/ostream.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/taskqueue.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/taskqueue.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/utf8.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/utf8.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/utf8.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/utf8.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/vmError.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/vmError.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -802,7 +802,7 @@ first_error_tid = mytid; set_error_reported(); - if (ShowMessageBoxOnError) { + if (ShowMessageBoxOnError || PauseAtExit) { show_message_box(buffer, sizeof(buffer)); // User has asked JVM to abort. Reset ShowMessageBoxOnError so the
--- a/src/share/vm/utilities/workgroup.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/workgroup.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -156,7 +156,7 @@ tty->print_cr("/nFinished work gang %s: %d/%d sequence %d", name(), finished_workers(), total_workers(), _sequence_number); - } + } } void AbstractWorkGang::stop() {
--- a/src/share/vm/utilities/workgroup.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/workgroup.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,20 @@ # include "thread_windows.inline.hpp" #endif +// Task class hierarchy: +// AbstractGangTask +// AbstractGangTaskWOopQueues +// +// Gang/Group class hierarchy: +// AbstractWorkGang +// WorkGang +// FlexibleWorkGang +// YieldingFlexibleWorkGang (defined in another file) +// +// Worker class hierarchy: +// GangWorker (subclass of WorkerThread) +// YieldingFlexibleGangWorker (defined in another file) + // Forward declarations of classes defined here class WorkGang;
--- a/src/share/vm/utilities/xmlstream.cpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/xmlstream.cpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/xmlstream.hpp Fri Apr 22 15:22:45 2011 +0200 +++ b/src/share/vm/utilities/xmlstream.hpp Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it
--- a/test/compiler/6795161/Test.java Fri Apr 22 15:22:45 2011 +0200 +++ b/test/compiler/6795161/Test.java Fri Apr 22 15:30:53 2011 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ * @test * @bug 6795161 * @summary Escape analysis leads to data corruption - * @run main/othervm -server -Xcomp -XX:CompileOnly=Test -XX:+DoEscapeAnalysis Test + * @run main/othervm -server -XX:+IgnoreUnrecognizedVMOptions -Xcomp -XX:CompileOnly=Test -XX:+DoEscapeAnalysis Test */ class Test_Class_1 {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/6942326/Test.java Fri Apr 22 15:30:53 2011 +0200 @@ -0,0 +1,409 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 6942326 + * @summary x86 code in string_indexof() could read beyond reserved heap space + * + * @run main/othervm/timeout=300 -Xmx32m -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CompileCommand=exclude,Test,main -XX:CompileCommand=exclude,Test,test_varsub_indexof -XX:CompileCommand=exclude,Test,test_varstr_indexof -XX:CompileCommand=exclude,Test,test_missub_indexof -XX:CompileCommand=exclude,Test,test_consub_indexof -XX:CompileCommand=exclude,Test,test_conmis_indexof -XX:CompileCommand=exclude,Test,test_subcon Test + * + */ + +public class Test { + + static String[] strings = new String[1024]; + private static final int ITERATIONS = 100000; + + public static void main(String[] args) { + + long start_total = System.currentTimeMillis(); + + // search variable size substring in string (33 chars). + String a = " 1111111111111xx1111111111111xx11y"; // +1 to execute a.substring(1) first + String b = "1111111111111xx1111111111111xx11y"; + test_varsub_indexof(a, b); + + // search variable size substring in string (32 chars). + a = " 1111111111111xx1111111111111xx1y"; + b = "1111111111111xx1111111111111xx1y"; + test_varsub_indexof(a, b); + + // search variable size substring in string (17 chars). + a = " 1111111111111xx1y"; + b = "1111111111111xx1y"; + test_varsub_indexof(a, b); + + // search variable size substring in string (16 chars). + a = " 111111111111xx1y"; + b = "111111111111xx1y"; + test_varsub_indexof(a, b); + + // search variable size substring in string (8 chars). + a = " 1111xx1y"; + b = "1111xx1y"; + test_varsub_indexof(a, b); + + // search variable size substring in string (7 chars). + a = " 111xx1y"; + b = "111xx1y"; + test_varsub_indexof(a, b); + + + + // search substring (17 chars) in variable size string. + a = "1111111111111xx1x"; + b = " 1111111111111xx1111111111111xx1x"; // +1 to execute b.substring(1) first + test_varstr_indexof(a, b); + + // search substring (16 chars) in variable size string. + a = "111111111111xx1x"; + b = " 1111111111111xx1111111111111xx1x"; + test_varstr_indexof(a, b); + + // search substring (9 chars) in variable size string. + a = "11111xx1x"; + b = " 1111111111111xx1111111111111xx1x"; + test_varstr_indexof(a, b); + + // search substring (8 chars) in variable size string. 
+ a = "1111xx1x"; + b = " 1111111111111xx1111111111111xx1x"; + test_varstr_indexof(a, b); + + // search substring (4 chars) in variable size string. + a = "xx1x"; + b = " 1111111111111xx1111111111111xx1x"; + test_varstr_indexof(a, b); + + // search substring (3 chars) in variable size string. + a = "x1x"; + b = " 1111111111111xx1111111111111xx1x"; + test_varstr_indexof(a, b); + + // search substring (2 chars) in variable size string. + a = "1y"; + b = " 1111111111111xx1111111111111xx1y"; + test_varstr_indexof(a, b); + + + + // search non matching variable size substring in string (33 chars). + a = " 1111111111111xx1111111111111xx11z"; // +1 to execute a.substring(1) first + b = "1111111111111xx1111111111111xx11y"; + test_missub_indexof(a, b); + + // search non matching variable size substring in string (32 chars). + a = " 1111111111111xx1111111111111xx1z"; + b = "1111111111111xx1111111111111xx1y"; + test_missub_indexof(a, b); + + // search non matching variable size substring in string (17 chars). + a = " 1111111111111xx1z"; + b = "1111111111111xx1y"; + test_missub_indexof(a, b); + + // search non matching variable size substring in string (16 chars). + a = " 111111111111xx1z"; + b = "111111111111xx1y"; + test_missub_indexof(a, b); + + // search non matching variable size substring in string (8 chars). + a = " 1111xx1z"; + b = "1111xx1y"; + test_missub_indexof(a, b); + + // search non matching variable size substring in string (7 chars). + a = " 111xx1z"; + b = "111xx1y"; + test_missub_indexof(a, b); + + + + // Testing constant substring search in variable size string. + + // search constant substring (17 chars). + b = " 1111111111111xx1111111111111xx1x"; // +1 to execute b.substring(1) first + TestCon tc = new TestCon17(); + test_consub_indexof(tc, b); + + // search constant substring (16 chars). + b = " 1111111111111xx1111111111111xx1x"; + tc = new TestCon16(); + test_consub_indexof(tc, b); + + // search constant substring (9 chars). + b = " 1111111111111xx1111111111111xx1x"; + tc = new TestCon9(); + test_consub_indexof(tc, b); + + // search constant substring (8 chars). + b = " 1111111111111xx1111111111111xx1x"; + tc = new TestCon8(); + test_consub_indexof(tc, b); + + // search constant substring (4 chars). + b = " 1111111111111xx1111111111111xx1x"; + tc = new TestCon4(); + test_consub_indexof(tc, b); + + // search constant substring (3 chars). + b = " 1111111111111xx1111111111111xx1x"; + tc = new TestCon3(); + test_consub_indexof(tc, b); + + // search constant substring (2 chars). + b = " 1111111111111xx1111111111111xx1y"; + tc = new TestCon2(); + test_consub_indexof(tc, b); + + // search constant substring (1 chars). + b = " 1111111111111xx1111111111111xx1y"; + tc = new TestCon1(); + test_consub_indexof(tc, b); + + + // search non matching constant substring (17 chars). + b = " 1111111111111xx1111111111111xx1z"; // +1 to execute b.substring(1) first + tc = new TestCon17(); + test_conmis_indexof(tc, b); + + // search non matching constant substring (16 chars). + b = " 1111111111111xx1111111111111xx1z"; + tc = new TestCon16(); + test_conmis_indexof(tc, b); + + // search non matching constant substring (9 chars). + b = " 1111111111111xx1111111111111xx1z"; + tc = new TestCon9(); + test_conmis_indexof(tc, b); + + // search non matching constant substring (8 chars). + b = " 1111111111111xx1111111111111xx1z"; + tc = new TestCon8(); + test_conmis_indexof(tc, b); + + // search non matching constant substring (4 chars). 
+ b = " 1111111111111xx1111111111111xx1z"; + tc = new TestCon4(); + test_conmis_indexof(tc, b); + + // search non matching constant substring (3 chars). + b = " 1111111111111xx1111111111111xx1z"; + tc = new TestCon3(); + test_conmis_indexof(tc, b); + + // search non matching constant substring (2 chars). + b = " 1111111111111xx1111111111111xx1z"; + tc = new TestCon2(); + test_conmis_indexof(tc, b); + + // search non matching constant substring (1 chars). + b = " 1111111111111xx1111111111111xx1z"; + tc = new TestCon1(); + test_conmis_indexof(tc, b); + + long end_total = System.currentTimeMillis(); + System.out.println("End run time: " + (end_total - start_total)); + + } + + public static long test_init(String a, String b) { + for (int i = 0; i < 512; i++) { + strings[i * 2] = new String(b.toCharArray()); + strings[i * 2 + 1] = new String(a.toCharArray()); + } + System.out.print(a.length() + " " + b.length() + " "); + return System.currentTimeMillis(); + } + + public static void test_end(String a, String b, int v, int expected, long start) { + long end = System.currentTimeMillis(); + int res = (v/ITERATIONS); + System.out.print(" " + res); + System.out.println(" time:" + (end - start)); + if (res != expected) { + System.out.println("wrong indexOf result: " + res + ", expected " + expected); + System.out.println("\"" + b + "\".indexOf(\"" + a + "\")"); + System.exit(97); + } + } + + public static int test_subvar() { + int s = 0; + int v = 0; + for (int i = 0; i < ITERATIONS; i++) { + v += strings[s].indexOf(strings[s + 1]); + s += 2; + if (s >= strings.length) s = 0; + } + return v; + } + + public static void test_varsub_indexof(String a, String b) { + System.out.println("Start search variable size substring in string (" + b.length() + " chars)"); + long start_it = System.currentTimeMillis(); + int limit = 1; // last a.length() == 1 + while (a.length() > limit) { + a = a.substring(1); + long start = test_init(a, b); + int v = test_subvar(); + test_end(a, b, v, (b.length() - a.length()), start); + } + long end_it = System.currentTimeMillis(); + System.out.println("End search variable size substring in string (" + b.length() + " chars), time: " + (end_it - start_it)); + } + + public static void test_varstr_indexof(String a, String b) { + System.out.println("Start search substring (" + a.length() + " chars) in variable size string"); + long start_it = System.currentTimeMillis(); + int limit = a.length(); + while (b.length() > limit) { + b = b.substring(1); + long start = test_init(a, b); + int v = test_subvar(); + test_end(a, b, v, (b.length() - a.length()), start); + } + long end_it = System.currentTimeMillis(); + System.out.println("End search substring (" + a.length() + " chars) in variable size string, time: " + (end_it - start_it)); + } + + public static void test_missub_indexof(String a, String b) { + System.out.println("Start search non matching variable size substring in string (" + b.length() + " chars)"); + long start_it = System.currentTimeMillis(); + int limit = 1; // last a.length() == 1 + while (a.length() > limit) { + a = a.substring(1); + long start = test_init(a, b); + int v = test_subvar(); + test_end(a, b, v, (-1), start); + } + long end_it = System.currentTimeMillis(); + System.out.println("End search non matching variable size substring in string (" + b.length() + " chars), time: " + (end_it - start_it)); + } + + + + public static void test_consub_indexof(TestCon tc, String b) { + System.out.println("Start search constant substring (" + tc.constr().length() + " chars)"); + 
long start_it = System.currentTimeMillis(); + int limit = tc.constr().length(); + while (b.length() > limit) { + b = b.substring(1); + long start = test_init(tc.constr(), b); + int v = test_subcon(tc); + test_end(tc.constr(), b, v, (b.length() - tc.constr().length()), start); + } + long end_it = System.currentTimeMillis(); + System.out.println("End search constant substring (" + tc.constr().length() + " chars), time: " + (end_it - start_it)); + } + + public static void test_conmis_indexof(TestCon tc, String b) { + System.out.println("Start search non matching constant substring (" + tc.constr().length() + " chars)"); + long start_it = System.currentTimeMillis(); + int limit = tc.constr().length(); + while (b.length() > limit) { + b = b.substring(1); + long start = test_init(tc.constr(), b); + int v = test_subcon(tc); + test_end(tc.constr(), b, v, (-1), start); + } + long end_it = System.currentTimeMillis(); + System.out.println("End search non matching constant substring (" + tc.constr().length() + " chars), time: " + (end_it - start_it)); + } + + public static int test_subcon(TestCon tc) { + int s = 0; + int v = 0; + for (int i = 0; i < ITERATIONS; i++) { + v += tc.indexOf(strings[s]); + s += 2; + if (s >= strings.length) s = 0; + } + return v; + } + + private interface TestCon { + public String constr(); + public int indexOf(String str); + } + + // search constant substring (17 chars). + private final static class TestCon17 implements TestCon { + private static final String constr = "1111111111111xx1x"; + public String constr() { return constr; } + public int indexOf(String str) { return str.indexOf(constr); } + } + + // search constant substring (16 chars). + private final static class TestCon16 implements TestCon { + private static final String constr = "111111111111xx1x"; + public String constr() { return constr; } + public int indexOf(String str) { return str.indexOf(constr); } + } + + // search constant substring (9 chars). + private final static class TestCon9 implements TestCon { + private static final String constr = "11111xx1x"; + public String constr() { return constr; } + public int indexOf(String str) { return str.indexOf(constr); } + } + + // search constant substring (8 chars). + private final static class TestCon8 implements TestCon { + private static final String constr = "1111xx1x"; + public String constr() { return constr; } + public int indexOf(String str) { return str.indexOf(constr); } + } + + // search constant substring (4 chars). + private final static class TestCon4 implements TestCon { + private static final String constr = "xx1x"; + public String constr() { return constr; } + public int indexOf(String str) { return str.indexOf(constr); } + } + + // search constant substring (3 chars). + private final static class TestCon3 implements TestCon { + private static final String constr = "x1x"; + public String constr() { return constr; } + public int indexOf(String str) { return str.indexOf(constr); } + } + + // search constant substring (2 chars). + private final static class TestCon2 implements TestCon { + private static final String constr = "1y"; + public String constr() { return constr; } + public int indexOf(String str) { return str.indexOf(constr); } + } + + + // search constant substring (1 chars). + private final static class TestCon1 implements TestCon { + private static final String constr = "y"; + public String constr() { return constr; } + public int indexOf(String str) { return str.indexOf(constr); } + } +} +
--- a/test/compiler/6987555/Test6987555.java Fri Apr 22 15:22:45 2011 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,177 +0,0 @@ -/* - * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -/** - * @test - * @bug 6987555 - * @summary JSR 292 unboxing to a boolean value fails on big-endian SPARC - * - * @run main/othervm -Xint -ea -XX:+UnlockExperimentalVMOptions -XX:+EnableMethodHandles -XX:+EnableInvokeDynamic -XX:+UnlockDiagnosticVMOptions -XX:+VerifyMethodHandles Test6987555 - */ - -import java.dyn.*; - -public class Test6987555 { - private static final Class CLASS = Test6987555.class; - private static final String NAME = "foo"; - private static final boolean DEBUG = false; - - public static void main(String[] args) throws Throwable { - testboolean(); - testbyte(); - testchar(); - testshort(); - testint(); - } - - // boolean - static void testboolean() throws Throwable { - doboolean(false); - doboolean(true); - } - static void doboolean(boolean x) throws Throwable { - if (DEBUG) System.out.println("boolean=" + x); - MethodHandle mh1 = MethodHandles.lookup().findStatic(CLASS, NAME, MethodType.methodType(boolean.class, boolean.class)); - MethodHandle mh2 = mh1.asType(MethodType.methodType(boolean.class, Boolean.class)); - boolean a = (boolean) mh1.invokeExact(x); - boolean b = (boolean) mh2.invokeExact(Boolean.valueOf(x)); - assert a == b : a + " != " + b; - } - - // byte - static void testbyte() throws Throwable { - byte[] a = new byte[] { - Byte.MIN_VALUE, - Byte.MIN_VALUE + 1, - -0x0F, - -1, - 0, - 1, - 0x0F, - Byte.MAX_VALUE - 1, - Byte.MAX_VALUE - }; - for (int i = 0; i < a.length; i++) { - dobyte(a[i]); - } - } - static void dobyte(byte x) throws Throwable { - if (DEBUG) System.out.println("byte=" + x); - MethodHandle mh1 = MethodHandles.lookup().findStatic(CLASS, NAME, MethodType.methodType(byte.class, byte.class)); - MethodHandle mh2 = mh1.asType(MethodType.methodType(byte.class, Byte.class)); - byte a = (byte) mh1.invokeExact(x); - byte b = (byte) mh2.invokeExact(Byte.valueOf(x)); - assert a == b : a + " != " + b; - } - - // char - static void testchar() throws Throwable { - char[] a = new char[] { - Character.MIN_VALUE, - Character.MIN_VALUE + 1, - 0x000F, - 0x00FF, - 0x0FFF, - Character.MAX_VALUE - 1, - Character.MAX_VALUE - }; - for (int i = 0; i < a.length; i++) { - dochar(a[i]); - } - } - static void dochar(char x) throws Throwable { - if (DEBUG) System.out.println("char=" + x); - MethodHandle mh1 = MethodHandles.lookup().findStatic(CLASS, NAME, 
MethodType.methodType(char.class, char.class)); - MethodHandle mh2 = mh1.asType(MethodType.methodType(char.class, Character.class)); - char a = (char) mh1.invokeExact(x); - char b = (char) mh2.invokeExact(Character.valueOf(x)); - assert a == b : a + " != " + b; - } - - // short - static void testshort() throws Throwable { - short[] a = new short[] { - Short.MIN_VALUE, - Short.MIN_VALUE + 1, - -0x0FFF, - -0x00FF, - -0x000F, - -1, - 0, - 1, - 0x000F, - 0x00FF, - 0x0FFF, - Short.MAX_VALUE - 1, - Short.MAX_VALUE - }; - for (int i = 0; i < a.length; i++) { - doshort(a[i]); - } - } - static void doshort(short x) throws Throwable { - if (DEBUG) System.out.println("short=" + x); - MethodHandle mh1 = MethodHandles.lookup().findStatic(CLASS, NAME, MethodType.methodType(short.class, short.class)); - MethodHandle mh2 = mh1.asType(MethodType.methodType(short.class, Short.class)); - short a = (short) mh1.invokeExact(x); - short b = (short) mh2.invokeExact(Short.valueOf(x)); - assert a == b : a + " != " + b; - } - - // int - static void testint() throws Throwable { - int[] a = new int[] { - Integer.MIN_VALUE, - Integer.MIN_VALUE + 1, - -0x00000FFF, - -0x000000FF, - -0x0000000F, - -1, - 0, - 1, - 0x0000000F, - 0x000000FF, - 0x00000FFF, - Integer.MAX_VALUE - 1, - Integer.MAX_VALUE - }; - for (int i = 0; i < a.length; i++) { - doint(a[i]); - } - } - static void doint(int x) throws Throwable { - if (DEBUG) System.out.println("int=" + x); - MethodHandle mh1 = MethodHandles.lookup().findStatic(CLASS, NAME, MethodType.methodType(int.class, int.class)); - MethodHandle mh2 = mh1.asType(MethodType.methodType(int.class, Integer.class)); - int a = (int) mh1.invokeExact(x); - int b = (int) mh2.invokeExact(Integer.valueOf(x)); - assert a == b : a + " != " + b; - } - - public static boolean foo(boolean i) { return i; } - public static byte foo(byte i) { return i; } - public static char foo(char i) { return i; } - public static short foo(short i) { return i; } - public static int foo(int i) { return i; } -}
--- a/test/compiler/6991596/Test6991596.java Fri Apr 22 15:22:45 2011 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,465 +0,0 @@ -/* - * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -/** - * @test - * @bug 6991596 - * @summary JSR 292 unimplemented adapter_opt_i2i and adapter_opt_l2i on SPARC - * - * @run main/othervm -ea -XX:+UnlockExperimentalVMOptions -XX:+EnableMethodHandles -XX:+EnableInvokeDynamic -XX:+UnlockDiagnosticVMOptions -XX:+VerifyMethodHandles Test6991596 - */ - -import java.dyn.*; - -public class Test6991596 { - private static final Class CLASS = Test6991596.class; - private static final String NAME = "foo"; - private static final boolean DEBUG = System.getProperty("DEBUG", "false").equals("true"); - - public static void main(String[] args) throws Throwable { - testboolean(); - testbyte(); - testchar(); - testshort(); - testint(); - testlong(); - } - - // Helpers to get various methods. - static MethodHandle getmh1(Class ret, Class arg) throws NoAccessException { - return MethodHandles.lookup().findStatic(CLASS, NAME, MethodType.methodType(ret, arg)); - } - static MethodHandle getmh2(MethodHandle mh1, Class ret, Class arg) { - return MethodHandles.convertArguments(mh1, MethodType.methodType(ret, arg)); - } - static MethodHandle getmh3(MethodHandle mh1, Class ret, Class arg) { - return MethodHandles.convertArguments(mh1, MethodType.methodType(ret, arg)); - } - - // test adapter_opt_i2i - static void testboolean() throws Throwable { - boolean[] a = new boolean[] { - true, - false - }; - for (int i = 0; i < a.length; i++) { - doboolean(a[i]); - } - } - static void doboolean(boolean x) throws Throwable { - if (DEBUG) System.out.println("boolean=" + x); - - // boolean - { - MethodHandle mh1 = getmh1( boolean.class, boolean.class); - MethodHandle mh2 = getmh2(mh1, boolean.class, boolean.class); - // TODO add this for all cases when the bugs are fixed. - //MethodHandle mh3 = getmh3(mh1, boolean.class, boolean.class); - boolean a = (boolean) mh1.invokeExact((boolean) x); - boolean b = (boolean) mh2.invokeExact(x); - //boolean c = mh3.<boolean>invokeExact((boolean) x); - check(x, a, b); - //check(x, c, x); - } - - // byte - { - MethodHandle mh1 = getmh1( byte.class, byte.class ); - MethodHandle mh2 = getmh2(mh1, byte.class, boolean.class); - byte a = (byte) mh1.invokeExact((byte) (x ? 
1 : 0)); - byte b = (byte) mh2.invokeExact(x); - check(x, a, b); - } - - // char - { - MethodHandle mh1 = getmh1( char.class, char.class); - MethodHandle mh2 = getmh2(mh1, char.class, boolean.class); - char a = (char) mh1.invokeExact((char) (x ? 1 : 0)); - char b = (char) mh2.invokeExact(x); - check(x, a, b); - } - - // short - { - MethodHandle mh1 = getmh1( short.class, short.class); - MethodHandle mh2 = getmh2(mh1, short.class, boolean.class); - short a = (short) mh1.invokeExact((short) (x ? 1 : 0)); - short b = (short) mh2.invokeExact(x); - check(x, a, b); - } - } - - static void testbyte() throws Throwable { - byte[] a = new byte[] { - Byte.MIN_VALUE, - Byte.MIN_VALUE + 1, - -0x0F, - -1, - 0, - 1, - 0x0F, - Byte.MAX_VALUE - 1, - Byte.MAX_VALUE - }; - for (int i = 0; i < a.length; i++) { - dobyte(a[i]); - } - } - static void dobyte(byte x) throws Throwable { - if (DEBUG) System.out.println("byte=" + x); - - // boolean - { - MethodHandle mh1 = getmh1( boolean.class, boolean.class); - MethodHandle mh2 = getmh2(mh1, boolean.class, byte.class); - boolean a = (boolean) mh1.invokeExact((x & 1) == 1); - boolean b = (boolean) mh2.invokeExact(x); - check(x, a, b); - } - - // byte - { - MethodHandle mh1 = getmh1( byte.class, byte.class); - MethodHandle mh2 = getmh2(mh1, byte.class, byte.class); - byte a = (byte) mh1.invokeExact((byte) x); - byte b = (byte) mh2.invokeExact(x); - check(x, a, b); - } - - // char - { - MethodHandle mh1 = getmh1( char.class, char.class); - MethodHandle mh2 = getmh2(mh1, char.class, byte.class); - char a = (char) mh1.invokeExact((char) x); - char b = (char) mh2.invokeExact(x); - check(x, a, b); - } - - // short - { - MethodHandle mh1 = getmh1( short.class, short.class); - MethodHandle mh2 = getmh2(mh1, short.class, byte.class); - short a = (short) mh1.invokeExact((short) x); - short b = (short) mh2.invokeExact(x); - check(x, a, b); - } - } - - static void testchar() throws Throwable { - char[] a = new char[] { - Character.MIN_VALUE, - Character.MIN_VALUE + 1, - 0x000F, - 0x00FF, - 0x0FFF, - Character.MAX_VALUE - 1, - Character.MAX_VALUE - }; - for (int i = 0; i < a.length; i++) { - dochar(a[i]); - } - } - static void dochar(char x) throws Throwable { - if (DEBUG) System.out.println("char=" + x); - - // boolean - { - MethodHandle mh1 = getmh1( boolean.class, boolean.class); - MethodHandle mh2 = getmh2(mh1, boolean.class, char.class); - boolean a = (boolean) mh1.invokeExact((x & 1) == 1); - boolean b = (boolean) mh2.invokeExact(x); - check(x, a, b); - } - - // byte - { - MethodHandle mh1 = getmh1( byte.class, byte.class); - MethodHandle mh2 = getmh2(mh1, byte.class, char.class); - byte a = (byte) mh1.invokeExact((byte) x); - byte b = (byte) mh2.invokeExact(x); - check(x, a, b); - } - - // char - { - MethodHandle mh1 = getmh1( char.class, char.class); - MethodHandle mh2 = getmh2(mh1, char.class, char.class); - char a = (char) mh1.invokeExact((char) x); - char b = (char) mh2.invokeExact(x); - check(x, a, b); - } - - // short - { - MethodHandle mh1 = getmh1( short.class, short.class); - MethodHandle mh2 = getmh2(mh1, short.class, char.class); - short a = (short) mh1.invokeExact((short) x); - short b = (short) mh2.invokeExact(x); - check(x, a, b); - } - } - - static void testshort() throws Throwable { - short[] a = new short[] { - Short.MIN_VALUE, - Short.MIN_VALUE + 1, - -0x0FFF, - -0x00FF, - -0x000F, - -1, - 0, - 1, - 0x000F, - 0x00FF, - 0x0FFF, - Short.MAX_VALUE - 1, - Short.MAX_VALUE - }; - for (int i = 0; i < a.length; i++) { - doshort(a[i]); - } - } - static void 
doshort(short x) throws Throwable { - if (DEBUG) System.out.println("short=" + x); - - // boolean - { - MethodHandle mh1 = getmh1( boolean.class, boolean.class); - MethodHandle mh2 = getmh2(mh1, boolean.class, short.class); - boolean a = (boolean) mh1.invokeExact((x & 1) == 1); - boolean b = (boolean) mh2.invokeExact(x); - check(x, a, b); - } - - // byte - { - MethodHandle mh1 = getmh1( byte.class, byte.class); - MethodHandle mh2 = getmh2(mh1, byte.class, short.class); - byte a = (byte) mh1.invokeExact((byte) x); - byte b = (byte) mh2.invokeExact(x); - check(x, a, b); - } - - // char - { - MethodHandle mh1 = getmh1( char.class, char.class); - MethodHandle mh2 = getmh2(mh1, char.class, short.class); - char a = (char) mh1.invokeExact((char) x); - char b = (char) mh2.invokeExact(x); - check(x, a, b); - } - - // short - { - MethodHandle mh1 = getmh1( short.class, short.class); - MethodHandle mh2 = getmh2(mh1, short.class, short.class); - short a = (short) mh1.invokeExact((short) x); - short b = (short) mh2.invokeExact(x); - check(x, a, b); - } - } - - static void testint() throws Throwable { - int[] a = new int[] { - Integer.MIN_VALUE, - Integer.MIN_VALUE + 1, - -0x0FFFFFFF, - -0x00FFFFFF, - -0x000FFFFF, - -0x0000FFFF, - -0x00000FFF, - -0x000000FF, - -0x0000000F, - -1, - 0, - 1, - 0x0000000F, - 0x000000FF, - 0x00000FFF, - 0x0000FFFF, - 0x000FFFFF, - 0x00FFFFFF, - 0x0FFFFFFF, - Integer.MAX_VALUE - 1, - Integer.MAX_VALUE - }; - for (int i = 0; i < a.length; i++) { - doint(a[i]); - } - } - static void doint(int x) throws Throwable { - if (DEBUG) System.out.println("int=" + x); - - // boolean - { - MethodHandle mh1 = getmh1( boolean.class, boolean.class); - MethodHandle mh2 = getmh2(mh1, boolean.class, int.class); - boolean a = (boolean) mh1.invokeExact((x & 1) == 1); - boolean b = (boolean) mh2.invokeExact(x); - check(x, a, b); - } - - // byte - { - MethodHandle mh1 = getmh1( byte.class, byte.class); - MethodHandle mh2 = getmh2(mh1, byte.class, int.class); - byte a = (byte) mh1.invokeExact((byte) x); - byte b = (byte) mh2.invokeExact(x); - check(x, a, b); - } - - // char - { - MethodHandle mh1 = getmh1( char.class, char.class); - MethodHandle mh2 = getmh2(mh1, char.class, int.class); - char a = (char) mh1.invokeExact((char) x); - char b = (char) mh2.invokeExact(x); - check(x, a, b); - } - - // short - { - MethodHandle mh1 = getmh1( short.class, short.class); - MethodHandle mh2 = getmh2(mh1, short.class, int.class); - short a = (short) mh1.invokeExact((short) x); - short b = (short) mh2.invokeExact(x); - assert a == b : a + " != " + b; - check(x, a, b); - } - - // int - { - MethodHandle mh1 = getmh1( int.class, int.class); - MethodHandle mh2 = getmh2(mh1, int.class, int.class); - int a = (int) mh1.invokeExact((int) x); - int b = (int) mh2.invokeExact(x); - check(x, a, b); - } - } - - // test adapter_opt_l2i - static void testlong() throws Throwable { - long[] a = new long[] { - Long.MIN_VALUE, - Long.MIN_VALUE + 1, - -0x000000000FFFFFFFL, - -0x0000000000FFFFFFL, - -0x00000000000FFFFFL, - -0x000000000000FFFFL, - -0x0000000000000FFFL, - -0x00000000000000FFL, - -0x000000000000000FL, - -1L, - 0L, - 1L, - 0x000000000000000FL, - 0x00000000000000FFL, - 0x0000000000000FFFL, - 0x0000000000000FFFL, - 0x000000000000FFFFL, - 0x00000000000FFFFFL, - 0x0000000000FFFFFFL, - 0x000000000FFFFFFFL, - Long.MAX_VALUE - 1, - Long.MAX_VALUE - }; - for (int i = 0; i < a.length; i++) { - dolong(a[i]); - } - } - static void dolong(long x) throws Throwable { - if (DEBUG) System.out.println("long=" + x); - - // boolean - { - 
MethodHandle mh1 = getmh1( boolean.class, boolean.class); - MethodHandle mh2 = getmh2(mh1, boolean.class, long.class); - boolean a = (boolean) mh1.invokeExact((x & 1L) == 1L); - boolean b = (boolean) mh2.invokeExact(x); - check(x, a, b); - } - - // byte - { - MethodHandle mh1 = getmh1( byte.class, byte.class); - MethodHandle mh2 = getmh2(mh1, byte.class, long.class); - byte a = (byte) mh1.invokeExact((byte) x); - byte b = (byte) mh2.invokeExact(x); - check(x, a, b); - } - - // char - { - MethodHandle mh1 = getmh1( char.class, char.class); - MethodHandle mh2 = getmh2(mh1, char.class, long.class); - char a = (char) mh1.invokeExact((char) x); - char b = (char) mh2.invokeExact(x); - check(x, a, b); - } - - // short - { - MethodHandle mh1 = getmh1( short.class, short.class); - MethodHandle mh2 = getmh2(mh1, short.class, long.class); - short a = (short) mh1.invokeExact((short) x); - short b = (short) mh2.invokeExact(x); - check(x, a, b); - } - - // int - { - MethodHandle mh1 = getmh1( int.class, int.class); - MethodHandle mh2 = getmh2(mh1, int.class, long.class); - int a = (int) mh1.invokeExact((int) x); - int b = (int) mh2.invokeExact(x); - check(x, a, b); - } - } - - static void check(boolean x, boolean e, boolean a) { p(z2h(x), z2h(e), z2h(a)); assert e == a : z2h(x) + ": " + z2h(e) + " != " + z2h(a); } - static void check(boolean x, byte e, byte a) { p(z2h(x), i2h(e), i2h(a)); assert e == a : z2h(x) + ": " + i2h(e) + " != " + i2h(a); } - static void check(boolean x, int e, int a) { p(z2h(x), i2h(e), i2h(a)); assert e == a : z2h(x) + ": " + i2h(e) + " != " + i2h(a); } - - static void check(int x, boolean e, boolean a) { p(i2h(x), z2h(e), z2h(a)); assert e == a : i2h(x) + ": " + z2h(e) + " != " + z2h(a); } - static void check(int x, byte e, byte a) { p(i2h(x), i2h(e), i2h(a)); assert e == a : i2h(x) + ": " + i2h(e) + " != " + i2h(a); } - static void check(int x, int e, int a) { p(i2h(x), i2h(e), i2h(a)); assert e == a : i2h(x) + ": " + i2h(e) + " != " + i2h(a); } - - static void check(long x, boolean e, boolean a) { p(l2h(x), z2h(e), z2h(a)); assert e == a : l2h(x) + ": " + z2h(e) + " != " + z2h(a); } - static void check(long x, byte e, byte a) { p(l2h(x), i2h(e), i2h(a)); assert e == a : l2h(x) + ": " + i2h(e) + " != " + i2h(a); } - static void check(long x, int e, int a) { p(l2h(x), i2h(e), i2h(a)); assert e == a : l2h(x) + ": " + i2h(e) + " != " + i2h(a); } - - static void p(String x, String e, String a) { if (DEBUG) System.out.println(x + ": expected: " + e + ", actual: " + a); } - - static String z2h(boolean x) { return x ? "1" : "0"; } - static String i2h(int x) { return Integer.toHexString(x); } - static String l2h(long x) { return Long.toHexString(x); } - - // to int - public static boolean foo(boolean i) { return i; } - public static byte foo(byte i) { return i; } - public static char foo(char i) { return i; } - public static short foo(short i) { return i; } - public static int foo(int i) { return i; } -}
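For context, the removed test pairs a direct handle (mh1) with a converted handle (mh2) for each primitive source/target combination and checks that the conversion adapter produces the same value as a plain Java cast. A minimal standalone sketch of that pairing for the l2i case, assuming the getmh1/getmh2 helpers amount to findStatic plus an argument conversion such as explicitCastArguments (the real helpers are defined elsewhere in the removed file and may use a different JSR 292 API):

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

public class L2IAdapterSketch {
    // Identity target, mirroring foo(int) in the removed test.
    public static int foo(int i) { return i; }

    public static void main(String[] args) throws Throwable {
        MethodHandles.Lookup lookup = MethodHandles.lookup();

        // mh1: (int)int -- direct handle, analogous to getmh1(int.class, int.class).
        MethodHandle mh1 = lookup.findStatic(L2IAdapterSketch.class, "foo",
                MethodType.methodType(int.class, int.class));

        // mh2: (long)int -- the same target reached through a long-to-int argument
        // conversion, analogous to getmh2(mh1, int.class, long.class).
        MethodHandle mh2 = MethodHandles.explicitCastArguments(
                mh1, MethodType.methodType(int.class, long.class));

        long x = 0x000000000FFFFFFFL;
        int a = (int) mh1.invokeExact((int) x);  // expected: plain Java narrowing cast
        int b = (int) mh2.invokeExact(x);        // actual: value produced by the adapter
        if (a != b) throw new AssertionError(a + " != " + b);
    }
}

The removed test runs the same comparison over the boundary arrays above, so both the interpreter path and the compiled adapter stubs see sign-boundary and extreme inputs for every source width.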
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/7024475/Test7024475.java Fri Apr 22 15:30:53 2011 +0200 @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 7024475 + * @summary loop doesn't terminate when compiled + * + * @run main Test7024475 + */ + +public class Test7024475 { + + static int i; + static int x1; + static int[] bucket_B; + + static void test(Test7024475 test, int i, int c0, int j, int c1) { + for (;;) { + if (c1 > c0) { + if (c0 > 253) { + throw new InternalError("c0 = " + c0); + } + int index = c0 * 256 + c1; + if (index == -1) return; + i = bucket_B[index]; + if (1 < j - i && test != null) + x1 = 0; + j = i; + c1--; + } else { + c0--; + if (j <= 0) + break; + c1 = 255; + } + } + } + + public static void main(String args[]) { + Test7024475 t = new Test7024475(); + bucket_B = new int[256*256]; + for (int i = 1; i < 256*256; i++) { + bucket_B[i] = 1; + } + for (int n = 0; n < 100000; n++) { + test(t, 2, 85, 1, 134); + } + } +}
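Because the failure mode for 7024475 is a loop that never exits once compiled, a standalone reproduction is easier to work with when the suspect call runs under a watchdog. A sketch of that pattern is below; the two-minute budget, the wrapper class name, and the placeholder loop body are illustrative only, since the test itself relies on the harness timeout rather than any in-process guard:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class HangGuardSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        Future<?> run = pool.submit(new Runnable() {
            public void run() {
                // Stand-in for the hot call; in the real test this would be the
                // Test7024475.test(t, 2, 85, 1, 134) warm-up loop.
                for (int n = 0; n < 100000; n++) { }
            }
        });
        try {
            run.get(2, TimeUnit.MINUTES);   // fail instead of hanging forever
            System.out.println("loop terminated");
        } catch (TimeoutException e) {
            run.cancel(true);
            throw new AssertionError("compiled loop did not terminate");
        } finally {
            pool.shutdownNow();
        }
    }
}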
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/7029152/Test.java Fri Apr 22 15:30:53 2011 +0200 @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test + * @bug 7029152 + * @summary Ideal nodes for String intrinsics miss memory edge optimization + * + * @run main/othervm -Xbatch Test + */ + +public class Test { + + static final String str = "11111xx11111xx1x"; + static int idx = 0; + + static int IndexOfTest(String str) { + return str.indexOf("11111xx1x"); + } + + public static void main(String args[]) { + final int ITERS=2000000; + + for (int i=0; i<ITERS; i++) { + idx = IndexOfTest(str); + } + System.out.println("IndexOf = " + idx); + } +}
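For reference, the value this test prints is fixed by the string contents: "11111xx1x" first occurs in "11111xx11111xx1x" at offset 7, since the match starting at offset 0 fails at the ninth character ('1' rather than 'x'). A minimal check of that constant, independent of the intrinsic under test (the class name here is illustrative):

public class IndexOfValueSketch {
    public static void main(String[] args) {
        String haystack = "11111xx11111xx1x";
        String needle = "11111xx1x";
        int idx = haystack.indexOf(needle);   // the call the intrinsic compiles in IndexOfTest
        if (idx != 7) {
            throw new AssertionError("IndexOf = " + idx + ", expected 7");
        }
        System.out.println("IndexOf = " + idx);
    }
}

The bug summary describes a missed optimization rather than a wrong answer, so the printed value is expected to be 7 either way; the constant simply makes the test's output easy to sanity-check.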
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtime/6878713/Test6878713.sh Fri Apr 22 15:30:53 2011 +0200 @@ -0,0 +1,74 @@ +#!/bin/sh + +## +## @test +## @bug 6878713 +## @summary Verifier heap corruption, relating to backward jsrs +## @run shell/timeout=120 Test6878713.sh +## + +if [ "${TESTSRC}" = "" ] +then TESTSRC=. +fi + +if [ "${TESTJAVA}" = "" ] +then + PARENT=`dirname \`which java\`` + TESTJAVA=`dirname ${PARENT}` + echo "TESTJAVA not set, selecting " ${TESTJAVA} + echo "If this is incorrect, try setting the variable manually." +fi + +if [ "${TESTCLASSES}" = "" ] +then + echo "TESTCLASSES not set. Test cannot execute. Failed." + exit 1 +fi + +BIT_FLAG="" + +# set platform-dependent variables +OS=`uname -s` +case "$OS" in + SunOS | Linux ) + NULL=/dev/null + PS=":" + FS="/" + ## for solaris, linux it's HOME + FILE_LOCATION=$HOME + if [ -f ${FILE_LOCATION}${FS}JDK64BIT -a ${OS} = "SunOS" ] + then + BIT_FLAG=`cat ${FILE_LOCATION}${FS}JDK64BIT | grep -v '^#'` + fi + ;; + Windows_* ) + NULL=NUL + PS=";" + FS="\\" + ;; + * ) + echo "Unrecognized system!" + exit 1; + ;; +esac + +JEMMYPATH=${CPAPPEND} +CLASSPATH=.${PS}${TESTCLASSES}${PS}${JEMMYPATH} ; export CLASSPATH + +THIS_DIR=`pwd` + +${TESTJAVA}${FS}bin${FS}java ${BIT_FLAG} -version + +${TESTJAVA}${FS}bin${FS}jar xvf ${TESTSRC}${FS}testcase.jar + +${TESTJAVA}${FS}bin${FS}java ${BIT_FLAG} OOMCrashClass1960_2 > test.out 2>&1 + +if [ -s core -o -s "hs_*.log" ] +then + cat hs*.log + echo "Test Failed" + exit 1 +else + echo "Test Passed" + exit 0 +fi